code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from ngraph.util.persist import valid_path_append, fetch_file
import os
import numpy as np
class PTB(object):
    """
    Penn Treebank data set from http://arxiv.org/pdf/1409.2329v5.pdf
    Arguments:
        path (string): Data directory to find the data, if not existing, will
                       download the data
        shift_target (boolean): Set the target to be the same sequence of shifted
                                version of the sequence. Default to be True, for
                                language models.
        use_words (boolean): tokenize by whitespace-separated words instead of
                             characters.
    """
    def __init__(self, path='.', use_words=False, shift_target=True):
        self.path = path
        self.url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data'
        self.filemap = dict(train=dict(filename='ptb.train.txt', size=5101618),
                            test=dict(filename='ptb.test.txt', size=449945),
                            valid=dict(filename='ptb.valid.txt', size=399782))
        self.shift_target = shift_target
        self.use_words = use_words
    def load_data(self):
        """
        Load (downloading first if necessary) the train/test/valid splits.

        Returns:
            dict: phase -> {'inp_txt': token indices, 'tgt_txt': target indices}
        """
        self.data_dict = {}
        self.vocab = None
        for phase in ['train', 'test', 'valid']:
            filename, filesize = self.filemap[phase]['filename'], self.filemap[phase]['size']
            workdir, filepath = valid_path_append(self.path, '', filename)
            if not os.path.exists(filepath):
                fetch_file(self.url, filename, filepath, filesize)
            # use a context manager so the file handle is closed promptly
            # (the original left it open until garbage collection)
            with open(filepath) as f:
                tokens = f.read()  # add tokenization here if necessary
            if self.use_words:
                tokens = tokens.strip().split()
            # vocabulary is built from the first phase seen ('train') and
            # then reused for the remaining phases
            self.vocab = sorted(set(tokens)) if self.vocab is None else self.vocab
            # vocab dicts
            self.token_to_index = dict((t, i) for i, t in enumerate(self.vocab))
            self.index_to_token = dict((i, t) for i, t in enumerate(self.vocab))
            # map tokens to indices
            X = np.asarray([self.token_to_index[t] for t in tokens], dtype=np.uint32)
            if self.shift_target:
                # target is the input rotated left by one token
                y = np.concatenate((X[1:], X[:1]))
            else:
                y = X.copy()
            self.data_dict[phase] = {'inp_txt': X, 'tgt_txt': y}
        return self.data_dict
| [
"os.path.exists",
"ngraph.util.persist.fetch_file",
"numpy.asarray",
"ngraph.util.persist.valid_path_append",
"numpy.concatenate"
] | [((2037, 2079), 'ngraph.util.persist.valid_path_append', 'valid_path_append', (['self.path', '""""""', 'filename'], {}), "(self.path, '', filename)\n", (2054, 2079), False, 'from ngraph.util.persist import valid_path_append, fetch_file\n'), ((2680, 2749), 'numpy.asarray', 'np.asarray', (['[self.token_to_index[t] for t in tokens]'], {'dtype': 'np.uint32'}), '([self.token_to_index[t] for t in tokens], dtype=np.uint32)\n', (2690, 2749), True, 'import numpy as np\n'), ((2099, 2123), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (2113, 2123), False, 'import os\n'), ((2141, 2191), 'ngraph.util.persist.fetch_file', 'fetch_file', (['self.url', 'filename', 'filepath', 'filesize'], {}), '(self.url, filename, filepath, filesize)\n', (2151, 2191), False, 'from ngraph.util.persist import valid_path_append, fetch_file\n'), ((2804, 2834), 'numpy.concatenate', 'np.concatenate', (['(X[1:], X[:1])'], {}), '((X[1:], X[:1]))\n', (2818, 2834), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
import numpy as np
__all__= ['actor_net']
class ActorNet(nn.Module):
    """Deterministic actor: maps a state to a bounded continuous action.

    Architecture: state_dim -> 400 -> 400 -> 300 -> z_dim with ReLU
    activations and a tanh output scaled by ``max_action``.
    """

    def __init__(self, args):
        super(ActorNet, self).__init__()
        self.args = args
        # keep the original layer attribute names so saved state_dicts
        # remain loadable
        self.l1 = nn.Linear(args.state_dim, 400)  # 400
        self.l2 = nn.Linear(400, 400)
        self.l2_additional = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, args.z_dim)
        self.max_action = args.max_action

    def forward(self, x):
        """Run the policy on one state; returns a flat numpy action vector."""
        state = torch.FloatTensor(x.reshape(1, -1)).to(self.args.device)
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        hidden = F.relu(self.l2_additional(hidden))
        action = self.max_action * torch.tanh(self.l3(hidden))
        return action.cpu().data.numpy().flatten()
class ActorNetInSAC(nn.Module):
    """Stochastic (Gaussian) actor for Soft Actor-Critic.

    Outputs a tanh-squashed sample from a Normal whose mean and std are
    produced by the network; the action is scaled by ``max_action``.
    """
    def __init__(self, args):
        super(ActorNetInSAC, self).__init__()
        self.args = args
        state_dim = args.state_dim
        action_dim = args.z_dim
        max_action = args.max_action
        self.l1 = nn.Linear(state_dim, 400) # 400
        self.l2 = nn.Linear(400, 400)
        self.l2_additional = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, action_dim)
        # std head branches off the first hidden layer (400 units)
        self.log_std_layer = nn.Linear(400, action_dim)
        self.max_action = max_action
    def forward(self, x, deterministic=False, repara_trick=False, with_logprob=True):
        """Sample (or take the mean of) an action for a single state.

        deterministic: return the distribution mean (evaluation mode).
        repara_trick: use rsample() so gradients flow through the sample.
        with_logprob: also compute the squashed log-probability.
        Returns a flat numpy action vector.
        """
        x = torch.FloatTensor(x.reshape(1, -1)).to(self.args.device)
        x = F.relu(self.l1(x))
        # NOTE(review): log_std is not clamped to a [min, max] range as is
        # common in SAC implementations -- confirm std cannot explode.
        log_std = self.log_std_layer(x)
        std = torch.exp(log_std)
        x = F.relu(self.l2(x))
        x = F.relu(self.l2_additional(x))
        mu = self.max_action * torch.tanh(self.l3(x))
        pi_distribution = Normal(mu, std)
        if deterministic:
            pi_action = mu
        elif repara_trick:
            pi_action = pi_distribution.rsample()
        else:
            pi_action = pi_distribution.sample()
        if with_logprob:
            # tanh-squashing correction (see SAC paper, appendix C)
            logp_pi = pi_distribution.log_prob(pi_action).sum(axis = -1)
            # NOTE(review): logp_pi is computed but never returned, so this
            # branch is dead weight; also the first sum uses axis=-1 and the
            # second axis=1 -- confirm both are intended.
            logp_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2 * pi_action))).sum(axis=1)
        else:
            logp_pi = None
        # squash and rescale the sampled action into [-max_action, max_action]
        pi_action = torch.tanh(pi_action)
        pi_action = self.max_action*pi_action
        return pi_action.cpu().data.numpy().flatten()
def actor_net(args,data=None):
    # Factory: build the actor matching args.policy_name and load the
    # pretrained weights passed in `data` (a state_dict).
    if args.policy_name == "DDPG":
        model = ActorNet(args)
    elif args.policy_name == "SoftAC":
        model = ActorNetInSAC(args)
    else:
        # NOTE(review): any other policy_name leaves `model` unbound, so the
        # load_state_dict call below raises NameError -- confirm intended.
        pass
    # NOTE(review): data=None would make load_state_dict fail; callers are
    # expected to always pass a state_dict.
    model.load_state_dict(data)
    return model | [
"torch.tanh",
"torch.distributions.normal.Normal",
"numpy.log",
"torch.exp",
"torch.nn.functional.softplus",
"torch.nn.Linear"
] | [((402, 427), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(400)'], {}), '(state_dim, 400)\n', (411, 427), True, 'import torch.nn as nn\n'), ((453, 472), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(400)'], {}), '(400, 400)\n', (462, 472), True, 'import torch.nn as nn\n'), ((502, 521), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (511, 521), True, 'import torch.nn as nn\n'), ((540, 566), 'torch.nn.Linear', 'nn.Linear', (['(300)', 'action_dim'], {}), '(300, action_dim)\n', (549, 566), True, 'import torch.nn as nn\n'), ((1161, 1186), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(400)'], {}), '(state_dim, 400)\n', (1170, 1186), True, 'import torch.nn as nn\n'), ((1212, 1231), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(400)'], {}), '(400, 400)\n', (1221, 1231), True, 'import torch.nn as nn\n'), ((1261, 1280), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (1270, 1280), True, 'import torch.nn as nn\n'), ((1299, 1325), 'torch.nn.Linear', 'nn.Linear', (['(300)', 'action_dim'], {}), '(300, action_dim)\n', (1308, 1325), True, 'import torch.nn as nn\n'), ((1355, 1381), 'torch.nn.Linear', 'nn.Linear', (['(400)', 'action_dim'], {}), '(400, action_dim)\n', (1364, 1381), True, 'import torch.nn as nn\n'), ((1661, 1679), 'torch.exp', 'torch.exp', (['log_std'], {}), '(log_std)\n', (1670, 1679), False, 'import torch\n'), ((1833, 1848), 'torch.distributions.normal.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (1839, 1848), False, 'from torch.distributions.normal import Normal\n'), ((2298, 2319), 'torch.tanh', 'torch.tanh', (['pi_action'], {}), '(pi_action)\n', (2308, 2319), False, 'import torch\n'), ((2195, 2221), 'torch.nn.functional.softplus', 'F.softplus', (['(-2 * pi_action)'], {}), '(-2 * pi_action)\n', (2205, 2221), True, 'import torch.nn.functional as F\n'), ((2171, 2180), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2177, 2180), True, 'import numpy as np\n')] |
import colorsys
import numpy as np
import cv2
from unidecode import unidecode
# Inspired by https://github.com/hhk7734/tensorflow-yolov4
# Build a palette of visually distinct per-class bbox colors.
_MAX_CLASSES = 14 * 6
# Evenly spaced hues at full saturation/value (generated with 20% headroom).
_HSV = [(hue / _MAX_CLASSES, 1.0, 1.0) for hue in range(int(_MAX_CLASSES * 1.2))]
_COLORS = [colorsys.hsv_to_rgb(*hsv) for hsv in _HSV]
_COLORS = [tuple(int(channel * 255) for channel in rgb) for rgb in _COLORS]
# Interleave hues so consecutive class ids get far-apart colors:
# 0 14 28 42 56 70 1 15 29 43 57 71 2 ...
_BBOX_COLORS = [_COLORS[14 * (i % 6) + (i // 6)] for i in range(_MAX_CLASSES)]
def draw_bboxes_without_text(
    image: np.ndarray, bboxes: np.ndarray
):
    """
    Draw plain red 1-px rectangles for every bbox onto a copy of `image`.

    @parma `image`: Dim(height, width, channel)
    @parma `bboxes`
        Dim(-1, (x_min, y_min, x_max, y_max))
    @return drawn_image
    Usage:
        image = draw_bboxes(image, bboxes)
    """
    height, width, _ = image.shape
    canvas = np.copy(image)
    red = (255, 0, 0)
    # One rectangle per box; coordinates are truncated to ints for OpenCV.
    for box in bboxes:
        x_min, y_min, x_max, y_max = (int(v) for v in box[:4])
        cv2.rectangle(canvas, (x_min, y_min), (x_max, y_max), red, 1)
    return canvas
def draw_bboxes(
    image: np.ndarray, bboxes: np.ndarray, probs: np.ndarray, names: np.ndarray
):
    """
    Draw one labelled, colored rectangle per bbox onto a copy of `image`.

    @parma `image`: Dim(height, width, channel)
    @parma `bboxes`
        Dim(-1, (x_min, y_min, x_max, y_max))
    @parma `probs`
        Dim(-1,)
    @parma `names`
        Dim(-1,)
    @return drawn_image
    Usage:
        image = yolo.draw_bboxes(image, bboxes, probs, names)
    """
    height, width, _ = image.shape
    image = np.copy(image)
    # sorted unique names; a box's color index is its name's rank here
    name_ids = np.unique(names)
    # Draw bboxes
    for bbox_id, bbox in enumerate(bboxes):
        left = int(bbox[0])  # x_min
        top = int(bbox[1])  # y_min
        right = int(bbox[2])  # x_max
        bottom = int(bbox[3])  # y_max
        font_size = 0.4
        font_thickness = 1
        # find name id, prob and set color
        name_id = np.where(np.array(name_ids) == names[bbox_id])[0][0]
        prob = probs[bbox_id]
        color = _BBOX_COLORS[name_id%_MAX_CLASSES]
        # Get text size
        bbox_text = "{}: {:.1%}".format(names[bbox_id], prob)
        t_w, t_h = cv2.getTextSize(bbox_text, 0, font_size, font_thickness)[0]
        t_h += 3
        # Draw box (clamped so the label always fits inside the image)
        if top < t_h:
            top = t_h
        if left < 1:
            left = 1
        if bottom >= height:
            bottom = height - 1
        if right >= width:
            right = width - 1
        cv2.rectangle(image, (left, top), (right, bottom), color, 1)
        # Draw text box (filled background above the box's top edge)
        cv2.rectangle(image, (left, top), (left + t_w, top - t_h), color, -1)
        # Draw text in the complementary color for contrast
        cv2.putText(
            image,
            unidecode(bbox_text),  # OpenCV does not handle ~, ^, ´, etc..
            (left, top - 2),
            cv2.FONT_HERSHEY_SIMPLEX,
            font_size,
            (
                255 - color[0],
                255 - color[1],
                255 - color[2],
            ),
            font_thickness,
            lineType=cv2.LINE_AA,
        )
    return image | [
"cv2.rectangle",
"numpy.copy",
"numpy.unique",
"colorsys.hsv_to_rgb",
"numpy.array",
"unidecode.unidecode",
"cv2.getTextSize"
] | [((259, 282), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (278, 282), False, 'import colorsys\n'), ((882, 896), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (889, 896), True, 'import numpy as np\n'), ((1722, 1736), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (1729, 1736), True, 'import numpy as np\n'), ((1753, 1769), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (1762, 1769), True, 'import numpy as np\n'), ((1173, 1233), 'cv2.rectangle', 'cv2.rectangle', (['image', '(left, top)', '(right, bottom)', 'color', '(1)'], {}), '(image, (left, top), (right, bottom), color, 1)\n', (1186, 1233), False, 'import cv2\n'), ((2693, 2753), 'cv2.rectangle', 'cv2.rectangle', (['image', '(left, top)', '(right, bottom)', 'color', '(1)'], {}), '(image, (left, top), (right, bottom), color, 1)\n', (2706, 2753), False, 'import cv2\n'), ((2790, 2859), 'cv2.rectangle', 'cv2.rectangle', (['image', '(left, top)', '(left + t_w, top - t_h)', 'color', '(-1)'], {}), '(image, (left, top), (left + t_w, top - t_h), color, -1)\n', (2803, 2859), False, 'import cv2\n'), ((2370, 2426), 'cv2.getTextSize', 'cv2.getTextSize', (['bbox_text', '(0)', 'font_size', 'font_thickness'], {}), '(bbox_text, 0, font_size, font_thickness)\n', (2385, 2426), False, 'import cv2\n'), ((2938, 2958), 'unidecode.unidecode', 'unidecode', (['bbox_text'], {}), '(bbox_text)\n', (2947, 2958), False, 'from unidecode import unidecode\n'), ((2133, 2151), 'numpy.array', 'np.array', (['name_ids'], {}), '(name_ids)\n', (2141, 2151), True, 'import numpy as np\n')] |
import random
import math
import copy
import numpy as np
import logging
import collections
import pyximport
# pyximport.install()
# Compile the cython_encode extension at import time; the numpy headers are
# required to build the .pyx module.
pyximport.install(setup_args={
    # "script_args":["--compiler=mingw32"],
    "include_dirs":np.get_include()},
    # reload_support=True
    )
import cython_encode
def cython_normalize(input_pattern, normalize_f, binary_sum):
    """Normalize `input_pattern` in place via the cython kernel.

    `normalize_f` is a fraction of the pattern length; the kernel receives
    the corresponding absolute count. Returns the (mutated) input array.
    """
    # The cython kernel requires a C-contiguous uint8 buffer.
    assert input_pattern.data.c_contiguous
    assert input_pattern.dtype == np.uint8
    length = len(input_pattern)
    target_count = int(normalize_f * length)
    cython_encode.cython_normalize(
        np.ravel(input_pattern, order='A'),
        target_count,
        length,
        binary_sum,
        )
    return input_pattern
class SimulationLite():
    """Flattened, array-based snapshot of a granule-cell simulation.

    Keeps per-GrC dendrite counts, the flattened dendrite->MF mapping and
    the activation thresholds so patterns can be encoded without the full
    simulation object.
    """
    def __init__(self, sim):
        counts = []
        mf_map = []
        scales = []
        for grc in sim.grcs:
            counts.append(len(grc.claws))
            scales.append(grc.act_lv_scale)
            for claw in grc.claws:
                # MF ids are stored as uint16 and must fit in it
                assert claw <= 65535
                mf_map.append(claw)
        self.dendrite_mf_map = np.array(mf_map, dtype=np.uint16)
        self.dendrite_counts = np.array(counts, dtype=np.uint8)
        self.thresholds = np.array(scales, dtype=np.float32)
    def encode(self, input_pattern, out_array=None, use_cython=True,
               normalize_f=None):
        """Encode an MF activity pattern into a binary GrC activity vector.

        A GrC fires (1) iff the sum of its dendritic inputs reaches its
        threshold. `normalize_f` is only supported on the cython path.
        """
        n_grcs = len(self.dendrite_counts)
        if out_array is None:
            out_array = np.empty(n_grcs, dtype=np.uint8)
        if use_cython:
            # The cython kernel needs C-contiguous buffers of exact dtypes.
            assert input_pattern.data.c_contiguous
            assert out_array.data.c_contiguous
            assert self.dendrite_mf_map.data.c_contiguous
            assert self.dendrite_counts.data.c_contiguous
            assert self.thresholds.data.c_contiguous
            assert input_pattern.dtype == np.float32
            assert out_array.dtype == np.uint8
            assert self.dendrite_mf_map.dtype == np.uint16
            assert self.dendrite_counts.dtype == np.uint8
            assert self.thresholds.dtype == np.float32
            normalize = 0
            if normalize_f is not None:
                normalize = int(normalize_f*n_grcs)
            cython_encode.cython_encode(
                np.ravel(input_pattern, order='A'),
                np.ravel(self.dendrite_counts, order='A'),
                np.ravel(self.dendrite_mf_map, order='A'),
                np.ravel(self.thresholds, order='A'),
                n_grcs,
                np.ravel(out_array, order='A'),
                normalize
                )
            return out_array
        # Pure-python fallback (no normalization support).
        assert normalize_f is None
        pos = 0
        for idx, n_dendrites in enumerate(self.dendrite_counts):
            total = 0.0
            for mf_id in self.dendrite_mf_map[pos:pos + n_dendrites]:
                total += input_pattern[mf_id]
            pos += n_dendrites
            out_array[idx] = 1 if total >= self.thresholds[idx] else 0
        return out_array
| [
"numpy.ravel",
"numpy.array",
"numpy.empty",
"numpy.get_include"
] | [((623, 657), 'numpy.ravel', 'np.ravel', (['input_pattern'], {'order': '"""A"""'}), "(input_pattern, order='A')\n", (631, 657), True, 'import numpy as np\n'), ((1206, 1253), 'numpy.array', 'np.array', (['self.dendrite_mf_map'], {'dtype': 'np.uint16'}), '(self.dendrite_mf_map, dtype=np.uint16)\n', (1214, 1253), True, 'import numpy as np\n'), ((1285, 1331), 'numpy.array', 'np.array', (['self.dendrite_counts'], {'dtype': 'np.uint8'}), '(self.dendrite_counts, dtype=np.uint8)\n', (1293, 1331), True, 'import numpy as np\n'), ((1358, 1401), 'numpy.array', 'np.array', (['self.thresholds'], {'dtype': 'np.float32'}), '(self.thresholds, dtype=np.float32)\n', (1366, 1401), True, 'import numpy as np\n'), ((277, 293), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (291, 293), True, 'import numpy as np\n'), ((1600, 1632), 'numpy.empty', 'np.empty', (['n_grcs'], {'dtype': 'np.uint8'}), '(n_grcs, dtype=np.uint8)\n', (1608, 1632), True, 'import numpy as np\n'), ((2371, 2405), 'numpy.ravel', 'np.ravel', (['input_pattern'], {'order': '"""A"""'}), "(input_pattern, order='A')\n", (2379, 2405), True, 'import numpy as np\n'), ((2423, 2464), 'numpy.ravel', 'np.ravel', (['self.dendrite_counts'], {'order': '"""A"""'}), "(self.dendrite_counts, order='A')\n", (2431, 2464), True, 'import numpy as np\n'), ((2482, 2523), 'numpy.ravel', 'np.ravel', (['self.dendrite_mf_map'], {'order': '"""A"""'}), "(self.dendrite_mf_map, order='A')\n", (2490, 2523), True, 'import numpy as np\n'), ((2541, 2577), 'numpy.ravel', 'np.ravel', (['self.thresholds'], {'order': '"""A"""'}), "(self.thresholds, order='A')\n", (2549, 2577), True, 'import numpy as np\n'), ((2619, 2649), 'numpy.ravel', 'np.ravel', (['out_array'], {'order': '"""A"""'}), "(out_array, order='A')\n", (2627, 2649), True, 'import numpy as np\n')] |
import sys,os,pickle,argparse
import numpy as np
from models import *
from sklearn.model_selection import KFold
#deprecated
#from sklearn.cross_validation import KFold
from scrape import *
import argparse
from torch.multiprocessing import Pool
from random import shuffle
def fit(X,Y,model,criterion= nn.NLLLoss(),epochs=20,batch_size=1,verbose=True,print_batches=1000,opt='Adam'):
    """
    Fits a choice model with pytorch's SGD
    X- Indicator vectors for choice sets
    Y- indices of choices
    model- choice model to fit
    criterion- which loss function to use (default to negative log likelihood for MLE)
    epochs- number of times to loop over the training data
    batch_size- how large to make batches
    verbose- whether to print updates as training goes on
    print_batches- how often to print updates on training
    opt- which optimizer to use 'SGD' or 'Adam'
    Returns the trained model.

    NOTE(review): the default criterion nn.NLLLoss() is constructed once at
    def time and shared across calls (harmless since NLLLoss holds no
    per-call state, but worth knowing). Any `opt` other than 'SGD'/'Adam'
    leaves `optimizer` unbound and raises NameError below.
    """
    X = torch.Tensor(X)
    Y = torch.LongTensor(Y.astype(int))
    dataset = torch.utils.data.TensorDataset(X,Y)
    if batch_size>1:
        dataloader = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=True)
    else:
        dataloader = torch.utils.data.DataLoader(dataset,shuffle=True)
    if opt=='SGD':
        optimizer = optim.SGD(model.parameters(), lr=0.001,momentum=0.9)
    elif opt=='Adam':
        optimizer = optim.Adam(model.parameters())
    for epoch in range(epochs): # loop over the dataset multiple times
        running_loss = 0.0
        print('Starting epoch '+str(epoch)+' of '+str(epochs))
        for i, data in enumerate(dataloader, 0):
            inputs, labels = data
            # wrap them in Variable
            inputs, labels = Variable(inputs), Variable(labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            #compute predictions from inputs
            outputs = model(inputs)
            #compute losses from current predictions
            loss = criterion(outputs, labels)
            #do backprop
            loss.backward()
            #take a step with the optimizer
            optimizer.step()
            # print statistics
            running_loss += loss.data.item()
            if not verbose:
                continue
            if i % print_batches == print_batches-1: # print every 200 mini-batches
                print('(epoch %2d, %5d samples), avg loss: %.3f' %
                      (epoch + 1, (i + 1)*batch_size, running_loss / print_batches))
                running_loss = 0.0
    return model
def cv(L,n,models,save_path,K=5,epochs=20,batch_size=1,opt='Adam',seed=True,RE=False):
    """
    trains and saves choosing to rank models with SGD via k-fold cv
    Args:
    L- list of data rankings
    n- number of items ranked
    model - choice models to fit
    save_path- folder to save to
    K- number of folds
    epochs- number of times to loop over the data
    seed- whether to seed PCMC with the first model's (MNL) utilities
    RE- whether these are repeated-elimination rankings (changes the filename)

    The fitted models and fold indices are pickled to save_path(+'-RE')+'.p'.
    """
    kf = KFold(n_splits=K,shuffle=True)
    splits = kf.split(L)
    split_store = {'train':[],'test':[],'data':L}
    for model in models:
        split_store[str(model)]=[]
    for k,(train,test) in enumerate(splits):
        print('Beginning fold'+str(k)+' of '+str(K))
        #scrape training choices and fit model
        X_train,Y_train = RS_choices([L[x] for x in train],n)
        for model in models:
            print('training RS-'+str(model))
            if seed and str(model) == 'PCMC':
                # Python 3: generators have no .next() method; use next()
                # (the original `parameters().next()` raised AttributeError)
                utils = next(models[0].parameters()).data.numpy()
                #print utils
                g= np.exp(utils)
                g/= np.sum(g)
                model = PCMC(n,gamma=g)
            model = fit(X_train,Y_train,model,criterion=nn.NLLLoss(),epochs=epochs,batch_size=batch_size,opt=opt)
            split_store[str(model)].append(model)
        #store everything
        split_store['train'].append(train)
        split_store['test'].append(test)
    # use a context manager so the pickle file handle is closed (the
    # original passed an anonymous open() and leaked the handle)
    if not RE:
        with open(save_path+'.p','wb') as f:
            pickle.dump(split_store,f)
    else:
        with open(save_path+'-RE.p','wb') as f:
            pickle.dump(split_store,f)
    return 0
def parallel_helper(tup):
    """
    Unpack a job tuple and forward it to cv().

    Pool.map cannot pickle an anonymous unpacking lambda, so this named
    wrapper does the unpacking instead.
    """
    data, n_items, model_list, path = tup[:4]
    epochs, batch_size, opt, seed, RE, K = tup[4:]
    return cv(data, n_items, model_list, path, epochs=epochs,
              batch_size=batch_size, opt=opt, seed=seed, RE=RE, K=K)
def ensure_dir(file_path):
    """
    Create the directory that would contain `file_path` if it is missing.

    Fixes two defects of the original exists()/makedirs() pair:
    - TOCTOU race: the directory could appear between the exists() check
      and makedirs(), which then raised; exist_ok=True is atomic-safe.
    - a bare filename has an empty dirname, and os.makedirs('') raises;
      now that case is a no-op.

    thanks to this:
    https://stackoverflow.com/questions/273192/how-can-i-safely-create-a-nested-directory-in-python
    """
    directory = os.path.dirname(file_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def trawl(dset,dtype,epochs,parallel=False,batch_size=1,max_n=30,max_rankings=1000,opt='Adam',num_dsets=10,seed=True,RE=False,K=5):
    """
    trawls over a directory and fits models to all data files
    Args:
    dset- name of dataset(s) considered
    dtype- 'soi' for partial rankings, 'soc' for complete rankings
    epochs- number of times to loop over the data
    parallel- whether to train models in parallel over the datasets in the directory
    batch_size- number of choices to train on at a time
    max_n- largest number of alternatives allowed to train on a dataset
    max_rankings- maximum number of rankings to fit a dataset
    opt- which optimizer to use
    num_dsets- number of datasets to fit
    seed- whether to seed PCMC
    RE- whether to compute repeated elimianation (RS if false)
    K- number of CV folds for each dataset
    """
    #we will loop over the datasets stored in this directory
    path = os.getcwd()+os.sep+'data'+os.sep+dset
    files = os.listdir(path)
    #shuffle(files)
    #this is where we'll save the output models
    save_path = os.getcwd()+os.sep+'cache'+os.sep+'learned_models'+os.sep+dset+os.sep
    job_list = []
    batch = (batch_size>1)
    for filename in files:#loop over the directory
        print(filename)
        if filename.endswith(dtype):#will
            filepath = path+os.sep+filename
            if dtype=='soi':
                L,n = scrape_soi(filepath)
            else:
                L,n = scrape_soc(filepath)
            if len(L)<=10 or len(L)>max_rankings or n>max_n:
                if len(L)<=10:
                    reason = 'too few rankings- '+str(len(L))+', min is 10'
                elif len(L)>max_rankings:
                    reason = 'too many rankings- '+str(len(L))+', max is '+str(max_rankings)
                else:
                    reason = 'too many alternatives- '+str(n)+', max is '+str(max_n)
                print(filename+' skipped, '+reason)
                continue
            else:
                print(filename+' added')
            #collect models
            models = []
            for d in [1,4,8]:
                if d>n:
                    continue
                models.append(CDM(n=n,d=d))
            models.append(MNL(n))
            #models.append(PCMC(n,batch=batch))
            ###
            #models.append(BP(n=n,k=3,d=2))
            #models=[BP(n=n,k=3,d=2)]
            #append tuple containing all the ojects needed to train the model on the dataset
            job_list.append((L,n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,False,K))
            if RE:
                # build the reversed rankings eagerly: cv() indexes into
                # this list, which a lazy python-3 map object does not allow
                job_list.append(([x[::-1] for x in L],n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,True,K))
            if len(job_list)>=num_dsets:
                # was `continue`, which never actually enforced the cap:
                # the loop kept appending jobs for every remaining file
                print('maximum number of datasets reached')
                break
    print(str(len(job_list))+' datasets total')
    print(str(sum(map(lambda x: len(x[0]),job_list)))+ ' total rankings')
    #sorts the jobs by number of alternatives*number of (partial) rankings
    #will roughly be the number of choices, up to partial ranking length
    # sort in place: the original called sorted() and threw away the result,
    # so the ordering was never applied
    job_list.sort(key=lambda x: x[1]*len(x[0]))
    #training for each dataset can be done in parallel with this
    if parallel:
        p = Pool(4)
        p.map(parallel_helper,job_list)
    else:
        [x for x in map(parallel_helper,job_list)]
def parse():
    """
    parses command line args, run when train.py is __main__

    Validates -dtype and -opt (hard assert on bad values), then forwards the
    parsed, type-converted options to trawl().
    """
    np.set_printoptions(suppress=True, precision=3)
    parser = argparse.ArgumentParser(description='ctr data parser')
    parser.add_argument('-dset', help="dataset name", default=None)
    parser.add_argument('-dtype', help="dataset type", default ='soi')
    parser.add_argument('-epochs', help="number of epochs to use", default='10')
    parser.add_argument('-batch_size', help='batch_size for training', default = '1')
    parser.add_argument('-max_n', help='maximum number of items ranked', default = '10')
    parser.add_argument('-max_rankings', help='maximum number of rankings', default = '1000')
    parser.add_argument('-opt', help='SGD or Adam', default='Adam')
    parser.add_argument('-num_dsets', help='how many datasets to use', default='100')
    parser.add_argument('-seed_pcmc', help='whether to seed pcmc with MNL (y/n)', default = 'n')
    parser.add_argument('-re', help='whether to train RE models (y/n)', default = 'n')
    parser.add_argument('-folds', help='number of folds for cv on each dataset', default='5')
    args = parser.parse_args()
    if args.dtype not in ['soi','soc']:
        print('wrong data type')
        assert False
    if args.opt not in ['SGD','Adam']:
        print('optmizer can be SGD or Adam')
        assert False
    if args.dset=='soc':
        args.dtype='soc'
    # NOTE(review): `path` is computed but never used below (trawl rebuilds
    # it from args.dset) -- confirm it can be removed.
    path = os.getcwd()+os.sep+'data'+os.sep+args.dset
    if args.dset == 'soi':
        path += os.sep+'filtered'
    # NOTE(review): an invalid -seed_pcmc only prints a warning and then
    # proceeds with seed=False -- confirm that is intended.
    if args.seed_pcmc not in ['y','n']:
        print('y or n required for -seed_pcmc')
    seed = (args.seed_pcmc=='y')
    RE = (args.re == 'y')
    K = int(args.folds)
    trawl(args.dset,args.dtype,epochs=int(args.epochs),batch_size=int(args.batch_size),
        max_n=int(args.max_n),max_rankings=int(args.max_rankings),opt=args.opt,
        num_dsets=int(args.num_dsets),seed=seed,RE=RE,K=K)
# script entry point: parse CLI args and launch training
if __name__ == '__main__':
    parse()
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"os.getcwd",
"numpy.exp",
"os.path.dirname",
"torch.multiprocessing.Pool",
"numpy.sum",
"sklearn.model_selection.KFold",
"numpy.set_printoptions"
] | [((2908, 2939), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'K', 'shuffle': '(True)'}), '(n_splits=K, shuffle=True)\n', (2913, 2939), False, 'from sklearn.model_selection import KFold\n'), ((4653, 4679), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (4668, 4679), False, 'import sys, os, pickle, argparse\n'), ((5735, 5751), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5745, 5751), False, 'import sys, os, pickle, argparse\n'), ((8257, 8304), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'precision': '(3)'}), '(suppress=True, precision=3)\n', (8276, 8304), True, 'import numpy as np\n'), ((8318, 8372), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ctr data parser"""'}), "(description='ctr data parser')\n", (8341, 8372), False, 'import argparse\n'), ((4691, 4716), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (4705, 4716), False, 'import sys, os, pickle, argparse\n'), ((4726, 4748), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (4737, 4748), False, 'import sys, os, pickle, argparse\n'), ((8054, 8061), 'torch.multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (8058, 8061), False, 'from torch.multiprocessing import Pool\n'), ((3519, 3532), 'numpy.exp', 'np.exp', (['utils'], {}), '(utils)\n', (3525, 3532), True, 'import numpy as np\n'), ((3553, 3562), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (3559, 3562), True, 'import numpy as np\n'), ((5685, 5696), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5694, 5696), False, 'import sys, os, pickle, argparse\n'), ((9586, 9597), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9595, 9597), False, 'import sys, os, pickle, argparse\n'), ((5837, 5848), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5846, 5848), False, 'import sys, os, pickle, argparse\n')] |
from setuptools import setup, Extension
from setuptools import dist
dist.Distribution().fetch_build_eggs(['numpy>=1.18.2', 'cython>=0.29.16'])
import numpy as np
# To compile and install locally run "python setup.py build_ext --inplace"
# To install library to Python site-packages run "python setup.py build_ext install"
# Cython extension for the COCO mask routines, compiled against the numpy
# headers and the shared C implementation in ../common.
ext_modules = [
    Extension(
        'pycocotools._mask',
        sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'],
        include_dirs = [np.get_include(), '../common'],
        extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
    )
]
# Package metadata; the extension above is built as pycocotools._mask.
setup(
    name='pycocotools',
    packages=['pycocotools'],
    package_dir = {'pycocotools': 'pycocotools'},
    install_requires=[
        'setuptools>=18.0',
        'cython>=0.27.3',
        'matplotlib>=2.1.0'
    ],
    version='2.0',
    ext_modules= ext_modules
)
| [
"setuptools.dist.Distribution",
"setuptools.setup",
"numpy.get_include"
] | [((594, 821), 'setuptools.setup', 'setup', ([], {'name': '"""pycocotools"""', 'packages': "['pycocotools']", 'package_dir': "{'pycocotools': 'pycocotools'}", 'install_requires': "['setuptools>=18.0', 'cython>=0.27.3', 'matplotlib>=2.1.0']", 'version': '"""2.0"""', 'ext_modules': 'ext_modules'}), "(name='pycocotools', packages=['pycocotools'], package_dir={\n 'pycocotools': 'pycocotools'}, install_requires=['setuptools>=18.0',\n 'cython>=0.27.3', 'matplotlib>=2.1.0'], version='2.0', ext_modules=\n ext_modules)\n", (599, 821), False, 'from setuptools import setup, Extension\n'), ((69, 88), 'setuptools.dist.Distribution', 'dist.Distribution', ([], {}), '()\n', (86, 88), False, 'from setuptools import dist\n'), ((476, 492), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (490, 492), True, 'import numpy as np\n')] |
# from ripe.atlas.sagan import Result
from ripe.atlas.cousteau import Probe
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from collections import Counter
from sklearn.mixture import GaussianMixture
import urllib.request
import json
import pickle
import decimal
from OllivierRicci import ricciCurvature,compute_ricciFlow
sns.set_context("paper",rc={"xtick.labelsize":10,'figure.figsize':(250,250),"ytick.labelsize":10,"axes.labelsize":10
,"legend.labelsize":15})
from ripe.atlas.cousteau import (
Measurement
)
from matplotlib.patches import Ellipse
from numpy.linalg import norm
#c is the speed of the light
c = 299792458
#a collection of colors used in GMM
colors = dict(enumerate([ "red", "blue", "green", "yellow", "purple", "orange" ,"white", "black"]))
def float_range(start, stop, step):
    """Return [start, start+step, ...]; the last element is the first value
    that reaches or exceeds `stop`. Elements after the first are Decimals.

    Bug fix: float arguments are now converted through str() before building
    Decimals. decimal.Decimal(0.1) reproduces the binary float exactly
    (0.1000000000000000055511...), which made every step inexact;
    Decimal(str(0.1)) is exactly 0.1.
    """
    values = [start]
    while start < stop:
        # str() round-trips through the shortest float repr, yielding an
        # exact decimal value instead of the binary-float artifact
        start = decimal.Decimal(str(start))
        start += decimal.Decimal(str(step))
        values.append(start)
    return values
def draw_ellipse(position, covariance, ax=None, **kwargs):
    """Draw the 1-, 2- and 3-sigma ellipses of one GMM component.

    Shows the area of influence of the centroid: `position` is the
    component mean, `covariance` its covariance (full 2x2 matrix, or a
    diagonal/spherical variance vector). Extra kwargs go to Ellipse.
    """
    axes = ax or plt.gca()
    # Recover the principal axes of the covariance
    if covariance.shape == (2, 2):
        u_mat, sing_vals, _ = np.linalg.svd(covariance)
        angle = np.degrees(np.arctan2(u_mat[1, 0], u_mat[0, 0]))
        width, height = 2 * np.sqrt(sing_vals)
    else:
        # Diagonal / spherical covariance: axis-aligned ellipse
        angle = 0
        width, height = 2 * np.sqrt(covariance)
    # One ellipse per sigma level
    for nsig in (1, 2, 3):
        axes.add_patch(
            Ellipse(position, nsig * width, nsig * height, angle, **kwargs))
def plot_gmm(gmm, X, label=True, ax=None):
    # Fit `gmm` to X, scatter the points (colored by predicted cluster when
    # `label` is True) and overlay each component's covariance ellipse.
    ax = ax or plt.gca()
    labels = gmm.fit(X).predict(X)
    if label:
        ax.scatter(X[:, 0], X[:, 1], c=labels, s=7, cmap='viridis', zorder=2)
    else:
        ax.scatter(X[:, 0], X[:, 1], s=7, zorder=2)
    # ax.axis('equal')
    # scale ellipse opacity by component weight so heavier components stand out
    w_factor = 0.2 / gmm.weights_.max()
    for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):
        # NOTE(review): draw_ellipse is called without `ax`, so it draws on
        # plt.gca(), which may differ from the `ax` argument -- confirm.
        draw_ellipse(pos, covar, alpha=w * w_factor)
#id = 6278 #(sorbonne universite)
#id = 6231 #(boston university)
# id =6285 #Atlanta
# id = 6271 #Paris Afnic
# def map_Louis(id_meas):
# dico = {}
# with urllib.request.urlopen(
# "https://atlas.ripe.net/api/v2/measurements/%d/results/?format=txt" % id_meas) as my_results:
# for result in my_results.readlines():
# result = Result.get(result.decode("utf-8"))
# dico.update({result.probe_id: result.rtt_min})
# # print(json.load(open('/Users/loqman/Downloads/20190616.json')))
# # print(json.load(open('/Users/loqman/Downloads/20190521.json'))['objects'])
# probes = {d['id']: d for d in json.load(open('/Users/loqman/Downloads/20190521.json'))['objects']}
# df = pd.DataFrame(probes).transpose()
# all = []
# for t in dico.keys():
# value = df[df.index==t][['id','latitude','longitude']].values
# value = np.append(value,dico[t])
# # dg['latency'] = dico[t]
# print(value)
# all.append(value)
# dg = pd.DataFrame(all,index=dico.keys(),columns = ['id','latitude','longitude','latency'])
# dg.to_csv('losquinquihios.csv')
# return dg
def read_all(id,path_ripe,path_geo,type="min",internet= False,id_meas = None):
    """
    Translate raw RIPE Atlas measurement data into a readable dataframe.

    :param id: id of the probe the measurement targeted; may carry a prefix
               such as "probe-6278" — only the part after the last '-' is used
    :param path_ripe: string, path to the local RIPE json data (ignored when
               ``internet`` is True)
    :param path_geo: string, path to the pickled geographic-distance matrix
    :param type: categorical, which rtt statistic to keep (only "min" is handled)
    :param internet: boolean, fetch the results directly from the RIPE API?
    :param id_meas: measurement id, required when ``internet`` is True
    :return: dataframe with 'latency' and 'geographic' columns, or [] when
             the probe address cannot be found in the distance matrix
    """
    dico = {}
    hist = []      # collected rtt_min values (kept for ad-hoc inspection)
    hist1 = []     # collected rtt_max values
    hist2 = []     # collected rtt_median values
    hist3 = []     # rtt_max - rtt_min spread
    if internet:
        with urllib.request.urlopen(
                "https://atlas.ripe.net/api/v2/measurements/%d/results/?format=txt" % id_meas) as my_results:
            for result in my_results.readlines():
                result = Result.get(result.decode("utf-8"))
                if type == 'min':
                    dico.update({result.probe_id: result.rtt_min})
    else:
        with open(path_ripe) as my_results:
            for result in my_results.readlines():
                result = Result.get(result)
                if type == 'min':
                    dico.update({result.probe_id: result.rtt_min})
                    hist.append(result.rtt_min)
                    hist1.append(result.rtt_max)
                    hist2.append(result.rtt_median)
                    hist3.append(result.rtt_max - result.rtt_min)
                    print(result.rtt_median, result.probe_id)
    geo_matrix = pd.read_pickle(path_geo).transpose()
    # Restrict the distance matrix to the probes we actually have rtts for.
    limit = geo_matrix.loc[dico.keys()]
    dlat = pd.Series(dico)
    df = pd.DataFrame()
    id = id.split('-')[-1]
    print(id)
    df['latency'] = dlat
    # Fix: the three handlers below were bare ``except:`` clauses, which also
    # swallow SystemExit/KeyboardInterrupt; narrowed to Exception.
    try:
        lim = limit[Probe(id=id).address_v4]
    except Exception:
        # Probe address not present in the distance matrix (or lookup failed).
        return []
    try:
        lim.columns = [Probe(id=id).address_v4, 'off']
    except Exception:
        # ``lim`` is a Series rather than a DataFrame: nothing to rename.
        print('no worries')
    try:
        df['geographic'] = lim
    except Exception:
        # ``lim`` has several columns; keep only the probe-address column.
        df['geographic'] = lim[Probe(id=id).address_v4]
    print(df.shape)
    df.dropna(inplace=True)
    print(df.shape)
    return df
def AS_analysis(labels,index,col =False):
    """
    Group the probes of each cluster by their autonomous system (AS).

    :param labels: cluster label per probe (parallel to ``index``)
    :param index: iterable of probe ids
    :param col: if True, key the result by the cluster colour instead of
                the raw label
    :return: dict mapping cluster label (or colour) to a list of asn_v4
    """
    label_of = dict(zip(index, labels))
    dic_as = {}
    for cluster in list(set(labels)):
        members = [Probe(id=probe_id).asn_v4
                   for probe_id in index
                   if label_of[probe_id] == cluster]
        if col:
            dic_as[colors[cluster]] = members
        else:
            dic_as[cluster] = members
    return dic_as
def func(x,type):
    """
    Apply the regression basis function selected by ``type``.

    :param x: np.array (or scalar) of input values
    :param type: categorical: either "linear", "squared", "root" or "cubical"
    :return: the transformed values
    :raises ValueError: for an unknown ``type`` — the original silently
        returned None, which only crashed later inside the caller
    """
    if type == "linear":
        return x
    elif type == 'squared':
        return np.square(x)
    elif type == 'root':
        return np.sqrt(x)
    elif type == 'cubical':
        return x ** 3
    raise ValueError("unknown regression type: %r" % (type,))
def influence_plot(df):
    """
    Plot the OLS fit of latency against geographic distance together with
    statsmodels' influence diagnostics (Cook's distance) to spot outliers.

    :param df: dataframe with 'geographic' and 'latency' columns
    :return: None (shows the plots)
    """
    import statsmodels.api as sm
    x = np.array(df['geographic'].values)
    y = np.array(df['latency'].values)
    model = sm.OLS(y, sm.add_constant(x)).fit()
    order = np.argsort(x)
    plt.scatter(x[order], y[order])
    plt.scatter(np.mean(x), np.mean(y), color="green")
    plt.plot(x[order], model.predict()[order], label="regression")
    plt.title("Linear Regression plots with the regression line")
    plt.legend()
    fig, ax = plt.subplots(figsize=(12, 8))
    fig = sm.graphics.influence_plot(model, alpha=0.05, ax=ax, criterion="cooks")
    plt.show()
def regression(df,clusters=True,type='linear'):
    """
    Single-coefficient least-squares fit of latency against a function of
    geographic distance, with plots of the fit and the residuals.

    :param df: when ``clusters`` is True, a fitted GaussianMixture whose
        means_ hold (distance, latency) pairs; otherwise a dataframe with
        'geographic' and 'latency' columns
    :param clusters: regress on the cluster means instead of the raw points
    :param type: basis-function name forwarded to :func:`func`
    :return: the residual sum of squares reported by np.linalg.lstsq
    """
    if clusters:
        x = np.array(df.means_[:, 0])
        y= np.array(df.means_[:, 1])
    else:
        x = np.array(df['geographic'].values)
        y = np.array(df['latency'].values)
    funco = func(x,type)
    M = np.column_stack((funco,)) # construct design matrix
    # Fit y ~ k * func(x); k has a single entry since M has one column.
    k, res, _, _ = np.linalg.lstsq(M, y,rcond=None)
    plt.plot(x, y, '.')
    x_bis = np.linspace(start=0,stop=max(x)+100)
    plt.plot(x_bis, k * (func(x_bis,type)), 'r', linewidth=1)
    y_bis = np.linspace(start=0,stop=max(y)+100)
    print(y)
    # "Optimal" reference line: ``c`` is a module-level constant, presumably
    # the speed of light (1/3 c is the physical latency bound) — TODO confirm.
    plt.plot(1/3*c*y_bis*10**(-6),y_bis,'y',linewidth = 1)
    plt.legend(('measurement', 'fit','optimal'), loc=2)
    plt.title('best fit: y = {:.8f}'.format(k[0]) + " for regression of type " +type)
    plt.xlim(xmax=max(x)+100,xmin = -100)
    plt.show()
    # Second figure: data points, fitted values and residuals together.
    plt.plot(x, y, '.','b')
    plt.plot(x,k * (func(x,type)),'^','r')
    plt.plot(x,y-k * (func(x,type)),'*','y')
    plt.show()
    return res
def distance(df):
    """
    Rank probes by the perpendicular distance of their (normalized)
    (distance, latency) point to the "optimal" line — the latency achievable
    at one third of the speed of light (``c`` is a module-level constant,
    presumably the speed of light — TODO confirm).

    :param df: dataframe whose first column is geographic distance and
        second is latency, indexed by probe id
    :return: list of (probe_id, distance) pairs sorted by ascending distance
    """
    print(df)
    values = np.array(df.values)  # fix: was recomputed on every use
    print(values.shape)
    # Normalize both axes by their maxima so neither dominates the metric.
    second = max(values[:, 1])
    a = 1 / 3 * c * 10 ** (-6) / second  # slope of the optimal line, rescaled
    print(values.shape, df.index.shape)
    first = max(values[:, 0])
    distances = {}  # fix: no longer shadows the builtin ``dict``
    for ((coord1, coord2), probe_id) in zip(values, df.index):
        # Perpendicular distance of the normalized point to the line y = a*x.
        d = np.absolute(coord1 / first - a * coord2 / second) / np.sqrt(1 + a ** 2)
        # Fix: the original assigned ``dict[i] = d`` twice (duplicated line).
        distances[probe_id] = d
    print(values[:, 0] / first, values[:, 1] / second)
    return sorted(distances.items(), key=lambda kv: kv[1])
import networkx as nx
def graph_inference(values,df,data):
    """
    Build a star graph centred on the probe closest to the optimal line:
    every probe whose distance is below ``values`` gets an edge to it.

    :param values: distance threshold for adding an edge
    :param df: dataframe forwarded to :func:`distance`
    :param data: {city: [probe_id, ...]} mapping used to tag the nodes
    :return: networkx Graph with a 'city' attribute on each node
    """
    city_of = {}
    for city, probe_ids in data.items():
        for probe_id in probe_ids:
            city_of[probe_id] = city
    ranked = distance(df)
    names = list(zip(*ranked))[0]
    print(names)
    graph = nx.Graph()
    graph.add_nodes_from(list(names))
    center = names[0]
    for node, dist_value in ranked:
        print(node, dist_value)
        if dist_value <= values and node != center:
            graph.add_edge(center, node)
    nx.set_node_attributes(graph, city_of, 'city')
    return graph
def combining_graph(graphs):
    """
    Merge a list of graphs into one: nodes (and the 'city' attribute dump)
    come from the first graph, edges are accumulated from all of them.

    :param graphs: iterable of networkx graphs over the same node set
    :return: the combined networkx Graph
    """
    combined = nx.Graph()
    for idx, g in enumerate(graphs):
        print(idx)
        if idx == 0:
            combined.add_nodes_from(g.nodes())
            city = nx.get_node_attributes(g, 'city')
            print(city)
        combined.add_edges_from(g.edges())
    return combined
def pipeline_ricci(path_to_data,list_of_ids,geo_matrix_path,values,internet=False):
    """
    Build the probe graph for a set of measurements.

    :param path_to_data: when ``internet`` is False, a list of paths to local
        RIPE json dumps; when True, a list of measurement tuples whose first
        element is the measurement id
    :param list_of_ids: probe ids, parallel to ``path_to_data``
    :param geo_matrix_path: path to the pickled geographic-distance matrix
    :param values: distance threshold forwarded to :func:`graph_inference`
    :param internet: fetch results from the RIPE API instead of local files
    :return: the combined networkx Graph
    """
    with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
        data = json.load(json_file)
    graphs = []
    if internet:
        for (s, t) in zip(path_to_data, list_of_ids):
            # Bug fix: the original passed the whole ``path_to_data`` list as
            # path_ripe instead of the zipped element ``s``. path_ripe is
            # unused when internet=True, so behavior is unchanged, but the
            # call now mirrors the offline branch below.
            df = read_all(path_ripe=s, id=t, path_geo=geo_matrix_path,
                          internet=True, id_meas=s[0])
            print(len(df))
            if len(df) == 0:
                continue
            graphs.append(graph_inference(values, df, data))
    else:
        for (s, t) in zip(path_to_data, list_of_ids):
            df = read_all(t, s, geo_matrix_path)
            graphs.append(graph_inference(values, df, data))
    G = combining_graph(graphs)
    return G
def gmm_visual(df,n):
    """
    Fit an n-component Gaussian mixture on (geographic, latency) pairs,
    report the cluster assignments, run every regression type on the
    cluster means, and show the resulting plots.

    :param df: dataframe with 'geographic' and 'latency' columns
    :param n: number of mixture components
    :return: None (prints and shows plots)
    """
    points = df[['geographic', 'latency']].values
    points = [list(p) for p in points]
    print(points)
    gmm = GaussianMixture(n_components=n, covariance_type='full', random_state=1).fit(points)
    labels = gmm.predict(points)
    assignment = {}
    for i, probe_id in enumerate(df[['geographic', 'latency']].index):
        assignment[probe_id] = labels[i]
    print(Counter(labels))
    print(assignment)
    for regression_type in ["root", "squared", "linear", "cubical"]:
        print(regression(gmm, True, regression_type))
    probs = gmm.predict_proba(points)
    print(probs)
    palette = ["red", "blue", "green", "yellow", "purple", "orange", "white", "black"]
    plt.scatter(gmm.means_[:, 0], gmm.means_[:, 1], c=palette[:n], s=40, cmap='viridis')
    plt.show()
    influence_plot(df)
# with open('interest_probes.json') as json_file:
# data = json.load(json_file)
# ripe_path = ['/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-parisarpnic.json','/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-21715861.json']
# measurement = Measurement(id='21715861')
# print(measurement.meta_data)
# ids = [6231,6271]
# # id =6285 #Atlanta
# id = 6271 #Paris Afnic
# print(dir(measurement))
# pipeline_ricci(ripe_path,list_of_ids=ids,geo_matrix_path='/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle')
def full_pipeline(measurements,probes,matrix_geo,name,val,
                  out_dir="/Users/loqman/Downloads/graph/"):
    """
    End-to-end pipeline: load pickled measurement/probe lists, build the
    graph, tag nodes with their city, compute Ollivier-Ricci curvature and
    write the annotated graph to graphml.

    :param measurements: path to a pickled list of measurements
    :param probes: path to a pickled dict of probe ids
    :param matrix_geo: path to the pickled geographic-distance matrix
    :param name: base name of the output graphml file
    :param val: distance threshold forwarded to :func:`pipeline_ricci`
    :param out_dir: directory the graphml is written to (new optional
        parameter; defaults to the previously hard-coded location)
    """
    with open(measurements, 'rb') as fp:
        list_of_measurements = pickle.load(fp)
    with open(probes, 'rb') as fp:
        list_of_ids = pickle.load(fp)
    print(len(list_of_measurements), len(list_of_ids))
    G = pipeline_ricci(list_of_measurements, list_of_ids=list_of_ids.keys(),
                       geo_matrix_path=matrix_geo,
                       values=val, internet=True)
    print(len(G.nodes()))
    with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
        data = json.load(json_file)
    city = {}
    for t in data.keys():
        for s in data[t]:
            print(t)
            city[s] = t
    nx.set_node_attributes(G, city, 'city')
    G = ricciCurvature(G)
    ricci = nx.get_node_attributes(G, 'ricciCurvature')
    # Also store absolute curvature, handy for visual filtering in Gephi etc.
    abs_ricci = {t: abs(r) for t, r in ricci.items()}
    nx.set_node_attributes(G, abs_ricci, 'abs_ricci')
    nx.write_graphml(G, out_dir + name + str(val) + ".graphml")
if __name__ == "__main__":
# with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
# data = json.load(json_file)
# with open('list_of_measurements_bis', 'rb') as fp:
# list_of_measurements = pickle.load(fp)
# with open('list_of_ids', 'rb') as fp:
# list_of_ids = pickle.load(fp)
# for (s, t) in zip(list_of_measurements,list_of_ids.keys()):
# map_Louis(s[0])
# break
# with open('metainfo_aug.pickle', 'rb') as fp:
# list_of_ids = pickle.load(fp)
with open('metainfo_cloudincluded_all.pickle', 'rb') as fp:
list_of_ids = pickle.load(fp)
print(list_of_ids)
# for s in list_of_ids[0].keys():
# if list_of_ids[0][s] == 'Utah':
# print(s)
# #
# for val in list(float_range(0.5, 0.7, '0.01')):
# val = float(val)
# full_pipeline('list_of_measurements_bis','list_of_ids_bis','/Users/loqman/PycharmProjects/privacy-preserving/geo_matrix_90sec.pickle','probes_06',val)
# with open('list_of_measurements', 'rb') as fp:
# list_of_measurements = pickle.load(fp)
# with open('list_of_ids', 'rb') as fp:
# list_of_ids = pickle.load(fp)
# print(len(list_of_measurements),len(list_of_ids))
# G = pipeline_ricci(list_of_measurements,list_of_ids = list_of_ids.keys(),geo_matrix_path='/Users/loqman/PycharmProjects/privacy-preserving/geo_matrix_90.pickle',values=0.5,internet=True)
# print(len(G.nodes()))
# with open('interest_probesBostonAtlantaChicagoParisMarseille.json') as json_file:
# data = json.load(json_file)
# city = {}
# for t in data.keys():
# for s in data[t]:
# print(t)
# city[s]=t
# nx.set_node_attributes(G,city,'city')
# # nx.write_graphml(G,"/Users/loqman/Downloads/combinaison_probes.graphml")
# G = ricciCurvature(G)
# ricci = nx.get_node_attributes(G, 'ricciCurvature')
# abs_ricci = {}
# for t in ricci.keys():
# abs_ricci[t] = abs(ricci[t])
# nx.set_node_attributes(G,abs_ricci,'abs_ricci')
# # G = compute_ricciFlow(G)
# # # nx.write_graphml(G,)
# nx.write_graphml(G,"/Users/loqman/Downloads/combinaison_probes_90-0.5ricci.graphml")
# for (s,t) in zip(list_of_ids.keys(),list_of_measurements):
# print(t[0])
# df = read_all(s,"",'/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle',type="min",internet= True,id_meas = t[0])
# # df = read_all(id,'/Users/loqman/PycharmProjects/privacy-preserving/RIPE-Atlas-measurement-parisarpnic.json','/Users/loqman/PycharmProjects/privacy-preserving/geo_matrixBostonAtlantaChicagoParisLondon.pickle')
# name_ordered = dist_bis[0]
# value_ordered =dist_bis[1]
# new_data = {}
# for t in data.keys():
# for s in data[t]:
# new_data[s] = t
# for (t,s) in zip(name_ordered,value_ordered):
# print(new_data[t],s)
# with open('new_data.json', 'w') as outfile:
# json.dump(new_data, outfile)
# df.to_pickle('data.pickle')
# G = graph_inference(df,data)
# nx.write_graphml(G,"/Users/loqman/Downloads/graph_try_min.graphml")
# import json
# options = {
# 'node_color': 'red',
# 'node_size': 1,
# 'line_color': 'blue',
# 'linewidths': 1,
# 'width': 0.1,
# }
# # nx.draw(G, **options)
# plt.show()
# plt.plot()
# print(distance(df))
# for t in ["root","squared","linear","cubical"]:
# print(regression(gmm,True,t))
# regression(gmm)
# l =[]
# l_bis = []
# l_third = []
# for t in dic.keys():
# if dic[t] == 3:
# l.append(t)
# elif dic[t] == 1:
# l_bis.append(t)
# elif dic[t] == 5:
# l_third.append(t)
# for n in set(l_third):
# print('Cluster orange', Probe(id=n).asn_v4)
# for n in set(l):
# print('Premier cluster:', Probe(id=n).asn_v4)
# for n in set(l_bis):
# print('Second cluster: ',Probe(id=n).asn_v4)
# probs = gmm.predict_proba(elem)
# print(probs)
# print(gmm.means_)
# plt.scatter(gmm.means_[:,0], gmm.means_[:, 1],c= [ "red", "blue", "green", "yellow", "purple", "orange" ,"white", "black"][:n], s=40, cmap='viridis')
# plt.show()
# print(kmeans.cluster_centers_)
# range_n_clusters = list(range(2,10))
# from sklearn.metrics import silhouette_score
# for n_clusters in range_n_clusters:
# Gaussian = GaussianMixture(n_components=n_clusters covariance_type='full').fit(elem)
# cluster_labels = clusterer.fit_predict(elem)
# # The silhouette_score gives the average value for all the samples.
# # This gives a perspective into the density and separation of the formed
# # clusters
# silhouette_avg = silhouette_score(elem, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# plt.scatter(df['geographic'], df['latency'])
# plot_gmm(gmm, elem)
# plt.show()
# influence_plot(df)
# print(AS_analysis(labels,df.index,True))
| [
"numpy.sqrt",
"numpy.column_stack",
"numpy.argsort",
"numpy.array",
"numpy.arctan2",
"pandas.read_pickle",
"numpy.mean",
"numpy.sort",
"matplotlib.pyplot.plot",
"OllivierRicci.ricciCurvature",
"numpy.linalg.lstsq",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"ripe.atlas.cousteau.Probe... | [((368, 523), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'rc': "{'xtick.labelsize': 10, 'figure.figsize': (250, 250), 'ytick.labelsize': 10,\n 'axes.labelsize': 10, 'legend.labelsize': 15}"}), "('paper', rc={'xtick.labelsize': 10, 'figure.figsize': (250,\n 250), 'ytick.labelsize': 10, 'axes.labelsize': 10, 'legend.labelsize': 15})\n", (383, 523), True, 'import seaborn as sns\n'), ((5252, 5267), 'pandas.Series', 'pd.Series', (['dico'], {}), '(dico)\n', (5261, 5267), True, 'import pandas as pd\n'), ((5475, 5489), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5487, 5489), True, 'import pandas as pd\n'), ((7393, 7426), 'numpy.array', 'np.array', (["df['geographic'].values"], {}), "(df['geographic'].values)\n", (7401, 7426), True, 'import numpy as np\n'), ((7435, 7465), 'numpy.array', 'np.array', (["df['latency'].values"], {}), "(df['latency'].values)\n", (7443, 7465), True, 'import numpy as np\n'), ((7691, 7752), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear Regression plots with the regression line"""'], {}), "('Linear Regression plots with the regression line')\n", (7700, 7752), True, 'import matplotlib.pyplot as plt\n'), ((7757, 7769), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7767, 7769), True, 'import matplotlib.pyplot as plt\n'), ((7785, 7814), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (7797, 7814), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7893), 'statsmodels.api.graphics.influence_plot', 'sm.graphics.influence_plot', (['lm'], {'alpha': '(0.05)', 'ax': 'ax', 'criterion': '"""cooks"""'}), "(lm, alpha=0.05, ax=ax, criterion='cooks')\n", (7851, 7893), True, 'import statsmodels.api as sm\n'), ((7898, 7908), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7906, 7908), True, 'import matplotlib.pyplot as plt\n'), ((8184, 8209), 'numpy.column_stack', 'np.column_stack', (['(funco,)'], {}), '((funco,))\n', (8199, 
8209), True, 'import numpy as np\n'), ((8256, 8289), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'y'], {'rcond': 'None'}), '(M, y, rcond=None)\n', (8271, 8289), True, 'import numpy as np\n'), ((8293, 8312), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (8301, 8312), True, 'import matplotlib.pyplot as plt\n'), ((8490, 8553), 'matplotlib.pyplot.plot', 'plt.plot', (['(1 / 3 * c * y_bis * 10 ** -6)', 'y_bis', '"""y"""'], {'linewidth': '(1)'}), "(1 / 3 * c * y_bis * 10 ** -6, y_bis, 'y', linewidth=1)\n", (8498, 8553), True, 'import matplotlib.pyplot as plt\n'), ((8549, 8601), 'matplotlib.pyplot.legend', 'plt.legend', (["('measurement', 'fit', 'optimal')"], {'loc': '(2)'}), "(('measurement', 'fit', 'optimal'), loc=2)\n", (8559, 8601), True, 'import matplotlib.pyplot as plt\n'), ((8733, 8743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8741, 8743), True, 'import matplotlib.pyplot as plt\n'), ((8748, 8772), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""', '"""b"""'], {}), "(x, y, '.', 'b')\n", (8756, 8772), True, 'import matplotlib.pyplot as plt\n'), ((8864, 8874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8872, 8874), True, 'import matplotlib.pyplot as plt\n'), ((10152, 10162), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10160, 10162), True, 'import networkx as nx\n'), ((10356, 10399), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', 'new_data', '"""city"""'], {}), "(G, new_data, 'city')\n", (10378, 10399), True, 'import networkx as nx\n'), ((10507, 10517), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10515, 10517), True, 'import networkx as nx\n'), ((12251, 12404), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gmm.means_[:, 0]', 'gmm.means_[:, 1]'], {'c': "['red', 'blue', 'green', 'yellow', 'purple', 'orange', 'white', 'black'][:n]", 's': '(40)', 'cmap': '"""viridis"""'}), "(gmm.means_[:, 0], gmm.means_[:, 1], c=['red', 'blue', 'green',\n 'yellow', 'purple', 
'orange', 'white', 'black'][:n], s=40, cmap='viridis')\n", (12262, 12404), True, 'import matplotlib.pyplot as plt\n'), ((12405, 12415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12413, 12415), True, 'import matplotlib.pyplot as plt\n'), ((13762, 13801), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', 'city', '"""city"""'], {}), "(G, city, 'city')\n", (13784, 13801), True, 'import networkx as nx\n'), ((13889, 13906), 'OllivierRicci.ricciCurvature', 'ricciCurvature', (['G'], {}), '(G)\n', (13903, 13906), False, 'from OllivierRicci import ricciCurvature, compute_ricciFlow\n'), ((13919, 13962), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""ricciCurvature"""'], {}), "(G, 'ricciCurvature')\n", (13941, 13962), True, 'import networkx as nx\n'), ((14050, 14099), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', 'abs_ricci', '"""abs_ricci"""'], {}), "(G, abs_ricci, 'abs_ricci')\n", (14072, 14099), True, 'import networkx as nx\n'), ((934, 956), 'decimal.Decimal', 'decimal.Decimal', (['start'], {}), '(start)\n', (949, 956), False, 'import decimal\n'), ((974, 995), 'decimal.Decimal', 'decimal.Decimal', (['step'], {}), '(step)\n', (989, 995), False, 'import decimal\n'), ((1372, 1381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1379, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1480, 1505), 'numpy.linalg.svd', 'np.linalg.svd', (['covariance'], {}), '(covariance)\n', (1493, 1505), True, 'import numpy as np\n'), ((1906, 1915), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1913, 1915), True, 'import matplotlib.pyplot as plt\n'), ((7528, 7538), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (7535, 7538), True, 'import numpy as np\n'), ((7574, 7584), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (7581, 7584), True, 'import numpy as np\n'), ((7586, 7596), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (7593, 7596), True, 'import numpy as np\n'), ((7626, 7636), 'numpy.sort', 'np.sort', 
(['x'], {}), '(x)\n', (7633, 7636), True, 'import numpy as np\n'), ((7989, 8014), 'numpy.array', 'np.array', (['df.means_[:, 0]'], {}), '(df.means_[:, 0])\n', (7997, 8014), True, 'import numpy as np\n'), ((8026, 8051), 'numpy.array', 'np.array', (['df.means_[:, 1]'], {}), '(df.means_[:, 1])\n', (8034, 8051), True, 'import numpy as np\n'), ((8074, 8107), 'numpy.array', 'np.array', (["df['geographic'].values"], {}), "(df['geographic'].values)\n", (8082, 8107), True, 'import numpy as np\n'), ((8120, 8150), 'numpy.array', 'np.array', (["df['latency'].values"], {}), "(df['latency'].values)\n", (8128, 8150), True, 'import numpy as np\n'), ((9283, 9302), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9291, 9302), True, 'import numpy as np\n'), ((11075, 11095), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (11084, 11095), False, 'import json\n'), ((11998, 12013), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (12005, 12013), False, 'from collections import Counter\n'), ((13177, 13192), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (13188, 13192), False, 'import pickle\n'), ((13250, 13265), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (13261, 13265), False, 'import pickle\n'), ((13626, 13646), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (13635, 13646), False, 'import json\n'), ((14889, 14904), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (14900, 14904), False, 'import pickle\n'), ((1533, 1561), 'numpy.arctan2', 'np.arctan2', (['U[1, 0]', 'U[0, 0]'], {}), '(U[1, 0], U[0, 0])\n', (1543, 1561), True, 'import numpy as np\n'), ((1591, 1601), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (1598, 1601), True, 'import numpy as np\n'), ((1658, 1677), 'numpy.sqrt', 'np.sqrt', (['covariance'], {}), '(covariance)\n', (1665, 1677), True, 'import numpy as np\n'), ((1752, 1815), 'matplotlib.patches.Ellipse', 'Ellipse', (['position', '(nsig * width)', '(nsig * height)', 'angle'], {}), 
'(position, nsig * width, nsig * height, angle, **kwargs)\n', (1759, 1815), False, 'from matplotlib.patches import Ellipse\n'), ((5125, 5149), 'pandas.read_pickle', 'pd.read_pickle', (['path_geo'], {}), '(path_geo)\n', (5139, 5149), True, 'import pandas as pd\n'), ((7030, 7042), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (7039, 7042), True, 'import numpy as np\n'), ((7542, 7555), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (7552, 7555), True, 'import numpy as np\n'), ((7651, 7664), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (7661, 7664), True, 'import numpy as np\n'), ((9025, 9044), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9033, 9044), True, 'import numpy as np\n'), ((9069, 9088), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9077, 9088), True, 'import numpy as np\n'), ((9160, 9179), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9168, 9179), True, 'import numpy as np\n'), ((9219, 9238), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9227, 9238), True, 'import numpy as np\n'), ((9486, 9533), 'numpy.absolute', 'np.absolute', (['(p3[0] / first - a * p3[1] / second)'], {}), '(p3[0] / first - a * p3[1] / second)\n', (9497, 9533), True, 'import numpy as np\n'), ((9536, 9555), 'numpy.sqrt', 'np.sqrt', (['(1 + a ** 2)'], {}), '(1 + a ** 2)\n', (9543, 9555), True, 'import numpy as np\n'), ((10651, 10684), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""city"""'], {}), "(G, 'city')\n", (10673, 10684), True, 'import networkx as nx\n'), ((11773, 11844), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n', 'covariance_type': '"""full"""', 'random_state': '(1)'}), "(n_components=n, covariance_type='full', random_state=1)\n", (11788, 11844), False, 'from sklearn.mixture import GaussianMixture\n'), ((5665, 5677), 'ripe.atlas.cousteau.Probe', 'Probe', ([], {'id': 'id'}), '(id=id)\n', (5670, 5677), False, 'from 
ripe.atlas.cousteau import Probe\n'), ((5752, 5764), 'ripe.atlas.cousteau.Probe', 'Probe', ([], {'id': 'id'}), '(id=id)\n', (5757, 5764), False, 'from ripe.atlas.cousteau import Probe\n'), ((7081, 7091), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (7088, 7091), True, 'import numpy as np\n'), ((7485, 7503), 'statsmodels.api.add_constant', 'sm.add_constant', (['x'], {}), '(x)\n', (7500, 7503), True, 'import statsmodels.api as sm\n'), ((9656, 9675), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9664, 9675), True, 'import numpy as np\n'), ((9687, 9706), 'numpy.array', 'np.array', (['df.values'], {}), '(df.values)\n', (9695, 9706), True, 'import numpy as np\n'), ((5960, 5972), 'ripe.atlas.cousteau.Probe', 'Probe', ([], {'id': 'id'}), '(id=id)\n', (5965, 5972), False, 'from ripe.atlas.cousteau import Probe\n'), ((6563, 6574), 'ripe.atlas.cousteau.Probe', 'Probe', ([], {'id': 't'}), '(id=t)\n', (6568, 6574), False, 'from ripe.atlas.cousteau import Probe\n')] |
import config as cfg
import numpy as np
import pandas as pd
import warnings
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, multilabel_confusion_matrix
def print_conf_mat(true_labels, preds):
    """Print a labelled 2x2 confusion matrix for a binary classification."""
    row_index = pd.Index(['True Negative', 'True Positive'], name="rows")
    col_index = pd.Index(['Predicted Negative', 'Predicted Positive'], name="columns")
    matrix = confusion_matrix(true_labels, preds)
    labelled = pd.DataFrame(data=matrix, index=row_index, columns=col_index)
    print("Confusion Matrix \n", labelled)
def print_multilabel_report(true_labels, preds, filehandler=None, classes=None):
    """
    Print one labelled confusion matrix plus derived metrics per label of a
    multilabel problem, and return the metrics averaged over all labels.

    :param true_labels: ground-truth multilabel indicator array
    :param preds: predicted multilabel indicator array
    :param filehandler: optional open file the report is written to instead
        of stdout (this path requires ``classes`` to index ``cfg.TARGETS``)
    :param classes: optional sequence mapping label position to a key of
        ``cfg.TARGETS``
    :return: tuple (accuracy, precision, recall, misclassification rate, f1),
        each averaged over the labels
    """
    warnings.filterwarnings("ignore")
    I = pd.Index(['True Negative', 'True Positive'], name="rows")
    C = pd.Index(['Predicted Negative', 'Predicted Positive'], name="columns")
    ml_cm = multilabel_confusion_matrix(true_labels, preds)
    n_labels = len(ml_cm)
    tot_acc = 0.0
    tot_prec = 0.0
    tot_rec = 0.0
    tot_misclass = 0.0
    tot_f1 = 0.0
    for i, cm in enumerate(ml_cm):
        cm_df = pd.DataFrame(data=cm, index=I, columns=C)
        # Derive the metrics from the confusion matrix.
        tp = cm[1, 1]
        tn = cm[0, 0]
        fn = cm[1, 0]
        fp = cm[0, 1]
        accuracy = (tp + tn) / np.sum(cm)
        misclass_rate = 1 - accuracy
        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        f1 = 2 * (recall * precision) / (recall + precision)
        if filehandler:
            print("Confusion Matrix: {}".format(cfg.TARGETS[classes[i]]), file=filehandler)
            print(cm_df, file=filehandler)
            print("Accuracy: ", accuracy,
                  "Misclassification Rate: ", misclass_rate,
                  "Recall: ", recall,
                  "Precision: ", precision,
                  "F1: ", f1,
                  file=filehandler)
        else:
            if classes:
                print("Confusion Matrix: {}".format(classes[i]))
            else:
                # Bug fix: the original printed cfg.TARGETS[classes[i]] here,
                # but this branch is only reached when ``classes`` is falsy,
                # so it always raised TypeError. Fall back to the label index.
                print("Confusion Matrix: {}".format(i))
            print(cm_df)
            print("Accuracy: ", accuracy,
                  "Misclassification Rate: ", misclass_rate,
                  "Recall: ", recall,
                  "Precision: ", precision)
        tot_acc += accuracy
        tot_prec += precision
        tot_rec += recall
        tot_misclass += misclass_rate
        tot_f1 += f1
    # Fix: the original relied on a trailing ``i += 1`` hack to turn the last
    # loop index into a count; use the explicit label count instead.
    return (tot_acc / n_labels, tot_prec / n_labels, tot_rec / n_labels,
            tot_misclass / n_labels, tot_f1 / n_labels)
"pandas.Index",
"numpy.sum",
"pandas.DataFrame",
"sklearn.metrics.multilabel_confusion_matrix",
"warnings.filterwarnings",
"sklearn.metrics.confusion_matrix"
] | [((242, 278), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_labels', 'preds'], {}), '(true_labels, preds)\n', (258, 278), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, multilabel_confusion_matrix\n'), ((287, 344), 'pandas.Index', 'pd.Index', (["['True Negative', 'True Positive']"], {'name': '"""rows"""'}), "(['True Negative', 'True Positive'], name='rows')\n", (295, 344), True, 'import pandas as pd\n'), ((353, 423), 'pandas.Index', 'pd.Index', (["['Predicted Negative', 'Predicted Positive']"], {'name': '"""columns"""'}), "(['Predicted Negative', 'Predicted Positive'], name='columns')\n", (361, 423), True, 'import pandas as pd\n'), ((436, 477), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'cm', 'index': 'I', 'columns': 'C'}), '(data=cm, index=I, columns=C)\n', (448, 477), True, 'import pandas as pd\n'), ((605, 638), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (628, 638), False, 'import warnings\n'), ((647, 704), 'pandas.Index', 'pd.Index', (["['True Negative', 'True Positive']"], {'name': '"""rows"""'}), "(['True Negative', 'True Positive'], name='rows')\n", (655, 704), True, 'import pandas as pd\n'), ((713, 783), 'pandas.Index', 'pd.Index', (["['Predicted Negative', 'Predicted Positive']"], {'name': '"""columns"""'}), "(['Predicted Negative', 'Predicted Positive'], name='columns')\n", (721, 783), True, 'import pandas as pd\n'), ((796, 843), 'sklearn.metrics.multilabel_confusion_matrix', 'multilabel_confusion_matrix', (['true_labels', 'preds'], {}), '(true_labels, preds)\n', (823, 843), False, 'from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, multilabel_confusion_matrix\n'), ((993, 1034), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'cm', 'index': 'I', 'columns': 'C'}), '(data=cm, index=I, columns=C)\n', (1005, 1034), True, 'import pandas as pd\n'), ((1199, 1209), 'numpy.sum', 'np.sum', (['cm'], {}), 
'(cm)\n', (1205, 1209), True, 'import numpy as np\n')] |
import itertools
import numpy as np
import torch
from torchvision.utils import make_grid
import plac
from pathlib import Path
from .data.loader import encode_batch_of_pairs, load_processed_train_batch
from .utils.plot import plt, cm, sns
from .utils.loading import load_model_skeleton
# Colormap per state-variable type used by visualize_embeddings. "angle"
# gets matplotlib's cyclic twilight map so 0 and 2*pi share a color.
CMAPS = {
    "velocity": cm.viridis,
    "angle": cm.twilight,
    "position": cm.viridis,
    "default": cm.viridis,
}
@torch.no_grad()
def visualize_embeddings(val_data, vae, env_info, summary_writer, epoch):
    """Scatter the VAE latent means colored by each ground-truth state
    variable, for every 2d/3d slice of the latent space.

    Logs the figure to ``summary_writer`` when one is given; otherwise the
    figure is returned for interactive inspection.
    """
    # Visualize latents, colored by states and actions
    sns.axes_style("whitegrid", {"axes.edgecolor": "1."})
    latent_size = vae.latent_size
    data = val_data.get_pairs_as_batch(max_pairs=1000)
    latents = encode_batch_of_pairs(data, vae).current.mean
    observations = data["observation"].current
    # Move both to numpy for matplotlib, leaving non-tensors untouched.
    latents, observations = map(
        lambda x: x if not isinstance(x, torch.Tensor) else x.cpu().detach().numpy(),
        (latents, observations),
    )
    states = env_info.obs_to_state_fcn(observations)
    state_names = env_info.state_names
    state_types = env_info.state_types
    if latent_size == 2:
        # Only one possible 2d slice.
        latent_slices = np.array([[0, 1]])
    else:
        # One 3d plot per 3-combination of latent dimensions.
        latent_slices = np.array(list(itertools.combinations(range(latent_size), 3)))
    ncols = len(state_names)
    nrows = len(latent_slices)
    fig, axes = plt.subplots(
        nrows,
        ncols,
        subplot_kw=None if latent_size <= 2 else dict(projection="3d"),
        figsize=(4 * ncols, 4 * nrows),
        squeeze=False,
    )
    # Grid layout: one column per state variable, one row per latent slice.
    for col_idx, (state_name, state_type) in enumerate(zip(state_names, state_types)):
        for row_idx, slice_idxs in enumerate(latent_slices):
            ax = axes[row_idx, col_idx]
            cmap = CMAPS[state_type]
            plt.set_cmap(cmap)
            if len(slice_idxs) <= 2:
                _plot = ax.scatter(
                    latents[:, slice_idxs[0]],
                    latents[:, slice_idxs[1]],
                    c=states[:, col_idx],
                )
            else:
                _plot = ax.scatter(
                    latents[:, slice_idxs[0]],
                    latents[:, slice_idxs[1]],
                    latents[:, slice_idxs[2]],
                    c=states[:, col_idx],
                )
            cb = fig.colorbar(_plot, ax=ax)
            cb.set_label(state_name)
    fig.suptitle(f"X visualization: Epoch {epoch}")
    fig.tight_layout()
    if summary_writer:
        summary_writer.add_figure(f"embeddings", fig, epoch)
    if summary_writer:
        plt.close("all")
    else:
        return fig
@torch.no_grad()
def visualize_reconstructions(
    val_data, vae, val_reconstruction_n_rollouts, summary_writer, epoch
):
    """Log a grid of (input, reconstruction, difference) image strips."""
    batch = val_data.get_chunks_as_batch(max_chunks=val_reconstruction_n_rollouts)
    frames = batch["rendering"]
    codes = vae.encode_sequence(frames).sample
    recons = vae.decode_sequence(codes)
    # The decoder may emit fewer frames than it consumed (warmup period),
    # so compare only against the trailing frames.
    n_recon = recons.shape[0]
    diffs = frames[-n_recon:] - recons
    # Prepend white frames so all three strips have equal length.
    blank = torch.ones(
        frames.shape[0] - n_recon,
        *recons.shape[1:],
        device=recons.device,
    )
    recons = torch.cat([blank, recons])
    diffs = torch.cat([blank, diffs])
    tb_grid = grid_from_batched_timeseries(
        [frames, recons, diffs], normalize=False
    )
    summary_writer.add_image("reconstructions/reconstructions", tb_grid, epoch)
def grid_from_batched_timeseries(tensor_or_list, **kwargs):
    """Make an image grid from a batched timeseries tensor (T x B x <image_dims>).

    Args:
        tensor_or_list: a single tensor, or a list/tuple of tensors sharing
            the same (T, B) leading dimensions; each tensor becomes one strip,
            separated by a white padding row.
        **kwargs: forwarded to ``torchvision.utils.make_grid``.

    Returns:
        The grid image tensor produced by ``make_grid``.
    """
    if isinstance(tensor_or_list, (list, tuple)):
        n_cols = tensor_or_list[0].shape[0]
        strip_n_rows = tensor_or_list[0].shape[1]
        assert all(t.shape[1] == strip_n_rows for t in tensor_or_list)
        assert all(t.shape[0] == n_cols for t in tensor_or_list)
        # Fix: build a copy instead of appending to the caller's list in
        # place — the previous implementation mutated the argument.
        strips = list(tensor_or_list) + [torch.ones_like(tensor_or_list[0])]
        interleaved = torch.cat(
            [
                torch.stack([strip[:, n] for strip in strips])
                for n in range(strip_n_rows)
            ]
        )
        # NOTE(review): assumes 3x64x64 frames — confirm against the renderer.
        images = interleaved.reshape(-1, 3, 64, 64)
    else:
        n_cols = tensor_or_list.shape[0]
        images = tensor_or_list.transpose(0, 1).reshape(-1, 3, 64, 64)
    # nrow is the number of images per grid row -> one timestep per column.
    return make_grid(images, nrow=n_cols, **kwargs)
@torch.no_grad()
def visualize_reward(val_data, vae, reward_model, env_info, summary_writer, epoch):
    """Plot real vs. predicted rewards over state space; log or return the figure."""
    pairs = val_data.get_pairs_as_batch(max_pairs=1000)
    latents = encode_batch_of_pairs(pairs, vae).current.mean.detach()
    next_obs = pairs["observation"].next.cpu()
    real_rewards = pairs["reward"].flatten().cpu()
    predicted_rewards = reward_model(latents, pairs["action"]).detach().cpu().flatten()
    fig, axes = _plot_states_rewards(
        next_obs, real_rewards, predicted_rewards, env_info
    )
    # With a summary writer, log and release the figure; otherwise hand it back.
    if not summary_writer:
        return fig
    summary_writer.add_figure(f"rewards", fig, epoch)
    plt.close("all")
def _set_labels(ax, *state_names):
ax.set_xlabel(state_names[0])
ax.set_ylabel(state_names[1])
if hasattr(ax, "zaxis"):
ax.zaxis.set_rotate_label(False)
ax.set_zlabel(state_names[2])
def _plot_states_rewards(observations, real_rewards, predicted_rewards, env_info):
    """Plot true rewards, predicted rewards, and their log-abs difference over
    the environment state space. Returns (fig, axes)."""
    observations = np.array(observations)
    real_rewards = np.array(real_rewards)
    predicted_rewards = np.array(predicted_rewards)
    # Log of absolute prediction error, clipped below at 1e-4 for plotting.
    reward_diff = np.log(np.abs(real_rewards - predicted_rewards)).clip(-4, 100)
    states = env_info.obs_to_state_fcn(observations)
    state_names = env_info.state_names
    state_size = states.shape[-1]
    sns.axes_style("whitegrid")
    # 2D states get one 2D scatter row; higher dims get one 3D row per
    # 3-combination of state dimensions.
    if state_size == 2:
        slices = np.array([[0, 1]])
    else:
        slices = np.array(list(itertools.combinations(range(state_size), 3)))
    # Three columns: true rewards, learned rewards, difference.
    ncols = 3
    nrows = len(slices)
    fig, axes = plt.subplots(
        nrows,
        ncols,
        subplot_kw=None if state_size <= 2 else dict(projection="3d"),
        figsize=(4 * ncols, 4 * nrows),
        squeeze=False,
    )
    for row_idx, slice_idxs in enumerate(slices):
        if state_size == 2:
            _states_args = states[:, 0], states[:, 1]
            _slice_state_names = state_names[0], state_names[1]
        else:
            _states_args = (
                states[:, slice_idxs[0]],
                states[:, slice_idxs[1]],
                states[:, slice_idxs[2]],
            )
            _slice_state_names = [state_names[k] for k in slice_idxs]
        # Column 0: ground-truth rewards.
        ax = axes[row_idx, 0]
        _plot = ax.scatter(*_states_args, c=real_rewards.flatten(), s=5)
        cb = fig.colorbar(_plot, ax=ax, pad=0.1, shrink=0.8)
        ax.set_title("True Rewards")
        cb.set_label("True Rewards")
        _set_labels(ax, *_slice_state_names)
        ax.dist = 13
        # Column 1: reward-model predictions.
        ax = axes[row_idx, 1]
        _plot = ax.scatter(*_states_args, c=predicted_rewards.flatten(), s=5)
        cb = fig.colorbar(_plot, ax=ax, pad=0.1, shrink=0.8)
        ax.set_title("Learned Reward Model")
        cb.set_label("Learned Rewards")
        _set_labels(ax, *_slice_state_names)
        ax.dist = 13
        # Column 2: log-scale prediction error, with tick labels as 1e(x).
        ax = axes[row_idx, 2]
        _plot = ax.scatter(*_states_args, c=reward_diff.flatten(), s=5)
        cb = fig.colorbar(_plot, ax=ax, pad=0.1, shrink=0.8)
        ax.set_title("Difference")
        cb.set_label("Reward Differences (log-scale)")
        ticks = cb.get_ticks()
        cb.set_ticks(ticks)
        cb.set_ticklabels([f"1e({int(t)})" for t in ticks])
        _set_labels(ax, *_slice_state_names)
        ax.dist = 13
    plt.tight_layout()
    plt.set_cmap(cm.viridis)
    return fig, axes
@plac.annotations(model_path=plac.Annotation(kind="positional", type=str))
def main(model_path,):
    """ Write latent states to *.npz file """
    config, model = load_model_skeleton(model_path)
    model.load_state_dict(torch.load(model_path))
    model.eval()
    latent, action, reward, observation = load_processed_train_batch(
        config, model.vae, max_pairs=1000, with_observation=True
    )
    # save latents to run directory
    latent = latent.current.mean.cpu().numpy()
    observation = observation.current.cpu().numpy()
    model_path_obj = Path(model_path)
    npz_file = model_path_obj.parent.parent.joinpath(
        f"{model_path_obj.stem}_latents.npz"
    )
    # Fix: save named arrays via keyword arguments. The previous positional
    # dict was stored as a single 0-d object array called 'arr_0', so
    # np.load(...)["latent"] would raise KeyError.
    np.savez(npz_file, latent=latent, observation=observation)
    print(f"Latent states saved at {npz_file}")
if __name__ == "__main__":
    # Inference-only script: disable autograd globally while running main().
    with torch.no_grad():
        plac.call(main)
| [
"numpy.abs",
"numpy.savez",
"torch.ones_like",
"pathlib.Path",
"torch.load",
"plac.Annotation",
"plac.call",
"numpy.array",
"torch.no_grad",
"torchvision.utils.make_grid",
"torch.cat",
"torch.ones"
] | [((413, 428), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (426, 428), False, 'import torch\n'), ((2592, 2607), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2605, 2607), False, 'import torch\n'), ((4893, 4908), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4906, 4908), False, 'import torch\n'), ((3368, 3482), 'torch.ones', 'torch.ones', (['(rendering.shape[0] - n_reconstructions)', '*reconstructions.shape[1:]'], {'device': 'reconstructions.device'}), '(rendering.shape[0] - n_reconstructions, *reconstructions.shape[1\n :], device=reconstructions.device)\n', (3378, 3482), False, 'import torch\n'), ((3531, 3568), 'torch.cat', 'torch.cat', (['[padding, reconstructions]'], {}), '([padding, reconstructions])\n', (3540, 3568), False, 'import torch\n'), ((3587, 3620), 'torch.cat', 'torch.cat', (['[padding, differences]'], {}), '([padding, differences])\n', (3596, 3620), False, 'import torch\n'), ((4833, 4873), 'torchvision.utils.make_grid', 'make_grid', (['images'], {'nrow': 'n_cols'}), '(images, nrow=n_cols, **kwargs)\n', (4842, 4873), False, 'from torchvision.utils import make_grid\n'), ((6000, 6022), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (6008, 6022), True, 'import numpy as np\n'), ((6042, 6064), 'numpy.array', 'np.array', (['real_rewards'], {}), '(real_rewards)\n', (6050, 6064), True, 'import numpy as np\n'), ((6089, 6116), 'numpy.array', 'np.array', (['predicted_rewards'], {}), '(predicted_rewards)\n', (6097, 6116), True, 'import numpy as np\n'), ((8898, 8914), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (8902, 8914), False, 'from pathlib import Path\n'), ((9024, 9090), 'numpy.savez', 'np.savez', (['npz_file', "{'latent': latent, 'observation': observation}"], {}), "(npz_file, {'latent': latent, 'observation': observation})\n", (9032, 9090), True, 'import numpy as np\n'), ((1154, 1172), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (1162, 1172), True, 'import numpy as 
np\n'), ((4252, 4286), 'torch.ones_like', 'torch.ones_like', (['tensor_or_list[0]'], {}), '(tensor_or_list[0])\n', (4267, 4286), False, 'import torch\n'), ((6400, 6418), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (6408, 6418), True, 'import numpy as np\n'), ((8560, 8582), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (8570, 8582), False, 'import torch\n'), ((8366, 8410), 'plac.Annotation', 'plac.Annotation', ([], {'kind': '"""positional"""', 'type': 'str'}), "(kind='positional', type=str)\n", (8381, 8410), False, 'import plac\n'), ((9177, 9192), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9190, 9192), False, 'import torch\n'), ((9202, 9217), 'plac.call', 'plac.call', (['main'], {}), '(main)\n', (9211, 9217), False, 'import plac\n'), ((6143, 6183), 'numpy.abs', 'np.abs', (['(real_rewards - predicted_rewards)'], {}), '(real_rewards - predicted_rewards)\n', (6149, 6183), True, 'import numpy as np\n')] |
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of padding ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_compression.python.ops import padding_ops
class PaddingOpsTest(tf.test.TestCase):
  """Checks same_padding_for_kernel() against actual conv/corr peak shifts."""

  def test_same_padding_corr(self):
    """Correlation mode: padding must equal the impulse-position shift."""
    for ishape in [[10], [11]]:
      # Impulse input: a single 1 in the middle of a zero signal.
      inputs = np.zeros(ishape, dtype=np.float32)
      inputs[len(inputs) // 2] = 1
      for kshape in [[4], [5]]:
        # Impulse kernel as well, so the output peak position isolates
        # the padding offset.
        kernel = np.zeros(kshape, dtype=np.float32)
        kernel[len(kernel) // 2] = 1
        outputs = tf.nn.convolution(
            tf.reshape(inputs, (1, 1, -1, 1)),
            tf.reshape(kernel, (1, -1, 1, 1)),
            padding="VALID", data_format="NHWC")
        with self.test_session() as sess:
          outputs = np.squeeze(sess.run(outputs))
        pos_inp = np.squeeze(np.nonzero(inputs))
        pos_out = np.squeeze(np.nonzero(outputs))
        # corr=True: padding should undo the peak displacement exactly.
        padding = padding_ops.same_padding_for_kernel(kshape, True)
        self.assertEqual(padding[0][0], pos_inp - pos_out)

  def test_same_padding_conv(self):
    """Convolution (transpose) mode: same check with corr=False."""
    for ishape in [[10], [11]]:
      inputs = np.zeros(ishape, dtype=np.float32)
      inputs[len(inputs) // 2] = 1
      for kshape in [[4], [5]]:
        kernel = np.zeros(kshape, dtype=np.float32)
        kernel[len(kernel) // 2] = 1
        outputs = tf.nn.conv2d_transpose(
            tf.reshape(inputs, (1, 1, -1, 1)),
            tf.reshape(kernel, (1, -1, 1, 1)),
            (1, 1, ishape[0] + kshape[0] - 1, 1),
            strides=(1, 1, 1, 1), padding="VALID", data_format="NHWC")
        # Crop the transpose-conv halo so output aligns with the input grid.
        outputs = outputs[:, :, (kshape[0] - 1):-(kshape[0] - 1), :]
        with self.test_session() as sess:
          outputs = np.squeeze(sess.run(outputs))
        pos_inp = np.squeeze(np.nonzero(inputs))
        pos_out = np.squeeze(np.nonzero(outputs))
        padding = padding_ops.same_padding_for_kernel(kshape, False)
        self.assertEqual(padding[0][0], pos_inp - pos_out)

if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.compat.v1.reshape",
"tensorflow_compression.python.ops.padding_ops.same_padding_for_kernel",
"numpy.zeros",
"numpy.nonzero",
"tensorflow.compat.v1.test.main"
] | [((2694, 2708), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (2706, 2708), True, 'import tensorflow.compat.v1 as tf\n'), ((1054, 1088), 'numpy.zeros', 'np.zeros', (['ishape'], {'dtype': 'np.float32'}), '(ishape, dtype=np.float32)\n', (1062, 1088), True, 'import numpy as np\n'), ((1827, 1861), 'numpy.zeros', 'np.zeros', (['ishape'], {'dtype': 'np.float32'}), '(ishape, dtype=np.float32)\n', (1835, 1861), True, 'import numpy as np\n'), ((1173, 1207), 'numpy.zeros', 'np.zeros', (['kshape'], {'dtype': 'np.float32'}), '(kshape, dtype=np.float32)\n', (1181, 1207), True, 'import numpy as np\n'), ((1634, 1683), 'tensorflow_compression.python.ops.padding_ops.same_padding_for_kernel', 'padding_ops.same_padding_for_kernel', (['kshape', '(True)'], {}), '(kshape, True)\n', (1669, 1683), False, 'from tensorflow_compression.python.ops import padding_ops\n'), ((1946, 1980), 'numpy.zeros', 'np.zeros', (['kshape'], {'dtype': 'np.float32'}), '(kshape, dtype=np.float32)\n', (1954, 1980), True, 'import numpy as np\n'), ((2553, 2603), 'tensorflow_compression.python.ops.padding_ops.same_padding_for_kernel', 'padding_ops.same_padding_for_kernel', (['kshape', '(False)'], {}), '(kshape, False)\n', (2588, 2603), False, 'from tensorflow_compression.python.ops import padding_ops\n'), ((1294, 1327), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['inputs', '(1, 1, -1, 1)'], {}), '(inputs, (1, 1, -1, 1))\n', (1304, 1327), True, 'import tensorflow.compat.v1 as tf\n'), ((1341, 1374), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['kernel', '(1, -1, 1, 1)'], {}), '(kernel, (1, -1, 1, 1))\n', (1351, 1374), True, 'import tensorflow.compat.v1 as tf\n'), ((1546, 1564), 'numpy.nonzero', 'np.nonzero', (['inputs'], {}), '(inputs)\n', (1556, 1564), True, 'import numpy as np\n'), ((1595, 1614), 'numpy.nonzero', 'np.nonzero', (['outputs'], {}), '(outputs)\n', (1605, 1614), True, 'import numpy as np\n'), ((2072, 2105), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['inputs', '(1, 
1, -1, 1)'], {}), '(inputs, (1, 1, -1, 1))\n', (2082, 2105), True, 'import tensorflow.compat.v1 as tf\n'), ((2119, 2152), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['kernel', '(1, -1, 1, 1)'], {}), '(kernel, (1, -1, 1, 1))\n', (2129, 2152), True, 'import tensorflow.compat.v1 as tf\n'), ((2465, 2483), 'numpy.nonzero', 'np.nonzero', (['inputs'], {}), '(inputs)\n', (2475, 2483), True, 'import numpy as np\n'), ((2514, 2533), 'numpy.nonzero', 'np.nonzero', (['outputs'], {}), '(outputs)\n', (2524, 2533), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Utility functions for welly.
:copyright: 2016 Agile Geoscience
:license: Apache 2.0
"""
from __future__ import division
import re
import glob
import numpy as np
import matplotlib.pyplot as plt
def deprecated(instructions):
    """
    Flags a method as deprecated. This decorator can be used to mark functions
    as deprecated. It will result in a warning being emitted when the function
    is used.
    Args:
        instructions (str): A human-friendly string of instructions, such
            as: 'Please migrate to add_proxy() ASAP.'
    Returns:
        The decorated function.
    """
    # Fix: functools, inspect and warnings were used but never imported in
    # this module; import them locally so the decorator is self-contained.
    import functools
    import inspect
    import warnings

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            message = 'Call to deprecated function {}. {}'.format(
                func.__name__,
                instructions)
            # Attribute the warning to the *caller's* file and line number.
            frame = inspect.currentframe().f_back
            warnings.warn_explicit(message,
                                   category=DeprecationWarning,
                                   filename=inspect.getfile(frame.f_code),
                                   lineno=frame.f_lineno)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def round_to_n(x, n):
    """
    Round ``x`` to ``n`` significant figures.

    Args:
        x (float): the number to round. May be negative; 0 is returned as 0.
        n (int): the number of significant figures.

    Returns:
        float: ``x`` rounded to ``n`` significant figures.
    """
    if x == 0:
        # log10 is undefined at 0, and zero has no significant figures.
        return 0
    # Use the magnitude so negative numbers work too (previously log10 of a
    # negative number produced NaN and the int() cast raised ValueError).
    return round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1))
def null(x):
    """
    Identity function: return ``x`` unchanged. Used as the default hook in
    functions that can apply a user-supplied transform before returning.
    """
    return x
def null_default(x):
    """
    Build a function that ignores its argument and always returns ``x``.
    Used as a default hook factory.
    """
    def _constant(_ignored):
        return x
    return _constant
def skip(x):
    """
    Discard ``x`` and always return ``None``.
    """
    return None
def are_close(x, y):
    """True when ``x`` and ``y`` differ by less than 1e-5 (absolute)."""
    difference = abs(y - x)
    return difference < 0.00001
def sharey(axes):
    """
    Shared axes limits without shared locators, ticks, etc.
    By <NAME>
    """
    shared = Linker(axes)
    for axis in axes:
        axis._linker = shared
def unsharey(ax):
    """
    Remove sharing from an axes.
    By <NAME>
    """
    linker = ax._linker
    linker.unlink(ax)
    ax._linker = None
class Linker(object):
    """
    Keeps y-limits of a sequence of axes in sync when panning/zooming.
    By <NAME>
    """
    def __init__(self, axes):
        self.axes = axes
        # axes -> matplotlib callback id, so the hook can be disconnected later.
        self._cids = {}
        for ax in self.axes:
            self.link(ax)
    def unlink(self, ax):
        # Disconnect the 'ylim_changed' callback and drop its id.
        ax.callbacks.disconnect(self._cids.pop(ax))
    def link(self, ax):
        # Rescale every axes in the group whenever this one changes limits.
        self._cids[ax] = ax.callbacks.connect('ylim_changed', self.rescale)
    def rescale(self, axes):
        # Map the changed limits into scale space, then back through each
        # axes' own (possibly different) scale. emit=False prevents the
        # callbacks from re-triggering each other recursively.
        limits = axes.yaxis._scale.get_transform().transform(axes.get_ylim())
        for ax in self.axes:
            lim = ax.yaxis._scale.get_transform().inverted().transform(limits)
            ax.set_ylim(lim, emit=False, auto=None)
            # Note - This is specifically for this application!
            fix_ticks(ax)
def fix_ticks(ax):
    """
    Center ticklabels and hide any outside axes limits.
    By <NAME>
    """
    # Center the labels horizontally within the axes (x=0.5 in axes coords).
    plt.setp(ax.get_yticklabels(), ha='center', x=0.5,
             transform=ax._yaxis_transform)
    # We'll still wind up with some tick labels beyond axes limits for reasons
    # I don't fully understand...
    limits = ax.get_ylim()
    # min/max rather than limits[0]/[1] because the axis may be inverted.
    for label, loc in zip(ax.yaxis.get_ticklabels(), ax.yaxis.get_ticklocs()):
        if loc < min(limits) or loc > max(limits):
            label.set(visible=False)
        else:
            label.set(visible=True)
def flatten_list(l):
    """
    Recursively unpack nested lists into one flat list:
    [1, 2, [3, 4], [5, [6, 7]]]
    becomes
    [1, 2, 3, 4, 5, 6, 7]
    http://stackoverflow.com/a/12472564/3381305
    """
    if (l is None) or (l == []):
        return l
    head, rest = l[:1], flatten_list(l[1:])
    if isinstance(l[0], list):
        head = flatten_list(l[0])
    return head + rest
def list_and_add(a, b):
    """
    Concatenate anything into a list.
    Args:
        a: the first thing
        b: the second thing
    Returns:
        list. All the things in a list.
    """
    def as_list(thing):
        return thing if isinstance(thing, list) else [thing]
    return as_list(a) + as_list(b)
def lasio_get(l,
              section,
              item,
              attrib='value',
              default=None,
              remap=None,
              funcs=None):
    """
    Grabs, renames and transforms stuff from a lasio object.
    Args:
        l (lasio): a lasio instance.
        section (str): The LAS section to grab from, eg ``well``
        item (str): The item in the LAS section to grab from, eg ``name``
        attrib (str): The attribute of the item to grab, eg ``value``
        default (str): What to return instead.
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.
    Returns:
        The transformed item.
    """
    remap = remap or {}
    item_to_fetch = remap.get(item, item)
    if item_to_fetch is None:
        return None
    try:
        obj = getattr(l, section)
        result = getattr(obj, item_to_fetch)[attrib]
    except Exception:
        # Deliberate best-effort lookup: any failure yields the default.
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        return default
    if funcs is not None:
        # Apply the optional per-field transform; missing entries are a no-op.
        f = funcs.get(item)
        if f is not None:
            result = f(result)
    return result
def parabolic(f, x):
    """
    Three-point parabolic interpolation of a peak around index ``x``.
    From ageobot, from somewhere else.
    """
    left, mid, right = f[x - 1], f[x], f[x + 1]
    denom = left - 2 * mid + right
    xv = 0.5 * (left - right) / denom + x
    yv = mid - 0.25 * (left - right) * (xv - x)
    return (xv, yv)
def linear(u, v, d):
    """
    Linear interpolation.
    Args:
        u (float)
        v (float)
        d (float): the relative distance between the two to return.
    Returns:
        float. The interpolated value.
    """
    delta = v - u
    return u + d * delta
def find_nearest(a, value, index=False):
    """
    Find the array value, or index of the array value, closest to some given
    value.
    Args:
        a (ndarray)
        value (float)
        index (bool): whether to return the index instead of the array value.
    Returns:
        float. The array value (or index, as int) nearest the specified value.
    """
    distances = np.abs(a - value)
    nearest = distances.argmin()
    return nearest if index else a[nearest]
def find_previous(a, value, index=False, return_distance=False):
    """
    Find the nearest array value, or index of the array value, before some
    given value. Optionally also return the fractional distance of the given
    value from that previous value.
    Args:
        a (ndarray)
        value (float)
        index (bool): whether to return the index instead of the array value.
            Default: False.
        return_distance(bool): whether to return the fractional distance from
            the nearest value to the specified value. Default: False.
    Returns:
        float. The array value (or index, as int) before the specified value.
            If ``return_distance==True`` then a tuple is returned, where the
            second value is the distance.
    """
    # NOTE(review): assumes `a` is sorted ascending and that `value` lies
    # within its range; values beyond the last element raise IndexError.
    above = np.flatnonzero((a - value) > 0)
    i = above[0]
    frac = (value - a[i - 1]) / (a[i] - a[i - 1])
    result = i - 1 if index else a[i - 1]
    if return_distance:
        return result, frac
    return result
def find_edges(a):
    """
    Return two arrays: one of the changes, and one of the values.
    Returns:
        tuple: Two ndarrays, tops and values.
    """
    changed = a[1:] != a[:-1]
    tops = np.append(0, np.flatnonzero(changed) + 1)
    return tops, a[tops]
def rms(a):
    """
    From ``bruges``
    Calculates the RMS (root mean square) of an array.
    :param a: An array.
    :returns: The RMS of the array.
    """
    mean_square = np.sum(a ** 2.0) / a.size
    return np.sqrt(mean_square)
def normalize(a, new_min=0.0, new_max=1.0):
    """
    From ``bruges``
    Normalize an array to [0,1] or to arbitrary new min and max.
    Args:
        a (ndarray)
        new_min (float): the new min, default 0.
        new_max (float): the new max, default 1.
    Returns:
        ndarray. The normalized array.
    """
    shifted = a - np.amin(a)
    unit = shifted / np.amax(shifted)
    return unit * (new_max - new_min) + new_min
def moving_average(a, length, mode='valid'):
    """
    From ``bruges``
    Computes the mean in a moving window. Naive implementation.
    Example:
        >>> test = np.array([1,9,9,9,9,9,9,2,3,9,2,2,3,1,1,1,1,3,4,9,9,9,8,3])
        >>> moving_average(test, 7, mode='same')
        [ 4.42857143,  5.57142857,  6.71428571,  7.85714286,  8.        ,
          7.14285714,  7.14285714,  6.14285714,  5.14285714,  4.28571429,
          3.14285714,  3.        ,  2.71428571,  1.57142857,  1.71428571,
          2.        ,  2.85714286,  4.        ,  5.14285714,  6.14285714,
          6.42857143,  6.42857143,  6.28571429,  5.42857143]
    TODO:
        Other types of average.
    """
    # Half-window pad; doubled in 'full' mode so the output extends past
    # both ends of the input.
    pad = np.floor(length/2)
    if mode == 'full':
        pad *= 2
    pad = int(pad)
    # Make a padded version, paddding with first and last values
    r = np.zeros(a.shape[0] + 2*pad)
    r[:pad] = a[0]
    r[pad:-pad] = a
    r[-pad:] = a[-1]
    # Cumsum with shifting trick
    # (windowed sums: s[i] holds cumsum minus cumsum shifted by `length`).
    s = np.cumsum(r, dtype=float)
    s[length:] = s[length:] - s[:-length]
    out = s[length-1:]/length
    # Decide what to return
    if mode == 'same':
        if out.shape[0] != a.shape[0]:
            # If size doesn't match, then interpolate.
            out = (out[:-1, ...] + out[1:, ...]) / 2
        return out
    elif mode == 'valid':
        return out[pad:-pad]
    else:  # mode=='full' and we used a double pad
        return out
def moving_avg_conv(a, length):
    """
    From ``bruges``
    Moving average via convolution. Seems slower than naive.
    """
    kernel = np.ones(length) / length
    return np.convolve(a, kernel, mode="same")
def nan_idx(y):
    """Helper to handle indices and logical indices of NaNs.
    From https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
    Args:
        y (ndarray): 1D array with possible NaNs
    Returns:
        nans, logical indices of NaNs
        index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)
    def to_indices(mask):
        return mask.nonzero()[0]
    return nan_mask, to_indices
def extrapolate(a):
    """
    From ``bruges``
    Extrapolate up and down an array from the first and last non-NaN samples.
    E.g. Continue the first and last non-NaN values of a log up and down.

    Note: operates in place on ``a`` and also returns it. An all-NaN array
    is returned unchanged (previously this raised IndexError).
    """
    nans = np.where(~np.isnan(a))[0]
    if nans.size == 0:
        # Nothing to extrapolate from; mirror top_and_tail's all-NaN handling.
        return a
    first, last = nans[0], nans[-1]
    a[:first] = a[first]
    a[last + 1:] = a[last]
    return a
def top_and_tail(a):
    """
    Remove the NaNs from the top and tail (only) of a well log.
    Args:
        a (ndarray): An array.
    Returns:
        ndarray: The top and tailed array.
    """
    if np.all(np.isnan(a)):
        return np.array([])
    valid = np.where(~np.isnan(a))[0]
    # Slice end is None when the last valid sample is the final element.
    stop = None if valid[-1] + 1 == a.size else valid[-1] + 1
    return a[valid[0]:stop]
def dms2dd(dms):
    """
    DMS to decimal degrees.
    Args:
        dms (list). d must be negative for S and W.
    Return:
        float.
    """
    degrees, minutes, seconds = dms
    return degrees + minutes / 60. + seconds / 3600.
def dd2dms(dd):
    """
    Decimal degrees to DMS.
    Args:
        dd (float). Decimal degrees.
    Return:
        tuple. Degrees, minutes, and seconds.
    """
    total_minutes, seconds = divmod(dd * 3600, 60)
    degrees, minutes = divmod(total_minutes, 60)
    return int(degrees), int(minutes), seconds
def ricker(f, length, dt):
    """
    A Ricker wavelet.
    Args:
        f (float): frequency in Hz, e.g. 25 Hz.
        length (float): Length in s, e.g. 0.128.
        dt (float): sample interval in s, e.g. 0.001.
    Returns:
        tuple. time basis, amplitude values.
    """
    n = int(length / dt)
    # Fix: the previous version cast the linspace endpoints to int(), which
    # collapsed the time basis to all zeros for sub-second lengths (e.g.
    # length=0.128 gave t == 0 everywhere, so y was a constant 1).
    t = np.linspace(-length / 2, (length - dt) / 2, n)
    pft2 = (np.pi ** 2) * (f ** 2) * (t ** 2)
    y = (1. - 2. * pft2) * np.exp(-pft2)
    return t, y
def hex_to_rgb(hexx):
    """
    Utility function to convert hex to (r,g,b) triples.
    http://ageo.co/1CFxXpO
    Args:
        hexx (str): A hexadecimal colour, starting with '#'.
    Returns:
        tuple: The equivalent RGB triple, in the range 0 to 255.
    """
    digits = hexx.strip('#')
    step = len(digits) // 3
    return tuple(int(digits[k:k + step], 16) for k in range(0, len(digits), step))
def hex_is_dark(hexx, percent=50):
    """
    Function to decide if a hex colour is dark.
    Args:
        hexx (str): A hexadecimal colour, starting with '#'.
    Returns:
        bool: The colour's brightness is less than the given percent.
    """
    # Inlined hex -> (r, g, b) conversion.
    h = hexx.strip('#')
    step = len(h) // 3
    r, g, b = (int(h[k:k + step], 16) for k in range(0, len(h), step))
    # Perceived luminance per ITU-R BT.709, rescaled from 0-255 to 0-100.
    luma = (0.2126 * r + 0.7152 * g + 0.0722 * b) / 2.55
    return luma < percent
def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'):
    """
    Function to decide what colour to use for a given hex colour.
    Args:
        hexx (str): A hexadecimal colour, starting with '#'.
    Returns:
        bool: The colour's brightness is less than the given percent.
    """
    if hex_is_dark(hexx, percent=percent):
        return light
    return dark
def get_lines(handle, line):
    """
    Return the zero-indexed ``line``-th line from an open file-like object,
    or None if the stream has fewer lines.
    """
    for lineno, text in enumerate(handle):
        if lineno == line:
            return text
def find_file(pattern, path):
    """
    A bit like grep. Finds a pattern, looking in path. Returns the filename.

    Note: ``path`` is passed to ``glob``, so it may contain wildcards.
    """
    for candidate in glob.iglob(path):
        with open(candidate) as handle:
            contents = handle.read()
        if re.search(pattern, contents):
            return candidate
    return None
| [
"numpy.abs",
"numpy.convolve",
"numpy.log10",
"numpy.ones",
"glob.iglob",
"numpy.amin",
"numpy.where",
"numpy.floor",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.exp",
"numpy.isnan",
"numpy.sum",
"numpy.cumsum"
] | [((7674, 7692), 'numpy.append', 'np.append', (['(0)', 'tops'], {}), '(0, tops)\n', (7683, 7692), True, 'import numpy as np\n'), ((9050, 9070), 'numpy.floor', 'np.floor', (['(length / 2)'], {}), '(length / 2)\n', (9058, 9070), True, 'import numpy as np\n'), ((9203, 9233), 'numpy.zeros', 'np.zeros', (['(a.shape[0] + 2 * pad)'], {}), '(a.shape[0] + 2 * pad)\n', (9211, 9233), True, 'import numpy as np\n'), ((9334, 9359), 'numpy.cumsum', 'np.cumsum', (['r'], {'dtype': 'float'}), '(r, dtype=float)\n', (9343, 9359), True, 'import numpy as np\n'), ((9954, 9989), 'numpy.convolve', 'np.convolve', (['a', 'boxcar'], {'mode': '"""same"""'}), "(a, boxcar, mode='same')\n", (9965, 9989), True, 'import numpy as np\n'), ((13738, 13754), 'glob.iglob', 'glob.iglob', (['path'], {}), '(path)\n', (13748, 13754), False, 'import glob\n'), ((9920, 9935), 'numpy.ones', 'np.ones', (['length'], {}), '(length)\n', (9927, 9935), True, 'import numpy as np\n'), ((10588, 10599), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (10596, 10599), True, 'import numpy as np\n'), ((11191, 11202), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11199, 11202), True, 'import numpy as np\n'), ((11220, 11232), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11228, 11232), True, 'import numpy as np\n'), ((12216, 12253), 'numpy.exp', 'np.exp', (['(-np.pi ** 2 * f ** 2 * t ** 2)'], {}), '(-np.pi ** 2 * f ** 2 * t ** 2)\n', (12222, 12253), True, 'import numpy as np\n'), ((6249, 6266), 'numpy.abs', 'np.abs', (['(a - value)'], {}), '(a - value)\n', (6255, 6266), True, 'import numpy as np\n'), ((7152, 7167), 'numpy.where', 'np.where', (['(b > 0)'], {}), '(b > 0)\n', (7160, 7167), True, 'import numpy as np\n'), ((7639, 7655), 'numpy.where', 'np.where', (['(~edges)'], {}), '(~edges)\n', (7647, 7655), True, 'import numpy as np\n'), ((7908, 7924), 'numpy.sum', 'np.sum', (['(a ** 2.0)'], {}), '(a ** 2.0)\n', (7914, 7924), True, 'import numpy as np\n'), ((8275, 8285), 'numpy.amin', 'np.amin', (['a'], {}), 
'(a)\n', (8282, 8285), True, 'import numpy as np\n'), ((8301, 8311), 'numpy.amin', 'np.amin', (['a'], {}), '(a)\n', (8308, 8311), True, 'import numpy as np\n'), ((10859, 10870), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (10867, 10870), True, 'import numpy as np\n'), ((11254, 11265), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11262, 11265), True, 'import numpy as np\n'), ((1326, 1337), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (1334, 1337), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
LOG_FILE = './log.txt'
def get_log(log):
    """
    Parse a training log file where each line looks like "<tag> <loss>".

    Args:
        log (str): path to the log file.

    Returns:
        list[float]: one loss value per line.
    """
    # Use a context manager so the handle is closed even if parsing fails
    # (the original opened/closed the file manually).
    with open(log) as f:
        return [float(line.strip('\n').split(' ')[1]) for line in f]
def plot_iteration(log):
    """Plot the raw loss value at every training iteration."""
    losses = get_log(log)
    plt.plot(range(len(losses)), losses)
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.title('Training Curve')
    plt.show()
def plot_epoch(log, num_samples, batch_size):
    """Avg for each epoch
    num_per_epoch: number of samples in the training dataset
    batch_size: training batch size
    """
    losses = get_log(log)
    n_epochs = len(losses) * batch_size // num_samples
    iters_per_epoch = num_samples // batch_size
    xs = range(0, n_epochs + 1)
    # Seed the curve with the very first loss, then append one mean per epoch.
    ys = [losses[0]]
    for e in range(n_epochs):
        chunk = losses[e * iters_per_epoch + 1:(e + 1) * iters_per_epoch + 1]
        ys.append(np.mean(np.array(chunk)))
    plt.plot(xs, ys)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Curve')
    plt.show()
if __name__ == '__main__':
    # num_samples=10582, batch_size=10 (see plot_epoch's signature).
    plot_epoch(LOG_FILE, 10582, 10)
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((329, 352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (339, 352), True, 'import matplotlib.pyplot as plt\n'), ((354, 372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (364, 372), True, 'import matplotlib.pyplot as plt\n'), ((374, 401), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Curve"""'], {}), "('Training Curve')\n", (383, 401), True, 'import matplotlib.pyplot as plt\n'), ((403, 413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (411, 413), True, 'import matplotlib.pyplot as plt\n'), ((844, 858), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (852, 858), True, 'import matplotlib.pyplot as plt\n'), ((860, 879), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (870, 879), True, 'import matplotlib.pyplot as plt\n'), ((881, 899), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (891, 899), True, 'import matplotlib.pyplot as plt\n'), ((901, 928), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Curve"""'], {}), "('Training Curve')\n", (910, 928), True, 'import matplotlib.pyplot as plt\n'), ((930, 940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (938, 940), True, 'import matplotlib.pyplot as plt\n'), ((778, 849), 'numpy.array', 'np.array', (['loss[i * iters_per_epochs + 1:(i + 1) * iters_per_epochs + 1]'], {}), '(loss[i * iters_per_epochs + 1:(i + 1) * iters_per_epochs + 1])\n', (786, 849), True, 'import numpy as np\n')] |
# %% import library
import pathlib
import sys
from glob import glob
import numpy as np
import pandas as pd
# import residual_node2vec as rv
import utils_link_pred
from scipy import sparse
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
# Helper Functions
def get_params(filename):
    """
    Parse ``key=value`` tokens out of a filename stem (underscore-separated).

    Returns a dict holding the original path under "filename" plus one entry
    per ``key=value`` token.
    """
    retval = {"filename": filename}
    for token in pathlib.Path(filename).stem.split("_"):
        if "=" in token:
            key, value = token.split("=")[:2]
            retval[key] = value
    return retval
# Loading
# Inputs/outputs come from Snakemake when run inside a workflow; otherwise
# fall back to globbing the default data layout for interactive runs.
if "snakemake" in sys.modules:
    net_files = snakemake.input["net_files"]
    emb_files = snakemake.input["emb_files"]
    edge_files = snakemake.input["edge_files"]
    output_file = snakemake.output["output_file"]
else:
    net_files = [f for f in glob("../data/link-prediction/networks/net=*")]
    emb_files = [f for f in glob("../data/link-prediction/embeddings/*")]
    edge_files = [
        f for f in glob("../data/link-prediction/networks/test_edgelist_*.csv")
    ]
    output_file = "../data/link-prediction/results/auc_score.csv"
# %%
# Loading
#
# One row per file, with the filename-encoded key=value parameters as columns.
emb_file_table = pd.DataFrame([get_params(r) for r in emb_files])
net_file_table = pd.DataFrame([get_params(r) for r in net_files])
edge_file_table = pd.DataFrame([get_params(r) for r in edge_files])
# %%
# Merging
#
# Rename the generic "filename" column so the merged table keeps all three
# paths, then join on whatever parameter columns the tables share.
emb_file_table = emb_file_table.rename(columns={"filename": "emb_file"})
edge_file_table = edge_file_table.rename(columns={"filename": "edge_file"})
net_file_table = net_file_table.rename(columns={"filename": "net_file"})
cols = list(set(emb_file_table.columns).intersection(set(edge_file_table.columns)))
file_table = pd.merge(emb_file_table, edge_file_table, on=cols)
cols = list(set(file_table.columns).intersection(set(net_file_table.columns)))
file_table = pd.merge(file_table, net_file_table, on=cols)
# %%
# Calculate the AUC
#
def calc_modeled_prob(emb, net, src, trg, model_name, membership, offset):
    """Edge likelihood score for (src, trg) pairs under the given model.

    Dot-product models return the raw embedding similarity; other models add
    their fitted bias/offset terms. NOTE(review): `membership` is currently
    unused, and an unrecognised model_name falls through returning None —
    confirm every model passed in is covered by a branch below.
    """
    # Embedding dot product for each (src, trg) pair.
    dotsim = np.sum(emb[src, :] * emb[trg, :], axis=1)
    if model_name in [
        "deepwalk",
        "residual2vec-unbiased",
        "residual2vec-dotsim",
        "residual2vec-truncated-dotsim",
        "glove-dotsim",
        "jresidual2vec-unbiased",
        "node2vec-unbiased",
        "node2vec-qhalf",
        "node2vec-qdouble",
        "leigenmap",
        "netmf",
        "node2vec",
        "fairwalk",
        "levy-word2vec",
        "gcn",
        "gat",
        "graphsage",
        "gcn-doubleK",
        "graphsage-doubleK",
        "gat-doubleK",
    ]:
        # Plain dot-product similarity models.
        return dotsim
    elif model_name == "glee":
        # GLEE embeds with reversed sign convention.
        return -dotsim
    elif model_name == "glove":
        # Add the GloVe bias terms fitted on the observed network.
        a, b = utils_link_pred.fit_glove_bias(net, emb)
        return dotsim + a[src] + b[trg]
    elif model_name in [
        "residual2vec",
        "jresidual2vec",
        "residual2vec-sim",
        "residual2vec-truncated",
        "lndeg",
    ]:
        # Modeled probability using degree
        outdeg = np.array(net.sum(axis=1)).reshape(-1)
        indeg = np.array(net.sum(axis=0)).reshape(-1)
        # max(.., 1) guards the log against zero-degree nodes.
        return (
            dotsim
            + np.log(np.maximum(indeg[src], 1))
            + np.log(np.maximum(outdeg[trg], 1))
        )
    elif model_name in ["residual2vec-adap"]:
        # Modeled probability using degree
        # NOTE(review): indeg/outdeg are computed but unused in this branch;
        # the learned per-node offsets are used instead.
        outdeg = np.array(net.sum(axis=1)).reshape(-1)
        indeg = np.array(net.sum(axis=0)).reshape(-1)
        return dotsim + offset[src] + offset[trg]
# Duplicate the residual2vec/glove rows as "-dotsim" variants so both the
# bias-corrected and plain dot-product scores get evaluated.
dg = file_table[file_table["model"].isin(["residual2vec", "glove"])]
dg["model"] += "-dotsim"
file_table = pd.concat([file_table, dg], ignore_index=True)
#
# Evaluation
#
def eval_link_pred(edge_file, df):
    """Score every embedding in ``df`` against one test-edge file.

    Parameters
    ----------
    edge_file : str
        CSV of candidate edges with columns "0" (source), "1" (target) and
        "edge_type" (binary label: positive vs. negative edge).
    df : pd.DataFrame
        Rows of `file_table` sharing this edge file; each row names an
        embedding file, a network file, and a model.

    Returns
    -------
    list of pd.Series
        The input rows, each augmented with a "score" column (ROC-AUC).
    """
    results = []
    edge_table = pd.read_csv(edge_file).rename(columns={"0": "src", "1": "trg"})
    # Symmetrize the adjacency matrix (treat the network as undirected).
    net = sparse.load_npz(df["net_file"].values[0])
    net = net + net.T
    for _i, row in df.iterrows():
        data = np.load(row["emb_file"])
        emb = data["emb"]
        # Default to zeros when the npz archive lacks membership/offset.
        membership = np.zeros(emb.shape[0])
        node_offset = np.zeros(emb.shape[0])
        if "membership" in data.keys():
            membership = data["membership"]
        if "offset" in data.keys():
            offset = data["offset"]
            node_offset = offset
        src, trg, y = edge_table["src"], edge_table["trg"], edge_table["edge_type"]
        # Drop test edges that reference nodes outside the embedding.
        n = emb.shape[0]
        s = (src < n) & (trg < n)
        src, trg, y = src[s], trg[s], y[s]
        likelihood = calc_modeled_prob(
            emb, net, src, trg, row["model"], membership, node_offset
        )
        # node_offset = (
        #     np.log(np.maximum(1, np.array(net[:, :n][:n, :].sum(axis=0)))).reshape(-1)
        #     * node_offset
        # )
        # NaN scores would crash roc_auc_score; fall back to chance level.
        if any(np.isnan(likelihood)):
            score = 0.5
        else:
            score = roc_auc_score(y, likelihood)
        row["score"] = score
        results += [row]
    return results
# Evaluate each group of rows sharing an edge file (one progress tick per
# edge file via tqdm).
list_results = [
    eval_link_pred(edge_file, df)
    for edge_file, df in tqdm(file_table.groupby("edge_file"))
]
#
# Merge
#
# Flatten the per-edge-file lists of scored rows into one table.
results = []
for res in list_results:
    results += res
result_table = pd.DataFrame(results)
#
# Save
#
# %%
# NOTE(review): `output_file` is defined earlier in the file — confirm path.
result_table.to_csv(output_file, index=False)
| [
"pandas.read_csv",
"pathlib.Path",
"scipy.sparse.load_npz",
"pandas.merge",
"sklearn.metrics.roc_auc_score",
"utils_link_pred.fit_glove_bias",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"pandas.DataFrame",
"numpy.maximum",
"numpy.load",
"pandas.concat",
"glob.glob"
] | [((1643, 1693), 'pandas.merge', 'pd.merge', (['emb_file_table', 'edge_file_table'], {'on': 'cols'}), '(emb_file_table, edge_file_table, on=cols)\n', (1651, 1693), True, 'import pandas as pd\n'), ((1787, 1832), 'pandas.merge', 'pd.merge', (['file_table', 'net_file_table'], {'on': 'cols'}), '(file_table, net_file_table, on=cols)\n', (1795, 1832), True, 'import pandas as pd\n'), ((3534, 3580), 'pandas.concat', 'pd.concat', (['[file_table, dg]'], {'ignore_index': '(True)'}), '([file_table, dg], ignore_index=True)\n', (3543, 3580), True, 'import pandas as pd\n'), ((5078, 5099), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (5090, 5099), True, 'import pandas as pd\n'), ((1951, 1992), 'numpy.sum', 'np.sum', (['(emb[src, :] * emb[trg, :])'], {'axis': '(1)'}), '(emb[src, :] * emb[trg, :], axis=1)\n', (1957, 1992), True, 'import numpy as np\n'), ((3742, 3783), 'scipy.sparse.load_npz', 'sparse.load_npz', (["df['net_file'].values[0]"], {}), "(df['net_file'].values[0])\n", (3757, 3783), False, 'from scipy import sparse\n'), ((3855, 3879), 'numpy.load', 'np.load', (["row['emb_file']"], {}), "(row['emb_file'])\n", (3862, 3879), True, 'import numpy as np\n'), ((3927, 3949), 'numpy.zeros', 'np.zeros', (['emb.shape[0]'], {}), '(emb.shape[0])\n', (3935, 3949), True, 'import numpy as np\n'), ((3972, 3994), 'numpy.zeros', 'np.zeros', (['emb.shape[0]'], {}), '(emb.shape[0])\n', (3980, 3994), True, 'import numpy as np\n'), ((794, 840), 'glob.glob', 'glob', (['"""../data/link-prediction/networks/net=*"""'], {}), "('../data/link-prediction/networks/net=*')\n", (798, 840), False, 'from glob import glob\n'), ((870, 914), 'glob.glob', 'glob', (['"""../data/link-prediction/embeddings/*"""'], {}), "('../data/link-prediction/embeddings/*')\n", (874, 914), False, 'from glob import glob\n'), ((954, 1014), 'glob.glob', 'glob', (['"""../data/link-prediction/networks/test_edgelist_*.csv"""'], {}), "('../data/link-prediction/networks/test_edgelist_*.csv')\n", (958, 1014), 
False, 'from glob import glob\n'), ((3668, 3690), 'pandas.read_csv', 'pd.read_csv', (['edge_file'], {}), '(edge_file)\n', (3679, 3690), True, 'import pandas as pd\n'), ((4691, 4711), 'numpy.isnan', 'np.isnan', (['likelihood'], {}), '(likelihood)\n', (4699, 4711), True, 'import numpy as np\n'), ((4772, 4800), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'likelihood'], {}), '(y, likelihood)\n', (4785, 4800), False, 'from sklearn.metrics import roc_auc_score\n'), ((314, 336), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (326, 336), False, 'import pathlib\n'), ((2641, 2681), 'utils_link_pred.fit_glove_bias', 'utils_link_pred.fit_glove_bias', (['net', 'emb'], {}), '(net, emb)\n', (2671, 2681), False, 'import utils_link_pred\n'), ((3139, 3165), 'numpy.maximum', 'np.maximum', (['outdeg[trg]', '(1)'], {}), '(outdeg[trg], 1)\n', (3149, 3165), True, 'import numpy as np\n'), ((3091, 3116), 'numpy.maximum', 'np.maximum', (['indeg[src]', '(1)'], {}), '(indeg[src], 1)\n', (3101, 3116), True, 'import numpy as np\n')] |
"""
Base class for Filters, Factors and Classifiers
"""
from abc import ABCMeta, abstractproperty
from bisect import insort
from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
searchsorted,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonExistentAssetInTimeFrame,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .domain import Domain, GENERIC, infer_domain
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
    """
    Base class for terms in a Pipeline API compute graph.
    """
    # These are NotSpecified because a subclass is required to provide them.
    dtype = NotSpecified
    missing_value = NotSpecified
    # Subclasses aren't required to provide `params`. The default behavior is
    # no params.
    params = ()
    # All terms are generic by default.
    domain = GENERIC
    # Determines if a term is safe to be used as a windowed input.
    window_safe = False
    # The dimensions of the term's output (1D or 2D).
    ndim = 2
    # Weak cache of previously-constructed Terms keyed by _static_identity().
    # Entries disappear automatically once nothing else references the term.
    _term_cache = WeakValueDictionary()
    def __new__(cls,
                domain=NotSpecified,
                dtype=NotSpecified,
                missing_value=NotSpecified,
                window_safe=NotSpecified,
                ndim=NotSpecified,
                # params is explicitly not allowed to be passed to an instance.
                *args,
                **kwargs):
        """
        Memoized constructor for Terms.
        Caching previously-constructed Terms is useful because it allows us to
        only compute equivalent sub-expressions once when traversing a Pipeline
        dependency graph.
        Caching previously-constructed Terms is **sane** because terms and
        their inputs are both conceptually immutable.
        """
        # Subclasses can override these class-level attributes to provide
        # different default values for instances.
        if domain is NotSpecified:
            domain = cls.domain
        if dtype is NotSpecified:
            dtype = cls.dtype
        if missing_value is NotSpecified:
            missing_value = cls.missing_value
        if ndim is NotSpecified:
            ndim = cls.ndim
        if window_safe is NotSpecified:
            window_safe = cls.window_safe
        dtype, missing_value = validate_dtype(
            cls.__name__,
            dtype,
            missing_value,
        )
        params = cls._pop_params(kwargs)
        identity = cls._static_identity(
            domain=domain,
            dtype=dtype,
            missing_value=missing_value,
            window_safe=window_safe,
            ndim=ndim,
            params=params,
            *args, **kwargs
        )
        # Return the cached instance for an equivalent term if one exists;
        # otherwise construct, initialize (via _init, not __init__), and cache.
        try:
            return cls._term_cache[identity]
        except KeyError:
            new_instance = cls._term_cache[identity] = \
                super(Term, cls).__new__(cls)._init(
                    domain=domain,
                    dtype=dtype,
                    missing_value=missing_value,
                    window_safe=window_safe,
                    ndim=ndim,
                    params=params,
                    *args, **kwargs
                )
            return new_instance
    @classmethod
    def _pop_params(cls, kwargs):
        """
        Pop entries from the `kwargs` passed to cls.__new__ based on the values
        in `cls.params`.
        Parameters
        ----------
        kwargs : dict
            The kwargs passed to cls.__new__.
        Returns
        -------
        params : list[(str, object)]
            A list of string, value pairs containing the entries in cls.params.
        Raises
        ------
        TypeError
            Raised if any parameter values are not passed or not hashable.
        """
        params = cls.params
        if not isinstance(params, Mapping):
            params = {k: NotSpecified for k in params}
        param_values = []
        for key, default_value in params.items():
            try:
                value = kwargs.pop(key, default_value)
                if value is NotSpecified:
                    raise KeyError(key)
                # Check here that the value is hashable so that we fail here
                # instead of trying to hash the param values tuple later.
                hash(value)
            except KeyError:
                raise TypeError(
                    "{typename} expected a keyword parameter {name!r}.".format(
                        typename=cls.__name__,
                        name=key
                    )
                )
            except TypeError:
                # Value wasn't hashable.
                raise TypeError(
                    "{typename} expected a hashable value for parameter "
                    "{name!r}, but got {value!r} instead.".format(
                        typename=cls.__name__,
                        name=key,
                        value=value,
                    )
                )
            param_values.append((key, value))
        return tuple(param_values)
    def __init__(self, *args, **kwargs):
        """
        Noop constructor to play nicely with our caching __new__. Subclasses
        should implement _init instead of this method.
        When a class' __new__ returns an instance of that class, Python will
        automatically call __init__ on the object, even if a new object wasn't
        actually constructed. Because we memoize instances, we often return an
        object that was already initialized from __new__, in which case we
        don't want to call __init__ again.
        Subclasses that need to initialize new instances should override _init,
        which is guaranteed to be called only once.
        """
        pass
    @expect_types(key=Asset)
    def __getitem__(self, key):
        # Indexing a term by an Asset extracts that asset's column as a
        # 1-dimensional Slice term. Loadable terms cannot be sliced.
        if isinstance(self, LoadableTerm):
            raise NonSliceableTerm(term=self)
        return Slice(self, key)
    @classmethod
    def _static_identity(cls,
                         domain,
                         dtype,
                         missing_value,
                         window_safe,
                         ndim,
                         params):
        """
        Return the identity of the Term that would be constructed from the
        given arguments.
        Identities that compare equal will cause us to return a cached instance
        rather than constructing a new one. We do this primarily because it
        makes dependency resolution easier.
        This is a classmethod so that it can be called from Term.__new__ to
        determine whether to produce a new instance.
        """
        return (cls, domain, dtype, missing_value, window_safe, ndim, params)
    def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
        """
        Parameters
        ----------
        domain : zipline.pipeline.domain.Domain
            The domain of this term.
        dtype : np.dtype
            Dtype of this term's output.
        missing_value : object
            Missing value for this term.
        ndim : 1 or 2
            The dimensionality of this term.
        params : tuple[(str, hashable)]
            Tuple of key/value pairs of additional parameters.
        """
        self.domain = domain
        self.dtype = dtype
        self.missing_value = missing_value
        self.window_safe = window_safe
        self.ndim = ndim
        # Reject any param whose name collides with an existing attribute.
        for name, value in params:
            if hasattr(self, name):
                raise TypeError(
                    "Parameter {name!r} conflicts with already-present"
                    " attribute with value {value!r}.".format(
                        name=name,
                        value=getattr(self, name),
                    )
                )
            # TODO: Consider setting these values as attributes and replacing
            # the boilerplate in NumericalExpression, Rank, and
            # PercentileFilter.
        self.params = dict(params)
        # Make sure that subclasses call super() in their _validate() methods
        # by setting this flag. The base class implementation of _validate
        # should set this flag to True.
        self._subclass_called_super_validate = False
        self._validate()
        assert self._subclass_called_super_validate, (
            "Term._validate() was not called.\n"
            "This probably means that you overrode _validate"
            " without calling super()."
        )
        del self._subclass_called_super_validate
        return self
    def _validate(self):
        """
        Assert that this term is well-formed. This should be called exactly
        once, at the end of Term._init().
        """
        # mark that we got here to enforce that subclasses overriding _validate
        # call super().
        self._subclass_called_super_validate = True
    def compute_extra_rows(self,
                           all_dates,
                           start_date,
                           end_date,
                           min_extra_rows):
        """
        Calculate the number of extra rows needed to compute ``self``.
        Must return at least ``min_extra_rows``, and the default implementation
        is to just return ``min_extra_rows``. This is overridden by
        downsampled terms to ensure that the first date computed is a
        recomputation date.
        Parameters
        ----------
        all_dates : pd.DatetimeIndex
            The trading sessions against which ``self`` will be computed.
        start_date : pd.Timestamp
            The first date for which final output is requested.
        end_date : pd.Timestamp
            The last date for which final output is requested.
        min_extra_rows : int
            The minimum number of extra rows required of ``self``, as
            determined by other terms that depend on ``self``.
        Returns
        -------
        extra_rows : int
            The number of extra rows to compute. Must be at least
            ``min_extra_rows``.
        """
        return min_extra_rows
    @abstractproperty
    def inputs(self):
        """
        A tuple of other Terms needed as direct inputs for this Term.
        """
        raise NotImplementedError('inputs')
    @abstractproperty
    def windowed(self):
        """
        Boolean indicating whether this term is a trailing-window computation.
        """
        raise NotImplementedError('windowed')
    @abstractproperty
    def mask(self):
        """
        A Filter representing asset/date pairs to include while
        computing this Term. (True means include; False means exclude.)
        """
        raise NotImplementedError('mask')
    @abstractproperty
    def dependencies(self):
        """
        A dictionary mapping terms that must be computed before `self` to the
        number of extra rows needed for those terms.
        """
        raise NotImplementedError('dependencies')
    def graph_repr(self):
        """A short repr to use when rendering GraphViz graphs.
        """
        # Default graph_repr is just the name of the type.
        return type(self).__name__
    def recursive_repr(self):
        """A short repr to use when recursively rendering terms with inputs.
        """
        # Default recursive_repr is just the name of the type.
        return type(self).__name__
class AssetExists(Term):
    """
    Pseudo-filter marking, for every (date, asset) pair, whether the asset
    existed on that day.

    Serves as the default mask for any term that was not given one
    explicitly. It behaves like a Filter (one boolean per asset per date)
    but deliberately does not subclass Filter, because the PipelineEngine
    computes it directly rather than via `_compute`. It is guaranteed to be
    available as an input for any term computed by
    SimplePipelineEngine.run_pipeline().

    See Also
    --------
    zipline.assets.AssetFinder.lifetimes
    """
    dtype = bool_dtype
    dataset = None
    inputs = ()
    dependencies = {}
    mask = None
    windowed = False

    def __repr__(self):
        return "AssetExists()"

    graph_repr = __repr__

    def _compute(self, today, assets, out):
        # The engine supplies this term's values itself; reaching this method
        # means the engine was misconfigured.
        raise NotImplementedError(
            "AssetExists cannot be computed directly."
            " Check your PipelineEngine configuration."
        )
class InputDates(Term):
    """
    One-dimensional term whose values are the date labels for the rows of
    other term inputs.

    Guaranteed to be available as an input for any term computed by
    SimplePipelineEngine.run_pipeline().
    """
    ndim = 1
    dataset = None
    dtype = datetime64ns_dtype
    inputs = ()
    dependencies = {}
    mask = None
    windowed = False
    window_safe = True

    def __repr__(self):
        return "InputDates()"

    graph_repr = __repr__

    def _compute(self, today, assets, out):
        # The engine supplies this term's values itself; reaching this method
        # means the engine was misconfigured.
        raise NotImplementedError(
            "InputDates cannot be computed directly."
            " Check your PipelineEngine configuration."
        )
class LoadableTerm(Term):
    """
    A Term whose data is supplied by an external PipelineLoader rather than
    computed from other terms.

    Base class for :class:`zipline.pipeline.data.BoundColumn`.
    """
    windowed = False
    inputs = ()

    @lazyval
    def dependencies(self):
        # A loadable term depends only on its mask, with no extra rows.
        mask_rows = {self.mask: 0}
        return mask_rows
class ComputableTerm(Term):
    """
    A Term that should be computed from a tuple of inputs.
    This is the base class for :class:`zipline.pipeline.Factor`,
    :class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
    """
    inputs = NotSpecified
    outputs = NotSpecified
    window_length = NotSpecified
    mask = NotSpecified
    domain = NotSpecified
    # NOTE: the parameter defaults below bind ComputableTerm's class-level
    # attributes (all NotSpecified) at definition time; subclass overrides
    # are picked up by the ``cls.<attr>`` fallbacks in the body.
    def __new__(cls,
                inputs=inputs,
                outputs=outputs,
                window_length=window_length,
                mask=mask,
                domain=domain,
                *args, **kwargs):
        if inputs is NotSpecified:
            inputs = cls.inputs
        # Having inputs = NotSpecified is an error, but we handle it later
        # in self._validate rather than here.
        if inputs is not NotSpecified:
            # Allow users to specify lists as class-level defaults, but
            # normalize to a tuple so that inputs is hashable.
            inputs = tuple(inputs)
            # Make sure all our inputs are valid pipeline objects before trying
            # to infer a domain.
            non_terms = [t for t in inputs if not isinstance(t, Term)]
            if non_terms:
                raise NonPipelineInputs(cls.__name__, non_terms)
            if domain is NotSpecified:
                domain = infer_domain(inputs)
        if outputs is NotSpecified:
            outputs = cls.outputs
        if outputs is not NotSpecified:
            # Normalize to a tuple so that outputs is hashable.
            outputs = tuple(outputs)
        if mask is NotSpecified:
            mask = cls.mask
        if mask is NotSpecified:
            mask = AssetExists()
        if window_length is NotSpecified:
            window_length = cls.window_length
        return super(ComputableTerm, cls).__new__(
            cls,
            inputs=inputs,
            outputs=outputs,
            mask=mask,
            window_length=window_length,
            domain=domain,
            *args, **kwargs
        )
    def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
        # One-time initializer; see Term.__init__ for why this isn't __init__.
        self.inputs = inputs
        self.outputs = outputs
        self.window_length = window_length
        self.mask = mask
        return super(ComputableTerm, self)._init(*args, **kwargs)
    @classmethod
    def _static_identity(cls,
                         inputs,
                         outputs,
                         window_length,
                         mask,
                         *args,
                         **kwargs):
        return (
            super(ComputableTerm, cls)._static_identity(*args, **kwargs),
            inputs,
            outputs,
            window_length,
            mask,
        )
    def _validate(self):
        super(ComputableTerm, self)._validate()
        # Check inputs.
        if self.inputs is NotSpecified:
            raise TermInputsNotSpecified(termname=type(self).__name__)
        if not isinstance(self.domain, Domain):
            raise TypeError(
                "Expected {}.domain to be an instance of Domain, "
                "but got {}.".format(type(self).__name__, type(self.domain))
            )
        # Check outputs.
        if self.outputs is NotSpecified:
            pass
        elif not self.outputs:
            raise TermOutputsEmpty(termname=type(self).__name__)
        else:
            # Raise an exception if there are any naming conflicts between the
            # term's output names and certain attributes.
            disallowed_names = [
                attr for attr in dir(ComputableTerm)
                if not attr.startswith('_')
            ]
            # The name 'compute' is an added special case that is disallowed.
            # Use insort to add it to the list in alphabetical order.
            insort(disallowed_names, 'compute')
            for output in self.outputs:
                if output.startswith('_') or output in disallowed_names:
                    raise InvalidOutputName(
                        output_name=output,
                        termname=type(self).__name__,
                        disallowed_names=disallowed_names,
                    )
        if self.window_length is NotSpecified:
            raise WindowLengthNotSpecified(termname=type(self).__name__)
        if self.mask is NotSpecified:
            # This isn't user error, this is a bug in our code.
            raise AssertionError("{term} has no mask".format(term=self))
        # Windowed terms may only consume inputs flagged safe for windowing.
        if self.window_length > 1:
            for child in self.inputs:
                if not child.window_safe:
                    raise NonWindowSafeInput(parent=self, child=child)
    def _compute(self, inputs, dates, assets, mask):
        """
        Subclasses should implement this to perform actual computation.
        This is named ``_compute`` rather than just ``compute`` because
        ``compute`` is reserved for user-supplied functions in
        CustomFilter/CustomFactor/CustomClassifier.
        """
        raise NotImplementedError()
    @lazyval
    def windowed(self):
        """
        Whether or not this term represents a trailing window computation.
        If term.windowed is truthy, its compute_from_windows method will be
        called with instances of AdjustedArray as inputs.
        If term.windowed is falsey, its compute_from_baseline will be called
        with instances of np.ndarray as inputs.
        """
        return (
            self.window_length is not NotSpecified
            and self.window_length > 0
        )
    @lazyval
    def dependencies(self):
        """
        The number of extra rows needed for each of our inputs to compute this
        term.
        """
        extra_input_rows = max(0, self.window_length - 1)
        out = {}
        for term in self.inputs:
            out[term] = extra_input_rows
        out[self.mask] = 0
        return out
    @expect_types(data=ndarray)
    def postprocess(self, data):
        """
        Called with an result of ``self``, unravelled (i.e. 1-dimensional)
        after any user-defined screens have been applied.
        This is mostly useful for transforming the dtype of an output, e.g., to
        convert a LabelArray into a pandas Categorical.
        The default implementation is to just return data unchanged.
        """
        return data
    def to_workspace_value(self, result, assets):
        """
        Called with a column of the result of a pipeline. This needs to put
        the data into a format that can be used in a workspace to continue
        doing computations.
        Parameters
        ----------
        result : pd.Series
            A multiindexed series with (dates, assets) whose values are the
            results of running this pipeline term over the dates.
        assets : pd.Index
            All of the assets being requested. This allows us to correctly
            shape the workspace value.
        Returns
        -------
        workspace_value : array-like
            An array like value that the engine can consume.
        """
        return result.unstack().fillna(self.missing_value).reindex(
            columns=assets,
            fill_value=self.missing_value,
        ).values
    def _downsampled_type(self, *args, **kwargs):
        """
        The expression type to return from self.downsample().
        """
        raise NotImplementedError(
            "downsampling is not yet implemented "
            "for instances of %s." % type(self).__name__
        )
    @expect_downsample_frequency
    @templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
    def downsample(self, frequency):
        """
        Make a term that computes from ``self`` at lower-than-daily frequency.
        Parameters
        ----------
        {frequency}
        """
        return self._downsampled_type(term=self, frequency=frequency)
    def _aliased_type(self, *args, **kwargs):
        """
        The expression type to return from self.alias().
        """
        raise NotImplementedError(
            "alias is not yet implemented "
            "for instances of %s." % type(self).__name__
        )
    @templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
    def alias(self, name):
        """
        Make a term from ``self`` that names the expression.
        Parameters
        ----------
        {name}
        Returns
        -------
        aliased : Aliased
            ``self`` with a name.
        Notes
        -----
        This is useful for giving a name to a numerical or boolean expression.
        """
        return self._aliased_type(term=self, name=name)
    def __repr__(self):
        return (
            "{type}([{inputs}], {window_length})"
        ).format(
            type=type(self).__name__,
            inputs=', '.join(i.recursive_repr() for i in self.inputs),
            window_length=self.window_length,
        )
    def recursive_repr(self):
        return type(self).__name__ + '(...)'
class Slice(ComputableTerm):
    """
    Term for extracting a single column of another term's output.
    Parameters
    ----------
    term : zipline.pipeline.term.Term
        The term from which to extract a column of data.
    asset : zipline.assets.Asset
        The asset corresponding to the column of `term` to be extracted.
    Notes
    -----
    Users should rarely construct instances of `Slice` directly. Instead, they
    should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
    """
    def __new__(cls, term, asset):
        # Inherit dtype/missing_value/window-safety from the sliced term;
        # the result is 1-dimensional (one value per date).
        return super(Slice, cls).__new__(
            cls,
            asset=asset,
            inputs=[term],
            window_length=0,
            mask=term.mask,
            dtype=term.dtype,
            missing_value=term.missing_value,
            window_safe=term.window_safe,
            ndim=1,
        )
    def __repr__(self):
        # BUG FIX: the previous implementation had a stray ')' in the
        # template, passed an unused `type` kwarg, and read `__name__` off a
        # Term *instance* (terms have no `__name__`), which raised
        # AttributeError. Use the term's recursive_repr() instead.
        return "{parent_term}[{asset}]".format(
            parent_term=self.inputs[0].recursive_repr(),
            asset=self._asset,
        )
    def _init(self, asset, *args, **kwargs):
        # One-time initializer; see Term.__init__ for why this isn't __init__.
        self._asset = asset
        return super(Slice, self)._init(*args, **kwargs)
    @classmethod
    def _static_identity(cls, asset, *args, **kwargs):
        return (super(Slice, cls)._static_identity(*args, **kwargs), asset)
    def _compute(self, windows, dates, assets, mask):
        """Extract the sliced asset's column from the input term's window."""
        asset = self._asset
        asset_column = searchsorted(assets.values, asset.sid)
        if assets[asset_column] != asset.sid:
            raise NonExistentAssetInTimeFrame(
                asset=asset, start_date=dates[0], end_date=dates[-1],
            )
        # Return a 2D array with one column rather than a 1D array of the
        # column.
        return windows[0][:, [asset_column]]
    @property
    def asset(self):
        """Get the asset whose data is selected by this slice.
        """
        return self._asset
    @property
    def _downsampled_type(self):
        raise NotImplementedError(
            'downsampling of slices is not yet supported'
        )
def validate_dtype(termname, dtype, missing_value):
    """
    Validate the ``dtype`` and ``missing_value`` supplied to Term.__new__.

    Coerces ``dtype`` to a real numpy dtype, verifies that we know how to
    represent it, and fills in (or validates) the missing value to use.

    Parameters
    ----------
    termname : str
        Name of the term being constructed; used in error messages.
    dtype : object
        Candidate dtype, or NotSpecified.
    missing_value : object
        Candidate missing value, or NotSpecified.

    Returns
    -------
    validated_dtype, validated_missing_value : np.dtype, any
        The dtype and missing_value to use for the new term.

    Raises
    ------
    DTypeNotSpecified
        When no dtype was supplied by either the instance or its class.
    NotDType
        When the supplied value cannot be coerced to a numpy dtype.
    UnsupportedDType
        When we cannot represent values of the coerced dtype.
    TypeError
        When ``missing_value`` cannot be safely cast to ``dtype``.
    """
    if dtype is NotSpecified:
        raise DTypeNotSpecified(termname=termname)
    try:
        dtype = dtype_class(dtype)
    except TypeError:
        raise NotDType(dtype=dtype, termname=termname)
    if not can_represent_dtype(dtype):
        raise UnsupportedDType(dtype=dtype, termname=termname)
    if missing_value is NotSpecified:
        missing_value = default_missing_value_for_dtype(dtype)
    try:
        # Categoricals are stored with object dtype, under which numpy would
        # happily accept numerical values, so validate them explicitly first.
        if dtype == categorical_dtype:
            _assert_valid_categorical_missing_value(missing_value)
        # For every dtype, probe whether the missing value can be represented
        # by building a one-element array and casting it. 'same_kind' permits
        # e.g. float32 <-> float64 but rejects str <-> int.
        array([missing_value]).astype(dtype=dtype, casting='same_kind')
    except TypeError as err:
        raise TypeError(
            "Missing value {value!r} is not a valid choice "
            "for term {termname} with dtype {dtype}.\n\n"
            "Coercion attempt failed with: {error}".format(
                termname=termname,
                value=missing_value,
                dtype=dtype,
                error=err,
            )
        )
    return dtype, missing_value
def _assert_valid_categorical_missing_value(value):
    """
    Check that ``value`` may serve as the missing_value of a
    categorical_dtype Term.

    Raises a TypeError when it is not one of LabelArray's supported scalar
    types.
    """
    allowed = LabelArray.SUPPORTED_SCALAR_TYPES
    if isinstance(value, allowed):
        return
    raise TypeError(
        "Categorical terms must have missing values of type "
        "{types}.".format(
            types=' or '.join(t.__name__ for t in allowed),
        )
    )
| [
"zipline.errors.UnsupportedDType",
"weakref.WeakValueDictionary",
"zipline.errors.NonSliceableTerm",
"zipline.errors.NonWindowSafeInput",
"numpy.searchsorted",
"zipline.lib.adjusted_array.can_represent_dtype",
"numpy.array",
"zipline.utils.numpy_utils.default_missing_value_for_dtype",
"zipline.utils... | [((1286, 1317), 'six.with_metaclass', 'with_metaclass', (['ABCMeta', 'object'], {}), '(ABCMeta, object)\n', (1300, 1317), False, 'from six import with_metaclass\n'), ((1883, 1904), 'weakref.WeakValueDictionary', 'WeakValueDictionary', ([], {}), '()\n', (1902, 1904), False, 'from weakref import WeakValueDictionary\n'), ((6608, 6631), 'zipline.utils.input_validation.expect_types', 'expect_types', ([], {'key': 'Asset'}), '(key=Asset)\n', (6620, 6631), False, 'from zipline.utils.input_validation import expect_types\n'), ((20150, 20176), 'zipline.utils.input_validation.expect_types', 'expect_types', ([], {'data': 'ndarray'}), '(data=ndarray)\n', (20162, 20176), False, 'from zipline.utils.input_validation import expect_types\n'), ((21812, 21878), 'zipline.utils.sharedoc.templated_docstring', 'templated_docstring', ([], {'frequency': 'PIPELINE_DOWNSAMPLING_FREQUENCY_DOC'}), '(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)\n', (21831, 21878), False, 'from zipline.utils.sharedoc import templated_docstring, PIPELINE_ALIAS_NAME_DOC, PIPELINE_DOWNSAMPLING_FREQUENCY_DOC\n'), ((22428, 22477), 'zipline.utils.sharedoc.templated_docstring', 'templated_docstring', ([], {'name': 'PIPELINE_ALIAS_NAME_DOC'}), '(name=PIPELINE_ALIAS_NAME_DOC)\n', (22447, 22477), False, 'from zipline.utils.sharedoc import templated_docstring, PIPELINE_ALIAS_NAME_DOC, PIPELINE_DOWNSAMPLING_FREQUENCY_DOC\n'), ((24713, 24751), 'numpy.searchsorted', 'searchsorted', (['assets.values', 'asset.sid'], {}), '(assets.values, asset.sid)\n', (24725, 24751), False, 'from numpy import array, dtype as dtype_class, ndarray, searchsorted\n'), ((26208, 26244), 'zipline.errors.DTypeNotSpecified', 'DTypeNotSpecified', ([], {'termname': 'termname'}), '(termname=termname)\n', (26225, 26244), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, 
UnsupportedDType, WindowLengthNotSpecified\n'), ((26271, 26289), 'numpy.dtype', 'dtype_class', (['dtype'], {}), '(dtype)\n', (26282, 26289), True, 'from numpy import array, dtype as dtype_class, ndarray, searchsorted\n'), ((26379, 26405), 'zipline.lib.adjusted_array.can_represent_dtype', 'can_represent_dtype', (['dtype'], {}), '(dtype)\n', (26398, 26405), False, 'from zipline.lib.adjusted_array import can_represent_dtype\n'), ((26421, 26469), 'zipline.errors.UnsupportedDType', 'UnsupportedDType', ([], {'dtype': 'dtype', 'termname': 'termname'}), '(dtype=dtype, termname=termname)\n', (26437, 26469), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n'), ((26533, 26571), 'zipline.utils.numpy_utils.default_missing_value_for_dtype', 'default_missing_value_for_dtype', (['dtype'], {}), '(dtype)\n', (26564, 26571), False, 'from zipline.utils.numpy_utils import bool_dtype, categorical_dtype, datetime64ns_dtype, default_missing_value_for_dtype\n'), ((6725, 6752), 'zipline.errors.NonSliceableTerm', 'NonSliceableTerm', ([], {'term': 'self'}), '(term=self)\n', (6741, 6752), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n'), ((24816, 24902), 'zipline.errors.NonExistentAssetInTimeFrame', 'NonExistentAssetInTimeFrame', ([], {'asset': 'asset', 'start_date': 'dates[0]', 'end_date': 'dates[-1]'}), '(asset=asset, start_date=dates[0], end_date=\n dates[-1])\n', (24843, 24902), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, 
TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n'), ((26326, 26366), 'zipline.errors.NotDType', 'NotDType', ([], {'dtype': 'dtype', 'termname': 'termname'}), '(dtype=dtype, termname=termname)\n', (26334, 26366), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n'), ((15501, 15543), 'zipline.errors.NonPipelineInputs', 'NonPipelineInputs', (['cls.__name__', 'non_terms'], {}), '(cls.__name__, non_terms)\n', (15518, 15543), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n'), ((18043, 18078), 'bisect.insort', 'insort', (['disallowed_names', '"""compute"""'], {}), "(disallowed_names, 'compute')\n", (18049, 18078), False, 'from bisect import insort\n'), ((27195, 27217), 'numpy.array', 'array', (['[missing_value]'], {}), '([missing_value])\n', (27200, 27217), False, 'from numpy import array, dtype as dtype_class, ndarray, searchsorted\n'), ((18856, 18900), 'zipline.errors.NonWindowSafeInput', 'NonWindowSafeInput', ([], {'parent': 'self', 'child': 'child'}), '(parent=self, child=child)\n', (18874, 18900), False, 'from zipline.errors import DTypeNotSpecified, InvalidOutputName, NonExistentAssetInTimeFrame, NonSliceableTerm, NonWindowSafeInput, NotDType, NonPipelineInputs, TermInputsNotSpecified, TermOutputsEmpty, UnsupportedDType, WindowLengthNotSpecified\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.util import tf_inspect
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
  """Minimal BaseDebugWrapperSession used to observe callback traffic.

  Every callback records what it saw into a shared observer dict so the
  tests can assert how many times each hook fired and with what payload.
  """

  def __init__(self, sess, dump_root, observer, thread_name_filter=None):
    # Where debug tensor dumps will be written (as a file:// URL).
    self._dump_root = dump_root
    # Shared mutable dict the tests inspect after each run.
    self._obs = observer
    # Let the superclass wire up the wrapped session.
    framework.BaseDebugWrapperSession.__init__(
        self, sess, thread_name_filter=thread_name_filter)

  def on_session_init(self, request):
    """Record the init callback and proceed with normal wrapping."""
    self._obs["sess_init_count"] = self._obs["sess_init_count"] + 1
    self._obs["request_sess"] = request.session
    response = framework.OnSessionInitResponse(
        framework.OnSessionInitAction.PROCEED)
    return response

  def on_run_start(self, request):
    """Record fetches/feeds and request a debug run dumping to dump_root."""
    self._obs["on_run_start_count"] = self._obs["on_run_start_count"] + 1
    self._obs["run_fetches"] = request.fetches
    self._obs["run_feed_dict"] = request.feed_dict
    debug_urls = ["file://" + self._dump_root]
    return framework.OnRunStartResponse(
        framework.OnRunStartAction.DEBUG_RUN, debug_urls)

  def on_run_end(self, request):
    """Record the performed action and any TF runtime error."""
    self._obs["on_run_end_count"] = self._obs["on_run_end_count"] + 1
    self._obs["performed_action"] = request.performed_action
    self._obs["tf_error"] = request.tf_error
    return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
  """Wrapper that deliberately returns invalid callback values.

  Used to verify that BaseDebugWrapperSession rejects bad action values in
  OnSessionInitResponse / OnRunStartResponse and malformed debug URLs.
  """

  def __init__(
      self,
      sess,
      bad_init_action=None,
      bad_run_start_action=None,
      bad_debug_urls=None):
    """Constructor.

    Args:
      sess: The TensorFlow Session object to be wrapped.
      bad_init_action: (str) invalid action value returned from the
        on-session-init callback, or None for the valid PROCEED action.
      bad_run_start_action: (str) invalid action value returned from the
        on-run-start callback, or None for the valid DEBUG_RUN action.
      bad_debug_urls: invalid URL value(s) returned from the on-run-start
        callback, or None for an empty URL list.
    """
    self._bad_init_action = bad_init_action
    self._bad_run_start_action = bad_run_start_action
    self._bad_debug_urls = bad_debug_urls
    # Let the superclass wire up the wrapped session.
    framework.BaseDebugWrapperSession.__init__(self, sess)

  def on_session_init(self, request):
    # Fall back to the legal PROCEED action when no bad value was injected.
    action = self._bad_init_action or framework.OnSessionInitAction.PROCEED
    return framework.OnSessionInitResponse(action)

  def on_run_start(self, request):
    debug_urls = self._bad_debug_urls or []
    # Fall back to the legal DEBUG_RUN action when no bad value was injected.
    action = self._bad_run_start_action or framework.OnRunStartAction.DEBUG_RUN
    return framework.OnRunStartResponse(action, debug_urls)

  def on_run_end(self, request):
    return framework.OnRunEndResponse()
@test_util.run_v1_only("Sessions are not available in TF 2.x")
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
  """Tests for the debug wrapper session callbacks, dumps and threading."""

  def _no_rewrite_session_config(self):
    # Disable model pruning so every node stays in the executed graph and the
    # expected number of debug tensor dumps is produced.
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    return config_pb2.ConfigProto(graph_options=graph_options)

  def setUp(self):
    # Observer dict filled in by TestDebugWrapperSession's callbacks; each
    # test asserts on the counts/values recorded here.
    self._observer = {
        "sess_init_count": 0,
        "request_sess": None,
        "on_run_start_count": 0,
        "run_fetches": None,
        "run_feed_dict": None,
        "on_run_end_count": 0,
        "performed_action": None,
        "tf_error": None,
    }

    self._dump_root = tempfile.mkdtemp()

    self._sess = session.Session(config=self._no_rewrite_session_config())

    self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
    self._b_init_val = np.array([[2.0], [-1.0]])
    self._c_val = np.array([[-4.0], [6.0]])

    self._a_init = constant_op.constant(
        self._a_init_val, shape=[2, 2], name="a_init")
    self._b_init = constant_op.constant(
        self._b_init_val, shape=[2, 1], name="b_init")

    self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")

    self._a = variables.Variable(self._a_init, name="a1")
    self._b = variables.Variable(self._b_init, name="b")
    self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")

    # Matrix product of a and b. (a @ b == [[7], [-2]])
    self._p = math_ops.matmul(self._a, self._b, name="p1")

    # Matrix product of a and ph.
    self._q = math_ops.matmul(self._a, self._ph, name="q")

    # Sum of two vectors. (p + c == [[3], [4]])
    self._s = math_ops.add(self._p, self._c, name="s")

    # Initialize the variables.
    self._sess.run(self._a.initializer)
    self._sess.run(self._b.initializer)

  def tearDown(self):
    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      file_io.delete_recursively(self._dump_root)

    ops.reset_default_graph()

  def testSessionInit(self):
    self.assertEqual(0, self._observer["sess_init_count"])

    wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
                                            self._observer)

    # Assert that on-session-init callback is invoked.
    self.assertEqual(1, self._observer["sess_init_count"])

    # Assert that the request to the on-session-init callback carries the
    # correct session object.
    self.assertEqual(self._sess, self._observer["request_sess"])

    # Verify that the wrapper session implements the session.SessionInterface.
    self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
    self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
    self.assertEqual(self._sess.graph, wrapper_sess.graph)
    self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)

    # Check that the partial_run_setup and partial_run are not implemented for
    # the debug wrapper session.
    with self.assertRaises(NotImplementedError):
      wrapper_sess.partial_run_setup(self._p)

  def testInteractiveSessionInit(self):
    """The wrapper should work also on other subclasses of session.Session."""
    TestDebugWrapperSession(
        session.InteractiveSession(), self._dump_root, self._observer)

  def testSessionRun(self):
    wrapper = TestDebugWrapperSession(
        self._sess, self._dump_root, self._observer)

    # Check initial state of the observer.
    self.assertEqual(0, self._observer["on_run_start_count"])
    self.assertEqual(0, self._observer["on_run_end_count"])

    s = wrapper.run(self._s)

    # Assert the run return value is correct.
    self.assertAllClose(np.array([[3.0], [4.0]]), s)

    # Assert the on-run-start method is invoked.
    self.assertEqual(1, self._observer["on_run_start_count"])

    # Assert the on-run-start request reflects the correct fetch.
    self.assertEqual(self._s, self._observer["run_fetches"])

    # Assert the on-run-start request reflects the correct feed_dict.
    self.assertIsNone(self._observer["run_feed_dict"])

    # Assert the file debug URL has led to dump on the filesystem.
    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertEqual(7, len(dump.dumped_tensor_data))

    # Assert the on-run-end method is invoked.
    self.assertEqual(1, self._observer["on_run_end_count"])

    # Assert the performed action field in the on-run-end callback request is
    # correct.
    self.assertEqual(
        framework.OnRunStartAction.DEBUG_RUN,
        self._observer["performed_action"])

    # No TensorFlow runtime error should have happened.
    self.assertIsNone(self._observer["tf_error"])

  def testSessionInitInvalidSessionType(self):
    """Attempt to wrap a non-Session-type object should cause an exception."""
    wrapper = TestDebugWrapperSessionBadAction(self._sess)
    with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
      TestDebugWrapperSessionBadAction(wrapper)

  def testSessionInitBadActionValue(self):
    with self.assertRaisesRegex(
        ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
      TestDebugWrapperSessionBadAction(
          self._sess, bad_init_action="nonsense_action")

  def testRunStartBadActionValue(self):
    wrapper = TestDebugWrapperSessionBadAction(
        self._sess, bad_run_start_action="nonsense_action")
    with self.assertRaisesRegex(
        ValueError, "Invalid OnRunStartAction value: nonsense_action"):
      wrapper.run(self._s)

  def testRunStartBadURLs(self):
    # debug_urls ought to be a list of str, not a str. So an exception should
    # be raised during a run() call.
    wrapper = TestDebugWrapperSessionBadAction(
        self._sess, bad_debug_urls="file://foo")
    with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
      wrapper.run(self._s)

  def testErrorDuringRun(self):
    wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
                                      self._observer)

    # No matrix size mismatch.
    self.assertAllClose(
        np.array([[11.0], [-1.0]]),
        wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
    self.assertEqual(1, self._observer["on_run_end_count"])
    self.assertIsNone(self._observer["tf_error"])

    # Now there should be a matrix size mismatch error.
    wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
    self.assertEqual(2, self._observer["on_run_end_count"])
    self.assertTrue(
        isinstance(self._observer["tf_error"], errors.InvalidArgumentError))

  def testUsingWrappedSessionShouldWorkAsContextManager(self):
    wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
                                      self._observer)

    with wrapper as sess:
      self.assertAllClose([[3.0], [4.0]], self._s)
      self.assertEqual(1, self._observer["on_run_start_count"])
      self.assertEqual(self._s, self._observer["run_fetches"])
      self.assertEqual(1, self._observer["on_run_end_count"])

      self.assertAllClose(
          [[11.0], [-1.0]],
          sess.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
      self.assertEqual(2, self._observer["on_run_start_count"])
      self.assertEqual(self._q, self._observer["run_fetches"])
      self.assertEqual(2, self._observer["on_run_end_count"])

  def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
    wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
                                      self._observer)

    with wrapper.as_default():
      foo = constant_op.constant(42, name="foo")
      self.assertEqual(42, self.evaluate(foo))
      self.assertEqual(foo, self._observer["run_fetches"])

  def testWrapperShouldSupportSessionClose(self):
    wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
                                      self._observer)
    wrapper.close()

  def testWrapperThreadNameFilterMainThread(self):
    # Only runs on the main thread should be debugged; the child thread's
    # run must not produce dumps.
    wrapper = TestDebugWrapperSession(
        self._sess, self._dump_root, self._observer,
        thread_name_filter="MainThread")

    child_run_output = []
    def child_thread_job():
      child_run_output.append(wrapper.run(self._b_init))

    thread = threading.Thread(name="ChildThread", target=child_thread_job)
    thread.start()
    self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
    thread.join()
    self.assertAllClose([self._b_init_val], child_run_output)

    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertEqual(1, dump.size)
    self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)

  def testWrapperThreadNameFilterChildThread(self):
    # Inverse of the previous test: only the child thread's run is debugged.
    wrapper = TestDebugWrapperSession(
        self._sess, self._dump_root, self._observer,
        thread_name_filter=r"Child.*")

    child_run_output = []
    def child_thread_job():
      child_run_output.append(wrapper.run(self._b_init))

    thread = threading.Thread(name="ChildThread", target=child_thread_job)
    thread.start()
    self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
    thread.join()
    self.assertAllClose([self._b_init_val], child_run_output)

    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertEqual(1, dump.size)
    self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)

  def testWrapperThreadNameFilterBothThreads(self):
    # No filter: both threads' runs are debugged and both dumps appear.
    wrapper = TestDebugWrapperSession(
        self._sess, self._dump_root, self._observer,
        thread_name_filter=None)

    child_run_output = []
    def child_thread_job():
      child_run_output.append(wrapper.run(self._b_init))

    thread = threading.Thread(name="ChildThread", target=child_thread_job)
    thread.start()
    self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
    thread.join()
    self.assertAllClose([self._b_init_val], child_run_output)

    # validate=False because two independent dumps share the directory.
    dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
    self.assertEqual(2, dump.size)
    self.assertItemsEqual(
        ["a_init", "b_init"],
        [datum.node_name for datum in dump.dumped_tensor_data])
def _is_public_method_name(method_name):
return (method_name.startswith("__") and method_name.endswith("__")
or not method_name.startswith("_"))
class SessionWrapperPublicMethodParityTest(test_util.TensorFlowTestCase):
  """Checks BaseDebugWrapperSession exposes the same public API as sessions."""

  def _public_method_names(self, klass):
    # Collect the names of klass's public methods (see _is_public_method_name).
    members = tf_inspect.getmembers(klass, predicate=tf_inspect.ismethod)
    return [name for name, _ in members if _is_public_method_name(name)]

  def testWrapperHasAllPublicMethodsOfSession(self):
    session_methods = self._public_method_names(session.Session)
    wrapper_methods = self._public_method_names(
        framework.BaseDebugWrapperSession)
    # Every public Session method must be mirrored by the wrapper.
    not_mirrored = [name for name in session_methods
                    if name not in wrapper_methods]
    self.assertFalse(not_mirrored)

  def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
    session_methods = self._public_method_names(
        monitored_session.MonitoredSession)
    wrapper_methods = self._public_method_names(
        framework.BaseDebugWrapperSession)
    # Every public MonitoredSession method must be mirrored by the wrapper.
    not_mirrored = [name for name in session_methods
                    if name not in wrapper_methods]
    self.assertFalse(not_mirrored)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  googletest.main()
| [
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.debug.wrappers.framework.OnRunEndResponse",
"tensorflow.python.debug.wrappers.framework.BaseDebugWrapperSession.__init__",
"numpy.array",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.variables.Variable",
"t... | [((4999, 5060), 'tensorflow.python.framework.test_util.run_v1_only', 'test_util.run_v1_only', (['"""Sessions are not available in TF 2.x"""'], {}), "('Sessions are not available in TF 2.x')\n", (5020, 5060), False, 'from tensorflow.python.framework import test_util\n'), ((16725, 16742), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (16740, 16742), False, 'from tensorflow.python.platform import googletest\n'), ((2261, 2359), 'tensorflow.python.debug.wrappers.framework.BaseDebugWrapperSession.__init__', 'framework.BaseDebugWrapperSession.__init__', (['self', 'sess'], {'thread_name_filter': 'thread_name_filter'}), '(self, sess, thread_name_filter=\n thread_name_filter)\n', (2303, 2359), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((2563, 2633), 'tensorflow.python.debug.wrappers.framework.OnSessionInitResponse', 'framework.OnSessionInitResponse', (['framework.OnSessionInitAction.PROCEED'], {}), '(framework.OnSessionInitAction.PROCEED)\n', (2594, 2633), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((2889, 2991), 'tensorflow.python.debug.wrappers.framework.OnRunStartResponse', 'framework.OnRunStartResponse', (['framework.OnRunStartAction.DEBUG_RUN', "['file://' + self._dump_root]"], {}), "(framework.OnRunStartAction.DEBUG_RUN, [\n 'file://' + self._dump_root])\n", (2917, 2991), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((3252, 3280), 'tensorflow.python.debug.wrappers.framework.OnRunEndResponse', 'framework.OnRunEndResponse', ([], {}), '()\n', (3278, 3280), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((4303, 4357), 'tensorflow.python.debug.wrappers.framework.BaseDebugWrapperSession.__init__', 'framework.BaseDebugWrapperSession.__init__', (['self', 'sess'], {}), '(self, sess)\n', (4345, 4357), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((4967, 4995), 
'tensorflow.python.debug.wrappers.framework.OnRunEndResponse', 'framework.OnRunEndResponse', ([], {}), '()\n', (4993, 4995), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((5185, 5247), 'tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig', 'rewriter_config_pb2.RewriterConfig', ([], {'disable_model_pruning': '(True)'}), '(disable_model_pruning=True)\n', (5219, 5247), False, 'from tensorflow.core.protobuf import rewriter_config_pb2\n'), ((5277, 5333), 'tensorflow.core.protobuf.config_pb2.GraphOptions', 'config_pb2.GraphOptions', ([], {'rewrite_options': 'rewriter_config'}), '(rewrite_options=rewriter_config)\n', (5300, 5333), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((5345, 5396), 'tensorflow.core.protobuf.config_pb2.ConfigProto', 'config_pb2.ConfigProto', ([], {'graph_options': 'graph_options'}), '(graph_options=graph_options)\n', (5367, 5396), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((5713, 5731), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5729, 5731), False, 'import tempfile\n'), ((5832, 5867), 'numpy.array', 'np.array', (['[[5.0, 3.0], [-1.0, 0.0]]'], {}), '([[5.0, 3.0], [-1.0, 0.0]])\n', (5840, 5867), True, 'import numpy as np\n'), ((5891, 5916), 'numpy.array', 'np.array', (['[[2.0], [-1.0]]'], {}), '([[2.0], [-1.0]])\n', (5899, 5916), True, 'import numpy as np\n'), ((5935, 5960), 'numpy.array', 'np.array', (['[[-4.0], [6.0]]'], {}), '([[-4.0], [6.0]])\n', (5943, 5960), True, 'import numpy as np\n'), ((5981, 6048), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['self._a_init_val'], {'shape': '[2, 2]', 'name': '"""a_init"""'}), "(self._a_init_val, shape=[2, 2], name='a_init')\n", (6001, 6048), False, 'from tensorflow.python.framework import constant_op\n'), ((6077, 6144), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['self._b_init_val'], {'shape': '[2, 1]', 'name': '"""b_init"""'}), "(self._b_init_val, shape=[2, 
1], name='b_init')\n", (6097, 6144), False, 'from tensorflow.python.framework import constant_op\n'), ((6170, 6224), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float64', 'name': '"""ph"""'}), "(dtype=dtypes.float64, name='ph')\n", (6191, 6224), False, 'from tensorflow.python.ops import array_ops\n'), ((6240, 6283), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['self._a_init'], {'name': '"""a1"""'}), "(self._a_init, name='a1')\n", (6258, 6283), False, 'from tensorflow.python.ops import variables\n'), ((6298, 6340), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['self._b_init'], {'name': '"""b"""'}), "(self._b_init, name='b')\n", (6316, 6340), False, 'from tensorflow.python.ops import variables\n'), ((6355, 6412), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['self._c_val'], {'shape': '[2, 1]', 'name': '"""c"""'}), "(self._c_val, shape=[2, 1], name='c')\n", (6375, 6412), False, 'from tensorflow.python.framework import constant_op\n'), ((6461, 6505), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['self._a', 'self._b'], {'name': '"""p1"""'}), "(self._a, self._b, name='p1')\n", (6476, 6505), False, 'from tensorflow.python.ops import math_ops\n'), ((6555, 6599), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['self._a', 'self._ph'], {'name': '"""q"""'}), "(self._a, self._ph, name='q')\n", (6570, 6599), False, 'from tensorflow.python.ops import math_ops\n'), ((6641, 6681), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['self._p', 'self._c'], {'name': '"""s"""'}), "(self._p, self._c, name='s')\n", (6653, 6681), False, 'from tensorflow.python.ops import math_ops\n'), ((6867, 6897), 'os.path.isdir', 'os.path.isdir', (['self._dump_root'], {}), '(self._dump_root)\n', (6880, 6897), False, 'import os\n'), ((6954, 6979), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), 
'()\n', (6977, 6979), False, 'from tensorflow.python.framework import ops\n'), ((9120, 9160), 'tensorflow.python.debug.lib.debug_data.DebugDumpDir', 'debug_data.DebugDumpDir', (['self._dump_root'], {}), '(self._dump_root)\n', (9143, 9160), False, 'from tensorflow.python.debug.lib import debug_data\n'), ((13214, 13275), 'threading.Thread', 'threading.Thread', ([], {'name': '"""ChildThread"""', 'target': 'child_thread_job'}), "(name='ChildThread', target=child_thread_job)\n", (13230, 13275), False, 'import threading\n'), ((13456, 13496), 'tensorflow.python.debug.lib.debug_data.DebugDumpDir', 'debug_data.DebugDumpDir', (['self._dump_root'], {}), '(self._dump_root)\n', (13479, 13496), False, 'from tensorflow.python.debug.lib import debug_data\n'), ((13911, 13972), 'threading.Thread', 'threading.Thread', ([], {'name': '"""ChildThread"""', 'target': 'child_thread_job'}), "(name='ChildThread', target=child_thread_job)\n", (13927, 13972), False, 'import threading\n'), ((14153, 14193), 'tensorflow.python.debug.lib.debug_data.DebugDumpDir', 'debug_data.DebugDumpDir', (['self._dump_root'], {}), '(self._dump_root)\n', (14176, 14193), False, 'from tensorflow.python.debug.lib import debug_data\n'), ((14602, 14663), 'threading.Thread', 'threading.Thread', ([], {'name': '"""ChildThread"""', 'target': 'child_thread_job'}), "(name='ChildThread', target=child_thread_job)\n", (14618, 14663), False, 'import threading\n'), ((14844, 14900), 'tensorflow.python.debug.lib.debug_data.DebugDumpDir', 'debug_data.DebugDumpDir', (['self._dump_root'], {'validate': '(False)'}), '(self._dump_root, validate=False)\n', (14867, 14900), False, 'from tensorflow.python.debug.lib import debug_data\n'), ((4440, 4494), 'tensorflow.python.debug.wrappers.framework.OnSessionInitResponse', 'framework.OnSessionInitResponse', (['self._bad_init_action'], {}), '(self._bad_init_action)\n', (4471, 4494), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((4518, 4588), 
'tensorflow.python.debug.wrappers.framework.OnSessionInitResponse', 'framework.OnSessionInitResponse', (['framework.OnSessionInitAction.PROCEED'], {}), '(framework.OnSessionInitAction.PROCEED)\n', (4549, 4588), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((4729, 4797), 'tensorflow.python.debug.wrappers.framework.OnRunStartResponse', 'framework.OnRunStartResponse', (['self._bad_run_start_action', 'debug_urls'], {}), '(self._bad_run_start_action, debug_urls)\n', (4757, 4797), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((4832, 4910), 'tensorflow.python.debug.wrappers.framework.OnRunStartResponse', 'framework.OnRunStartResponse', (['framework.OnRunStartAction.DEBUG_RUN', 'debug_urls'], {}), '(framework.OnRunStartAction.DEBUG_RUN, debug_urls)\n', (4860, 4910), False, 'from tensorflow.python.debug.wrappers import framework\n'), ((6905, 6948), 'tensorflow.python.lib.io.file_io.delete_recursively', 'file_io.delete_recursively', (['self._dump_root'], {}), '(self._dump_root)\n', (6931, 6948), False, 'from tensorflow.python.lib.io import file_io\n'), ((8195, 8223), 'tensorflow.python.client.session.InteractiveSession', 'session.InteractiveSession', ([], {}), '()\n', (8221, 8223), False, 'from tensorflow.python.client import session\n'), ((8646, 8670), 'numpy.array', 'np.array', (['[[3.0], [4.0]]'], {}), '([[3.0], [4.0]])\n', (8654, 8670), True, 'import numpy as np\n'), ((11050, 11076), 'numpy.array', 'np.array', (['[[11.0], [-1.0]]'], {}), '([[11.0], [-1.0]])\n', (11058, 11076), True, 'import numpy as np\n'), ((12568, 12604), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(42)'], {'name': '"""foo"""'}), "(42, name='foo')\n", (12588, 12604), False, 'from tensorflow.python.framework import constant_op\n'), ((15429, 15498), 'tensorflow.python.util.tf_inspect.getmembers', 'tf_inspect.getmembers', (['session.Session'], {'predicate': 'tf_inspect.ismethod'}), '(session.Session, 
predicate=tf_inspect.ismethod)\n', (15450, 15498), False, 'from tensorflow.python.util import tf_inspect\n'), ((15634, 15726), 'tensorflow.python.util.tf_inspect.getmembers', 'tf_inspect.getmembers', (['framework.BaseDebugWrapperSession'], {'predicate': 'tf_inspect.ismethod'}), '(framework.BaseDebugWrapperSession, predicate=\n tf_inspect.ismethod)\n', (15655, 15726), False, 'from tensorflow.python.util import tf_inspect\n'), ((16110, 16203), 'tensorflow.python.util.tf_inspect.getmembers', 'tf_inspect.getmembers', (['monitored_session.MonitoredSession'], {'predicate': 'tf_inspect.ismethod'}), '(monitored_session.MonitoredSession, predicate=\n tf_inspect.ismethod)\n', (16131, 16203), False, 'from tensorflow.python.util import tf_inspect\n'), ((16364, 16456), 'tensorflow.python.util.tf_inspect.getmembers', 'tf_inspect.getmembers', (['framework.BaseDebugWrapperSession'], {'predicate': 'tf_inspect.ismethod'}), '(framework.BaseDebugWrapperSession, predicate=\n tf_inspect.ismethod)\n', (16385, 16456), False, 'from tensorflow.python.util import tf_inspect\n'), ((11369, 11400), 'numpy.array', 'np.array', (['[[1.0], [2.0], [3.0]]'], {}), '([[1.0], [2.0], [3.0]])\n', (11377, 11400), True, 'import numpy as np\n'), ((11128, 11152), 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), '([[1.0], [2.0]])\n', (11136, 11152), True, 'import numpy as np\n'), ((12118, 12142), 'numpy.array', 'np.array', (['[[1.0], [2.0]]'], {}), '([[1.0], [2.0]])\n', (12126, 12142), True, 'import numpy as np\n')] |
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
class TorchModel(nn.Module):
    """Small CNN classifier: 3 strided convs + max-pool, then 2 dense layers.

    The layer sizes imply a 3-channel 31x31 input, producing 10 class logits.
    Loss and optimizer are owned by the model itself.
    """

    def __init__(self):
        super(TorchModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 128, 3, stride=2)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(128, 256, 3, stride=2)
        self.conv3 = nn.Conv2d(256, 512, 3, stride=2)
        self.dense1 = nn.Linear(1 * 1 * 512, 256)
        self.dense2 = nn.Linear(256, 10)
        self.objective = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters())

    def forward(self, x):
        """Map a (N, 3, 31, 31) batch to (N, 10) class logits."""
        out = F.relu(self.conv1(x))    # -> 15 x 15 x 128
        out = self.pool(out)          # -> 7 x 7 x 128
        out = F.relu(self.conv2(out))  # -> 3 x 3 x 256
        out = F.relu(self.conv3(out))  # -> 1 x 1 x 512
        out = out.view(-1, 1 * 1 * 512)
        out = F.relu(self.dense1(out))
        return self.dense2(out)

    def train(self, dataset, device, epochs=2):
        """Run the optimization loop over `dataset` for `epochs` epochs.

        NOTE(review): this overrides nn.Module.train(mode=True), so calling
        model.train() to toggle training mode runs this loop instead —
        kept as-is for caller compatibility, but worth renaming upstream.
        """
        for epoch in range(epochs):
            batch_losses = []
            for x, y in dataset:
                # Standard step: clear grads, forward, loss, backward, update.
                self.optimizer.zero_grad()
                logits = self(x.to(device))
                loss = self.objective(logits, y.to(device))
                loss.backward()
                self.optimizer.step()
                batch_losses.append(loss.item())
            print('Epoch finished, mean loss: %.3f' % np.mean(batch_losses))

    def save(self, path):
        """Serialize the model weights to `path`."""
        torch.save(self.state_dict(), path)

    def load(self, path):
        """Restore model weights previously written by `save`."""
        state = torch.load(path)
        self.load_state_dict(state)
| [
"numpy.mean",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear"
] | [((271, 301), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(128)', '(3)'], {'stride': '(2)'}), '(3, 128, 3, stride=2)\n', (280, 301), True, 'import torch.nn as nn\n'), ((318, 336), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (330, 336), True, 'import torch.nn as nn\n'), ((353, 385), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'stride': '(2)'}), '(128, 256, 3, stride=2)\n', (362, 385), True, 'import torch.nn as nn\n'), ((402, 434), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)'], {'stride': '(2)'}), '(256, 512, 3, stride=2)\n', (411, 434), True, 'import torch.nn as nn\n'), ((451, 478), 'torch.nn.Linear', 'nn.Linear', (['(1 * 1 * 512)', '(256)'], {}), '(1 * 1 * 512, 256)\n', (460, 478), True, 'import torch.nn as nn\n'), ((491, 509), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(10)'], {}), '(256, 10)\n', (500, 509), True, 'import torch.nn as nn\n'), ((530, 551), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (549, 551), True, 'import torch.nn as nn\n'), ((1534, 1550), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1544, 1550), False, 'import torch\n'), ((1413, 1432), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (1420, 1432), True, 'import numpy as np\n')] |
# coding=utf-8
# !/usr/bin/python3.6 ## Please use python 3.6
"""
__synopsis__ : Calculates cosine similarity of support sets with target sample.
__description__ : Calculates cosine similarity of support sets with target sample.
__project__ : MNXC
__author__ : <NAME> <<EMAIL>>
__version__ : "0.1"
__date__ : "08-11-2018"
__copyright__ : "Copyright (c) 2019"
__license__ : This source code is licensed under the MIT-style license found in the LICENSE file in the root
directory of this source tree.
__classes__ : PairCosineSim
__variables__ :
__methods__ :
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import cosine_similarity
from logger.logger import logger
class PairCosineSim(nn.Module):
    """Pairwise cosine similarity between target samples and support sets."""

    def __init__(self):
        super(PairCosineSim, self).__init__()

    def forward(self, supports: torch.Tensor, targets: torch.Tensor, normalize: bool = True,
                test: bool = False) -> torch.Tensor:
        """Compute cosine similarity of each target against its support set.

        :param supports: support-set embeddings, [batch_size, support_size, input_size]
        :param targets: target embeddings, [batch_size, target_size, input_size]
        :param normalize: rescale similarities from (-1, +1) to (0, 1)
        :param test: also compute sklearn's cosine_similarity and log both for comparison
        :return: similarities of shape [batch_size, target_size, support_size]
        """
        eps = 1e-10
        per_batch = []
        for i in range(targets.size(0)):
            # One row per target: its similarity to every support sample.
            rows = [
                F.cosine_similarity(
                    targets[i, j, :].unsqueeze(0), supports[i, :, :], eps=eps)
                for j in range(targets.size(1))
            ]
            sims = torch.stack(rows)
            if test:
                logger.debug("Computed sim: {}".format(sims))
                reference = cosine_similarity(
                    targets.clone().detach()[i, :, :].numpy(),
                    supports.clone().detach()[i, :, :].numpy())
                logger.debug("sklearn sim: {}".format(reference))
            per_batch.append(sims)
        result = torch.stack(per_batch)
        if normalize:
            # Map (-1, +1) -> (0, 1): (x + 1) * 0.5.
            result = torch.mul(torch.add(result, 1), 0.5)
        return result

    def flatten_except_batchdim(self, tensor_data, batch_dim=0):
        """Flatten a 2D or 3D tensor while keeping dimension [batch_dim].

        :param tensor_data: [batch_size, sequence_size, input_size] (or 2D)
        :param batch_dim: batch dimension index (0 for batch_first tensors)
        :return: [batch_size, (sequence_size x input_size)]
        :raises NotImplementedError: for tensors that are not 2D or 3D
        """
        rank = len(tensor_data.shape)
        if rank == 2:
            logger.info("Flattening 2D tensor to (1, dim), [batch_dim] not used.")
            return tensor_data.contiguous().view(1, tensor_data.numel())
        if rank == 3:
            logger.info("Flattening 3D tensor to 2D except dim: [batch_dim={}].".format(batch_dim))
            logger.info("tensor_data.shape: [{}].".format(tensor_data.shape))
            return tensor_data.contiguous().view(tensor_data.shape[batch_dim], -1)
        logger.warn("Tensor shape not supported. Got: [{}].".format(tensor_data.shape))
        raise NotImplementedError

    @staticmethod
    def cosine_sim_2d(tensor1, tensor2, dim=1):
        """Cosine similarity between two 2D tensors of identical shape.

        NOTE: flatten higher-rank tensors to 2D before calling.

        :param tensor1: [batch_size, input_size]
        :param tensor2: [batch_size, input_size]
        :param dim: axis along which norms are taken
        :return: [batch_size, batch_size] similarity matrix
        """
        assert tensor1.shape == tensor2.shape, "Shape of all tensors should be same."
        norm1 = tensor1 / tensor1.norm(dim=dim)[:, None]
        norm2 = tensor2 / tensor2.norm(dim=dim)[:, None]
        cosine_sim = norm1.mm(norm2.t())
        logger.debug(cosine_sim.shape)
        return cosine_sim
if __name__ == '__main__':
    # Two small fixture batches: "a" arrays have 3 support rows, "b" arrays 2.
    a1_np = np.array([
        [[1., 0.4], [1., 1.], [0., 1.5]],
        [[1., 0.6], [1., 1.], [0., 1.5]],
    ])
    a2_np = np.array([
        [[1., 2.], [3., 4.], [5., 6.]],
        [[1., 7.], [2., 5.], [5., 6.]],
    ])
    b1_np = np.array([
        [[1., 0.4], [1., 1.5]],
        [[1., 0.7], [1., 1.5]],
    ])
    b2_np = np.array([
        [[1., 2.], [3., 4.]],
        [[1., 7.], [5., 6.]],
    ])
    a1_pt = torch.from_numpy(a1_np)
    a2_pt = torch.from_numpy(a2_np)
    b1_pt = torch.from_numpy(b1_np)
    b2_pt = torch.from_numpy(b2_np)
    # Reference similarity values for eyeballing the debug output.
    output = torch.tensor([[0.8103, 1.0000, 0.8793],
                           [0.9804, 0.8793, 1.0000]])
    test_DN = PairCosineSim()
    # Run both fixture pairs through the module with test-mode logging on.
    for targets, supports in ((a1_pt, b1_pt), (a2_pt, b2_pt)):
        sim = test_DN.forward(targets, supports, test=True)
        logger.debug(sim.shape)
| [
"torch.mul",
"torch.stack",
"torch.from_numpy",
"logger.logger.logger.debug",
"numpy.array",
"torch.tensor",
"torch.add",
"logger.logger.logger.info"
] | [((4805, 4896), 'numpy.array', 'np.array', (['[[[1.0, 0.4], [1.0, 1.0], [0.0, 1.5]], [[1.0, 0.6], [1.0, 1.0], [0.0, 1.5]]]'], {}), '([[[1.0, 0.4], [1.0, 1.0], [0.0, 1.5]], [[1.0, 0.6], [1.0, 1.0], [\n 0.0, 1.5]]])\n', (4813, 4896), True, 'import numpy as np\n'), ((5004, 5027), 'torch.from_numpy', 'torch.from_numpy', (['a1_np'], {}), '(a1_np)\n', (5020, 5027), False, 'import torch\n'), ((5040, 5131), 'numpy.array', 'np.array', (['[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[1.0, 7.0], [2.0, 5.0], [5.0, 6.0]]]'], {}), '([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[1.0, 7.0], [2.0, 5.0], [\n 5.0, 6.0]]])\n', (5048, 5131), True, 'import numpy as np\n'), ((5235, 5258), 'torch.from_numpy', 'torch.from_numpy', (['a2_np'], {}), '(a2_np)\n', (5251, 5258), False, 'import torch\n'), ((5272, 5334), 'numpy.array', 'np.array', (['[[[1.0, 0.4], [1.0, 1.5]], [[1.0, 0.7], [1.0, 1.5]]]'], {}), '([[[1.0, 0.4], [1.0, 1.5]], [[1.0, 0.7], [1.0, 1.5]]])\n', (5280, 5334), True, 'import numpy as np\n'), ((5407, 5430), 'torch.from_numpy', 'torch.from_numpy', (['b1_np'], {}), '(b1_np)\n', (5423, 5430), False, 'import torch\n'), ((5443, 5505), 'numpy.array', 'np.array', (['[[[1.0, 2.0], [3.0, 4.0]], [[1.0, 7.0], [5.0, 6.0]]]'], {}), '([[[1.0, 2.0], [3.0, 4.0]], [[1.0, 7.0], [5.0, 6.0]]])\n', (5451, 5505), True, 'import numpy as np\n'), ((5574, 5597), 'torch.from_numpy', 'torch.from_numpy', (['b2_np'], {}), '(b2_np)\n', (5590, 5597), False, 'import torch\n'), ((5612, 5672), 'torch.tensor', 'torch.tensor', (['[[0.8103, 1.0, 0.8793], [0.9804, 0.8793, 1.0]]'], {}), '([[0.8103, 1.0, 0.8793], [0.9804, 0.8793, 1.0]])\n', (5624, 5672), False, 'import torch\n'), ((5970, 5993), 'logger.logger.logger.debug', 'logger.debug', (['sim.shape'], {}), '(sim.shape)\n', (5982, 5993), False, 'from logger.logger import logger\n'), ((6071, 6094), 'logger.logger.logger.debug', 'logger.debug', (['sim.shape'], {}), '(sim.shape)\n', (6083, 6094), False, 'from logger.logger import logger\n'), ((2698, 2737), 'torch.stack', 
'torch.stack', (['batch_targets_similarities'], {}), '(batch_targets_similarities)\n', (2709, 2737), False, 'import torch\n'), ((4706, 4736), 'logger.logger.logger.debug', 'logger.debug', (['cosine_sim.shape'], {}), '(cosine_sim.shape)\n', (4718, 4736), False, 'from logger.logger import logger\n'), ((2285, 2318), 'torch.stack', 'torch.stack', (['targets_similarities'], {}), '(targets_similarities)\n', (2296, 2318), False, 'import torch\n'), ((2801, 2841), 'torch.add', 'torch.add', (['batch_targets_similarities', '(1)'], {}), '(batch_targets_similarities, 1)\n', (2810, 2841), False, 'import torch\n'), ((2882, 2924), 'torch.mul', 'torch.mul', (['batch_targets_similarities', '(0.5)'], {}), '(batch_targets_similarities, 0.5)\n', (2891, 2924), False, 'import torch\n'), ((3402, 3472), 'logger.logger.logger.info', 'logger.info', (['"""Flattening 2D tensor to (1, dim), [batch_dim] not used."""'], {}), "('Flattening 2D tensor to (1, dim), [batch_dim] not used.')\n", (3413, 3472), False, 'from logger.logger import logger\n')] |
import os
import numpy as np
import argparse
import pandas as pd
from openpyxl import load_workbook
def init_args(argv=None):
    """Parse command-line options for the txt-to-excel conversion.

    :param argv: optional list of argument strings; defaults to the real
                 command line (sys.argv[1:]). Passing a list makes the
                 function testable without touching sys.argv.
    :return: argparse.Namespace with ``file_name`` and ``excel_name``.
    """
    # Set argparse
    parser = argparse.ArgumentParser(description='Process data')
    parser.add_argument('--file_name', metavar='file',
                        default='./output/lattice_ec/deit_base_patch16_224/Results_300_0.001_128',
                        help='path to data')
    parser.add_argument('--excel_name', metavar='file',
                        default='./A.xlsx',
                        help='path to data')
    args = parser.parse_args(argv)
    return args
def txt2excel(file_name=None, excel_name=None):
    '''Write the second column of a whitespace-delimited text file to an Excel sheet.

    :param file_name: input text file; defaults to the globally parsed ``args.file_name``.
    :param excel_name: output .xlsx path; defaults to ``args.excel_name``.
    '''
    if file_name is None:
        file_name = args.file_name
    if excel_name is None:
        excel_name = args.excel_name
    data = pd.DataFrame(np.loadtxt(file_name, usecols=1))
    # Use the context manager to save/close the workbook exactly once.
    # The previous writer.save() followed by writer.close() double-saved on
    # older pandas and fails on pandas >= 2.0 where ExcelWriter.save() was removed.
    with pd.ExcelWriter(excel_name) as writer:  # 写入Excel文件
        data.to_excel(writer, sheet_name='Sheet1', float_format='%.2f', header=False, index=False)
if __name__ == '__main__':
    # Parse CLI options into the module-level `args` that txt2excel() reads.
    args = init_args()
    txt2excel()
| [
"numpy.loadtxt",
"pandas.ExcelWriter",
"pandas.DataFrame",
"argparse.ArgumentParser"
] | [((151, 202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process data"""'}), "(description='Process data')\n", (174, 202), False, 'import argparse\n'), ((654, 685), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['args.excel_name'], {}), '(args.excel_name)\n', (668, 685), True, 'import pandas as pd\n'), ((710, 747), 'numpy.loadtxt', 'np.loadtxt', (['args.file_name'], {'usecols': '(1)'}), '(args.file_name, usecols=1)\n', (720, 747), True, 'import numpy as np\n'), ((761, 779), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (773, 779), True, 'import pandas as pd\n')] |
import os
import time
import h5py
import math
import pickle
import numpy as np
import pandas as pd
import cv2
import threading
import queue
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import misc, ndimage
from sklearn import model_selection, preprocessing, metrics
from sklearn.utils import shuffle
from skimage import transform
from tqdm import tqdm
from keras.regularizers import l2
from keras.models import Model, load_model
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras import backend as K
from keras.losses import binary_crossentropy
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
from tensorflow.python.client import device_lib
# Dataset layout on disk (FASSEG v2 face-segmentation data).
DATA_PATH = './datasets/FASSEG/V2'
TRAIN_PATH = os.path.join(DATA_PATH, 'Train_RGB')
TEST_PATH = os.path.join(DATA_PATH, 'Test_RGB')
TRAIN_MASKS_PATH = os.path.join(DATA_PATH, 'Train_Labels')
# TRAIN_MASKS_FIXED_PATH = os.path.join(DATA_PATH, 'fixed_masks/fix-HCK')
TRAIN_MASKS_CSV_PATH = os.path.join(DATA_PATH, 'train_masks.csv')
# SAMPLE_SUBMISSION_PATH = os.path.join(RAW_DATA_PATH, 'sample_submission.csv')
# METADATA_PATH = os.path.join(RAW_DATA_PATH, 'metadata.csv')
# SUBMISSION_PATH = os.path.join(DATA_PATH, 'submissions')
# Output locations for checkpoints and TensorBoard logs.
ASSETS_PATH = os.path.join('./', 'assets')
MODELS_PATH = os.path.join(ASSETS_PATH, 'models')
TENSORBOARD_PATH = os.path.join(ASSETS_PATH, 'tensorboard')
# NOTE: this read happens at import time — the module cannot be imported
# without the dataset CSV present on disk.
train_masks_df = pd.read_csv(TRAIN_MASKS_CSV_PATH)
print('train_masks_df.shape', train_masks_df.shape)
# Constants
# HEIGHT_ORIG = 1280
# WIDTH_ORIG = 1918
# CHANNELS_ORIG = 3
# Working resolution for training; every image/mask is resized to this.
HEIGHT = 512
WIDTH = 370
CHANNELS = 3
new_shape = (HEIGHT, WIDTH, CHANNELS)
# Masks are single-channel at the same spatial resolution.
mask_shape = (new_shape[0], new_shape[1], 1)
def get_img_id(img_path, id_len=15):
    """Return the image id encoded as the leading characters of a file name.

    :param img_path: file name (or path string) whose prefix is the id.
    :param id_len: number of leading characters that form the id
                   (default 15, the fixed width previously hard-coded).
    :return: the first ``id_len`` characters of ``img_path``.
    """
    return img_path[:id_len]
img_ids = list(map(get_img_id, list(train_masks_df.img.values)))
def load_image_disk(img_id, folder=TRAIN_PATH):
    """Read the .bmp image for ``img_id`` from ``folder`` and return it as an array."""
    return misc.imread(os.path.join(folder, img_id + ".bmp"))
def get_image(img_id):
    # Look up the pre-loaded, pre-resized training image from the in-memory cache.
    return train_imgs[img_id]
# Return mask as 1/0 binary img with single channel
def load_mask_disk(img_id, folder=TRAIN_MASKS_PATH, filetype='bmp'):
    """Load the segmentation mask for ``img_id`` as a strictly binary (0/1) array.

    :param img_id: image id; the mask file is named ``<img_id>_mask.<filetype>``.
    :param folder: directory containing the mask files.
    :param filetype: mask file extension.
    :return: array of shape (H, W, 1) with values in {0, 1}.
    """
    mask = misc.imread(os.path.join(folder, "{}_mask.{}".format(img_id, filetype)), flatten=True)
    # Bug fix: the old code only set pixels > 128 to 1 and left intermediate
    # grey values (1..128) untouched, so the "binary" mask could still contain
    # arbitrary values. Threshold both sides instead.
    mask = (mask > 128).astype(mask.dtype)
    if len(mask.shape) == 2:
        mask = mask.reshape(mask.shape[0], mask.shape[1], 1)
    return mask
def get_mask(img_id):
    # Look up the pre-loaded, pre-resized mask from the in-memory cache.
    return train_masks[img_id]
# Helper functions to plot car, mask, masked_car
def plot_image(img_id):
    """Display the training image for ``img_id`` with matplotlib."""
    img = misc.imread(os.path.join(TRAIN_PATH, img_id + ".bmp"))
    # plt.imshow's return value was bound to an unused variable before; dropped.
    plt.imshow(img)
    plt.axis('off')
    plt.show()
def plot_mask(img_id, folder=TRAIN_MASKS_PATH, filetype='bmp', ax=None):
    """Display the mask for ``img_id`` — in a new figure, or on ``ax`` when given.

    :param ax: optional matplotlib Axes to draw on; a figure is shown when None.
    """
    mask = misc.imread(os.path.join(folder, "{}_mask.{}".format(img_id, filetype)))
    # `is None` is the correct identity test (was `== None`).
    if ax is None:
        plt.imshow(mask)
        plt.axis('off')
        plt.show()
    else:
        ax.imshow(mask)
        ax.axis('off')
def plot_masked_image(img_id, ax=None):
    """Display the training image with its mask applied (background zeroed out).

    :param ax: optional matplotlib Axes to draw on; a figure is shown when None.
    """
    img = misc.imread(os.path.join(TRAIN_PATH, img_id + ".bmp"))
    mask = misc.imread(os.path.join(TRAIN_MASKS_PATH, img_id + ".bmp"))
    mask = mask[:, :, 0:3]
    # Turn 255-valued mask pixels into a 0/1 multiplier for the image.
    mask[mask == 255] = 1
    masked_img = img * mask
    # `is None` is the correct identity test (was `== None`).
    if ax is None:
        plt.imshow(masked_img)
        plt.axis('off')
        plt.show()
    else:
        ax.imshow(masked_img)
        ax.axis('off')
# def gray2rgb(img):
# img = np.squeeze(img)
# w, h = img.shape
# ret = np.empty((w, h, 3), dtype=np.uint8)
# ret[:, :, 0] = img
# ret[:, :, 1] = img
# ret[:, :, 2] = img
# return ret
def resize_img(img, new_s=new_shape):
    # NOTE(review): skimage's resize returns a float image rescaled to [0, 1]
    # unless preserve_range=True — confirm callers expect that.
    return transform.resize(img, new_s)
# Cache every training image in memory, resized once up front.
train_imgs = {}
for img_path in tqdm(os.listdir(TRAIN_PATH)):
    img_id = get_img_id(img_path)
    # NOTE(review): cv2.resize takes dsize as (width, height); passing
    # (new_shape[0], new_shape[1]) = (HEIGHT, WIDTH) swaps the axes —
    # confirm intentional (it is at least consistent with the masks below).
    train_imgs[img_id] = cv2.resize(load_image_disk(img_id), (new_shape[0], new_shape[1]))
# Cache every training mask, restoring the channel axis cv2.resize drops
# from single-channel inputs, giving shape (H, W, 1).
train_masks = {}
for img_path in tqdm(os.listdir(TRAIN_MASKS_PATH)):
    img_id = get_img_id(img_path)
    train_masks[img_id] = np.expand_dims(cv2.resize(load_mask_disk(img_id), (new_shape[0], new_shape[1])), axis=2)
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    """Randomly jitter hue/saturation/value of an RGB image, applied with
    probability ``u``; each *_shift_limit is a (min, max) uniform range.

    NOTE: the order of np.random calls is part of the behavior (it advances
    the global RNG state) — do not reorder.
    """
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    return image
def randomShiftScaleRotate(image, mask,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_REFLECT_101, u=0.5):
    """Apply the same random shift/scale/rotate warp to an image and its mask
    (probability ``u``), keeping pixels aligned between the two.
    """
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        # Split the scale into per-axis factors so the aspect jitter is symmetric.
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        # Translation in pixels, drawn as a fraction of the image size.
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        # 2x2 rotation+scale matrix applied to the image's corner box.
        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        # Map the original corners to their warped positions (rotate about the
        # center, then translate), and derive the perspective transform.
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(0, 0, 0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(0, 0, 0,))
        # warpPerspective drops the channel axis of single-channel masks; restore it.
        if len(mask.shape) == 2:
            mask = np.expand_dims(mask, axis=2)

    return image, mask
def randomHorizontalFlip(image, mask, u=0.5):
    """Flip image and mask left-right together with probability ``u``."""
    # Single RNG draw, same comparison semantics as before (flip when < u).
    if np.random.random() >= u:
        return image, mask
    return cv2.flip(image, 1), cv2.flip(mask, 1)
def generate_training_batch(data, batch_size):
    """Infinite generator of augmented float32 (X, Y) training batches.

    Each batch samples ``batch_size`` ids from ``data`` without replacement
    and applies a random shift (rotation/HSV jitter are currently disabled).
    NOTE: the np.random call order is part of the behavior — do not reorder.
    """
    while True:
        X_batch = []
        Y_batch = []
        batch_ids = np.random.choice(data,
                                     size=batch_size,
                                     replace=False)
        for idx, img_id in enumerate(batch_ids):
            x = get_image(img_id)
            y = get_mask(img_id)
            x, y = randomShiftScaleRotate(x, y,
                                          shift_limit=(-0.0625, 0.0625),
                                          scale_limit=(-0.1, 0.1),
                                          rotate_limit=(-0, 0))
            # x = randomHueSaturationValue(x,
            #                              hue_shift_limit=(-50, 50),
            #                              sat_shift_limit=(-5, 5),
            #                              val_shift_limit=(-15, 15))
            X_batch.append(x)
            Y_batch.append(y)
        X = np.asarray(X_batch, dtype=np.float32)
        Y = np.asarray(Y_batch, dtype=np.float32)
        yield X, Y
def generate_validation_batch(data, batch_size):
    """Infinite generator of un-augmented float32 (X, Y) validation batches.

    :param data: sequence of image ids to sample from.
    :param batch_size: number of (image, mask) pairs per yielded batch.
    """
    while True:
        batch_ids = np.random.choice(data,
                                     size=batch_size,
                                     replace=False)
        # The old loop enumerated batch_ids but never used the index;
        # comprehensions build the same arrays directly.
        X = np.asarray([get_image(img_id) for img_id in batch_ids], dtype=np.float32)
        Y = np.asarray([get_mask(img_id) for img_id in batch_ids], dtype=np.float32)
        yield X, Y
def generate_validation_data_seq(data):
    """Yield (img_id, image, mask) for every id in ``data``, in order, once.

    The original hand-rolled index/while/break loop is just a for-loop; it
    also raised IndexError on empty ``data``, which this version avoids.
    """
    for img_id in data:
        yield img_id, get_image(img_id), get_mask(img_id)
def get_model_memory_usage(batch_size, model):
    """Print a rough float32 memory estimate (activations + weights) for ``model``."""
    from keras import backend as K

    def _param_count(weights):
        # Element count across a de-duplicated weight collection.
        return int(np.sum([K.count_params(p) for p in set(weights)]))

    # Total activation elements: product of each layer's output dims,
    # skipping the unknown (None) batch dimension.
    shapes_mem_count = 0
    for layer in model.layers:
        layer_elems = 1
        for dim in layer.output_shape:
            if dim is None:
                continue
            layer_elems *= dim
        shapes_mem_count += layer_elems

    trainable_count = _param_count(model.trainable_weights)
    non_trainable_count = _param_count(model.non_trainable_weights)
    # 4 bytes per float32 element.
    total_memory = 4 * batch_size * (shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = round(total_memory / (1024 ** 3), 3)
    mbytes = round(total_memory / (1024 ** 2), 3)
    print('trainable_count', trainable_count, 'non_trainable_count', non_trainable_count, 'gbytes', gbytes, 'mbytes',
          mbytes)
def down(filters, input_):
    """U-Net encoder step: two (Conv->BN->ReLU) blocks then 2x2 max pooling.

    Returns (pooled tensor for the next level, activation for the skip connection).
    """
    conv1 = Conv2D(filters, (3, 3), padding='same')(input_)
    bn1 = BatchNormalization(epsilon=1e-4)(conv1)
    act1 = Activation('relu')(bn1)
    conv2 = Conv2D(filters, (3, 3), padding='same')(act1)
    bn2 = BatchNormalization(epsilon=1e-4)(conv2)
    # NOTE(review): the skip connection uses the ReLU output, but the pooling
    # path takes the pre-activation (bn2) — preserved exactly as the original
    # wired it; confirm this asymmetry is intended.
    down_res = Activation('relu')(bn2)
    down_pool = MaxPooling2D((2, 2), strides=(2, 2))(bn2)
    return down_pool, down_res
def up(filters, input_, down_):
    """U-Net decoder step: 2x upsample, concat the skip tensor, then three
    sequential (Conv->BN->ReLU) blocks.
    """
    x = UpSampling2D((2, 2))(input_)
    x = concatenate([down_, x], axis=3)
    # Three identical conv blocks, each consuming the previous output.
    for _ in range(3):
        x = Conv2D(filters, (3, 3), padding='same')(x)
        x = BatchNormalization(epsilon=1e-4)(x)
        x = Activation('relu')(x)
    return x
def get_unet_1024(input_shape=(HEIGHT, WIDTH, CHANNELS), num_classes=1):
    """Build a U-Net: 6 encoder levels (24..768 filters), a 768-filter
    bottleneck, 6 mirrored decoder levels with skip connections, and a
    1x1 sigmoid head producing ``num_classes`` channels.
    """
    inputs = Input(shape=input_shape)
    # down0b, down0b_res = down(8, inputs)
    # Encoder: each step halves the spatial size and keeps a skip tensor.
    down0a, down0a_res = down(24, inputs)
    down0, down0_res = down(64, down0a)
    down1, down1_res = down(128, down0)
    down2, down2_res = down(256, down1)
    down3, down3_res = down(512, down2)
    down4, down4_res = down(768, down3)
    # Bottleneck: two Conv->BN->ReLU blocks without pooling.
    center = Conv2D(768, (3, 3), padding='same')(down4)
    center = BatchNormalization(epsilon=1e-4)(center)
    center = Activation('relu')(center)
    center = Conv2D(768, (3, 3), padding='same')(center)
    center = BatchNormalization(epsilon=1e-4)(center)
    center = Activation('relu')(center)
    # Decoder: mirror of the encoder, fusing each level's skip tensor.
    up4 = up(768, center, down4_res)
    up3 = up(512, up4, down3_res)
    up2 = up(256, up3, down2_res)
    up1 = up(128, up2, down1_res)
    up0 = up(64, up1, down0_res)
    up0a = up(24, up0, down0a_res)
    # up0b = up(8, up0a, down0b_res)
    # Per-pixel sigmoid gives a probability map per class.
    classify = Conv2D(num_classes, (1, 1), activation='sigmoid', name='final_layer')(up0a)
    model = Model(inputs=inputs, outputs=classify)
    return model
def dice_coef(y_true, y_pred, smooth=1):
    """Soft Dice coefficient between flattened target and prediction masks.

    ``smooth`` keeps the ratio finite when both masks are empty.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denominator = K.sum(flat_true) + K.sum(flat_pred) + smooth
    return (2. * overlap + smooth) / denominator
def dice_coef_loss(y_true, y_pred):
    # Dice is 1 for perfect overlap, so 1 - dice is a minimizable loss.
    return 1 - dice_coef(y_true, y_pred)
def bce_dice_loss(y_true, y_pred):
    # Combined objective: pixel-wise binary cross-entropy plus soft-Dice loss.
    return binary_crossentropy(y_true, y_pred) + dice_coef_loss(y_true, y_pred)
BATCH_SIZE = 2
# Training new model
# Timestamp used to name this run's checkpoints, logs and saved model.
ts = str(int(time.time()))
model_name = 'malhot'
num_epochs = 30
# 80% of the ids are used for training (matching the split below).
steps_per_epoch = int(len(img_ids) * 0.8 / BATCH_SIZE)
run_name = 'model={}-batch_size={}-num_epoch={}-steps_per_epoch={}-timestamp={}'.format(
    model_name,
    BATCH_SIZE,
    num_epochs,
    steps_per_epoch,
    ts
)
tensorboard_loc = os.path.join(TENSORBOARD_PATH, run_name)
checkpoint_loc = os.path.join(MODELS_PATH, 'model-{}-weights.h5'.format(ts))
# Stop when val_loss fails to improve by min_delta for `patience` epochs.
earlyStopping = EarlyStopping(
    monitor='val_loss',
    patience=2,
    verbose=1,
    min_delta=0.0001,
    mode='min',
)
# Keep only the best (lowest val_loss) weights on disk.
modelCheckpoint = ModelCheckpoint(
    checkpoint_loc,
    monitor='val_loss',
    save_best_only=True,
    mode='min',
    verbose=1,
    save_weights_only=True
)
tensorboard = TensorBoard(log_dir=tensorboard_loc, histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [modelCheckpoint, earlyStopping, tensorboard]
model = get_unet_1024()
# NOTE(review): Adam's `lr` kwarg is `learning_rate` in newer Keras —
# confirm against the pinned Keras version.
model.compile(loss=bce_dice_loss, optimizer=Adam(lr=1e-4), metrics=[dice_coef])
print(model.summary())
get_model_memory_usage(BATCH_SIZE, model)
# Fixed-seed 80/20 train/validation split over the image ids.
train_ids, validation_ids = model_selection.train_test_split(img_ids, random_state=42, test_size=0.20)
train_generator = generate_training_batch(train_ids, BATCH_SIZE)
valid_generator = generate_validation_batch(validation_ids, BATCH_SIZE)
VALIDATION_STEPS = int(len(validation_ids) / BATCH_SIZE)
print('Starting run {}'.format(run_name))
history = model.fit_generator(
    train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=num_epochs,
    callbacks=callbacks_list,
    verbose=1,
    validation_data=valid_generator,
    validation_steps=VALIDATION_STEPS
)
model_path = os.path.join(MODELS_PATH, 'model-{}.h5'.format(ts))
history_path = os.path.join(MODELS_PATH, 'model-{}.history'.format(ts))
model.save(model_path)
# NOTE(review): the file handle from open() is never closed explicitly —
# consider a `with` block.
pickle.dump(history.history, open(history_path, "wb"))
print('Saved model at {}'.format(model_path))
print('Saved model history at {}'.format(history_path))
| [
"keras.losses.binary_crossentropy",
"keras.backend.sum",
"pandas.read_csv",
"keras.backend.flatten",
"numpy.array",
"cv2.warpPerspective",
"matplotlib.pyplot.imshow",
"os.listdir",
"numpy.random.random",
"numpy.math.cos",
"numpy.asarray",
"numpy.dot",
"keras.models.Model",
"keras.callbacks... | [((823, 859), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""Train_RGB"""'], {}), "(DATA_PATH, 'Train_RGB')\n", (835, 859), False, 'import os\n'), ((872, 907), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""Test_RGB"""'], {}), "(DATA_PATH, 'Test_RGB')\n", (884, 907), False, 'import os\n'), ((927, 966), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""Train_Labels"""'], {}), "(DATA_PATH, 'Train_Labels')\n", (939, 966), False, 'import os\n'), ((1064, 1106), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""train_masks.csv"""'], {}), "(DATA_PATH, 'train_masks.csv')\n", (1076, 1106), False, 'import os\n'), ((1322, 1350), 'os.path.join', 'os.path.join', (['"""./"""', '"""assets"""'], {}), "('./', 'assets')\n", (1334, 1350), False, 'import os\n'), ((1365, 1400), 'os.path.join', 'os.path.join', (['ASSETS_PATH', '"""models"""'], {}), "(ASSETS_PATH, 'models')\n", (1377, 1400), False, 'import os\n'), ((1420, 1460), 'os.path.join', 'os.path.join', (['ASSETS_PATH', '"""tensorboard"""'], {}), "(ASSETS_PATH, 'tensorboard')\n", (1432, 1460), False, 'import os\n'), ((1479, 1512), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_MASKS_CSV_PATH'], {}), '(TRAIN_MASKS_CSV_PATH)\n', (1490, 1512), True, 'import pandas as pd\n'), ((12444, 12484), 'os.path.join', 'os.path.join', (['TENSORBOARD_PATH', 'run_name'], {}), '(TENSORBOARD_PATH, run_name)\n', (12456, 12484), False, 'import os\n'), ((12579, 12669), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(2)', 'verbose': '(1)', 'min_delta': '(0.0001)', 'mode': '"""min"""'}), "(monitor='val_loss', patience=2, verbose=1, min_delta=0.0001,\n mode='min')\n", (12592, 12669), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((12708, 12831), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['checkpoint_loc'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)', 'mode': '"""min"""', 'verbose': '(1)', 'save_weights_only': 
'(True)'}), "(checkpoint_loc, monitor='val_loss', save_best_only=True,\n mode='min', verbose=1, save_weights_only=True)\n", (12723, 12831), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((12869, 12964), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'tensorboard_loc', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), '(log_dir=tensorboard_loc, histogram_freq=0, write_graph=True,\n write_images=True)\n', (12880, 12964), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((13224, 13297), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['img_ids'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(img_ids, random_state=42, test_size=0.2)\n', (13256, 13297), False, 'from sklearn import model_selection, preprocessing, metrics\n'), ((2623, 2638), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2633, 2638), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2658), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2651, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2673), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2671, 2673), True, 'import matplotlib.pyplot as plt\n'), ((3677, 3705), 'skimage.transform.resize', 'transform.resize', (['img', 'new_s'], {}), '(img, new_s)\n', (3693, 3705), False, 'from skimage import transform\n'), ((3745, 3767), 'os.listdir', 'os.listdir', (['TRAIN_PATH'], {}), '(TRAIN_PATH)\n', (3755, 3767), False, 'import os\n'), ((3934, 3962), 'os.listdir', 'os.listdir', (['TRAIN_MASKS_PATH'], {}), '(TRAIN_MASKS_PATH)\n', (3944, 3962), False, 'import os\n'), ((11604, 11642), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'classify'}), '(inputs=inputs, outputs=classify)\n', (11609, 11642), False, 'from keras.models import Model, load_model\n'), ((11719, 11736), 'keras.backend.flatten', 'K.flatten', (['y_true'], {}), 
'(y_true)\n', (11728, 11736), True, 'from keras import backend as K\n'), ((11752, 11769), 'keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (11761, 11769), True, 'from keras import backend as K\n'), ((11790, 11816), 'keras.backend.sum', 'K.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (11795, 11816), True, 'from keras import backend as K\n'), ((1953, 1990), 'os.path.join', 'os.path.join', (['folder', "(img_id + '.bmp')"], {}), "(folder, img_id + '.bmp')\n", (1965, 1990), False, 'import os\n'), ((2566, 2607), 'os.path.join', 'os.path.join', (['TRAIN_PATH', "(img_id + '.bmp')"], {}), "(TRAIN_PATH, img_id + '.bmp')\n", (2578, 2607), False, 'import os\n'), ((2870, 2886), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {}), '(mask)\n', (2880, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2910), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2903, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2919, 2929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2927, 2929), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3092), 'os.path.join', 'os.path.join', (['TRAIN_PATH', "(img_id + '.bmp')"], {}), "(TRAIN_PATH, img_id + '.bmp')\n", (3063, 3092), False, 'import os\n'), ((3117, 3164), 'os.path.join', 'os.path.join', (['TRAIN_MASKS_PATH', "(img_id + '.bmp')"], {}), "(TRAIN_MASKS_PATH, img_id + '.bmp')\n", (3129, 3164), False, 'import os\n'), ((3284, 3306), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_img'], {}), '(masked_img)\n', (3294, 3306), True, 'import matplotlib.pyplot as plt\n'), ((3315, 3330), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3323, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((4312, 4330), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4328, 4330), True, 'import numpy as 
np\n'), ((4352, 4390), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (4364, 4390), False, 'import cv2\n'), ((4409, 4425), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (4418, 4425), False, 'import cv2\n'), ((4446, 4503), 'numpy.random.uniform', 'np.random.uniform', (['hue_shift_limit[0]', 'hue_shift_limit[1]'], {}), '(hue_shift_limit[0], hue_shift_limit[1])\n', (4463, 4503), True, 'import numpy as np\n'), ((4516, 4537), 'cv2.add', 'cv2.add', (['h', 'hue_shift'], {}), '(h, hue_shift)\n', (4523, 4537), False, 'import cv2\n'), ((4558, 4615), 'numpy.random.uniform', 'np.random.uniform', (['sat_shift_limit[0]', 'sat_shift_limit[1]'], {}), '(sat_shift_limit[0], sat_shift_limit[1])\n', (4575, 4615), True, 'import numpy as np\n'), ((4628, 4649), 'cv2.add', 'cv2.add', (['s', 'sat_shift'], {}), '(s, sat_shift)\n', (4635, 4649), False, 'import cv2\n'), ((4670, 4727), 'numpy.random.uniform', 'np.random.uniform', (['val_shift_limit[0]', 'val_shift_limit[1]'], {}), '(val_shift_limit[0], val_shift_limit[1])\n', (4687, 4727), True, 'import numpy as np\n'), ((4740, 4761), 'cv2.add', 'cv2.add', (['v', 'val_shift'], {}), '(v, val_shift)\n', (4747, 4761), False, 'import cv2\n'), ((4778, 4798), 'cv2.merge', 'cv2.merge', (['(h, s, v)'], {}), '((h, s, v))\n', (4787, 4798), False, 'import cv2\n'), ((4815, 4853), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2RGB'], {}), '(image, cv2.COLOR_HSV2RGB)\n', (4827, 4853), False, 'import cv2\n'), ((5172, 5190), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5188, 5190), True, 'import numpy as np\n'), ((5258, 5309), 'numpy.random.uniform', 'np.random.uniform', (['rotate_limit[0]', 'rotate_limit[1]'], {}), '(rotate_limit[0], rotate_limit[1])\n', (5275, 5309), True, 'import numpy as np\n'), ((5336, 5393), 'numpy.random.uniform', 'np.random.uniform', (['(1 + scale_limit[0])', '(1 + scale_limit[1])'], {}), '(1 + scale_limit[0], 1 + scale_limit[1])\n', (5353, 
5393), True, 'import numpy as np\n'), ((5411, 5470), 'numpy.random.uniform', 'np.random.uniform', (['(1 + aspect_limit[0])', '(1 + aspect_limit[1])'], {}), '(1 + aspect_limit[0], 1 + aspect_limit[1])\n', (5428, 5470), True, 'import numpy as np\n'), ((5848, 5879), 'numpy.array', 'np.array', (['[[cc, -ss], [ss, cc]]'], {}), '([[cc, -ss], [ss, cc]])\n', (5856, 5879), True, 'import numpy as np\n'), ((5896, 5956), 'numpy.array', 'np.array', (['[[0, 0], [width, 0], [width, height], [0, height]]'], {}), '([[0, 0], [width, 0], [width, height], [0, height]])\n', (5904, 5956), True, 'import numpy as np\n'), ((6199, 6238), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['box0', 'box1'], {}), '(box0, box1)\n', (6226, 6238), False, 'import cv2\n'), ((6255, 6377), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'mat', '(width, height)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'borderMode', 'borderValue': '(0, 0, 0)'}), '(image, mat, (width, height), flags=cv2.INTER_LINEAR,\n borderMode=borderMode, borderValue=(0, 0, 0))\n', (6274, 6377), False, 'import cv2\n'), ((6426, 6547), 'cv2.warpPerspective', 'cv2.warpPerspective', (['mask', 'mat', '(width, height)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'borderMode', 'borderValue': '(0, 0, 0)'}), '(mask, mat, (width, height), flags=cv2.INTER_LINEAR,\n borderMode=borderMode, borderValue=(0, 0, 0))\n', (6445, 6547), False, 'import cv2\n'), ((6740, 6758), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6756, 6758), True, 'import numpy as np\n'), ((6780, 6798), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (6788, 6798), False, 'import cv2\n'), ((6814, 6831), 'cv2.flip', 'cv2.flip', (['mask', '(1)'], {}), '(mask, 1)\n', (6822, 6831), False, 'import cv2\n'), ((6983, 7037), 'numpy.random.choice', 'np.random.choice', (['data'], {'size': 'batch_size', 'replace': '(False)'}), '(data, size=batch_size, replace=False)\n', (6999, 7037), True, 'import numpy as np\n'), ((7824, 7861), 
'numpy.asarray', 'np.asarray', (['X_batch'], {'dtype': 'np.float32'}), '(X_batch, dtype=np.float32)\n', (7834, 7861), True, 'import numpy as np\n'), ((7874, 7911), 'numpy.asarray', 'np.asarray', (['Y_batch'], {'dtype': 'np.float32'}), '(Y_batch, dtype=np.float32)\n', (7884, 7911), True, 'import numpy as np\n'), ((8060, 8114), 'numpy.random.choice', 'np.random.choice', (['data'], {'size': 'batch_size', 'replace': '(False)'}), '(data, size=batch_size, replace=False)\n', (8076, 8114), True, 'import numpy as np\n'), ((8377, 8414), 'numpy.asarray', 'np.asarray', (['X_batch'], {'dtype': 'np.float32'}), '(X_batch, dtype=np.float32)\n', (8387, 8414), True, 'import numpy as np\n'), ((8427, 8464), 'numpy.asarray', 'np.asarray', (['Y_batch'], {'dtype': 'np.float32'}), '(Y_batch, dtype=np.float32)\n', (8437, 8464), True, 'import numpy as np\n'), ((12031, 12066), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (12050, 12066), False, 'from keras.losses import binary_crossentropy\n'), ((12152, 12163), 'time.time', 'time.time', ([], {}), '()\n', (12161, 12163), False, 'import time\n'), ((5725, 5762), 'numpy.math.cos', 'np.math.cos', (['(angle / 180 * np.math.pi)'], {}), '(angle / 180 * np.math.pi)\n', (5736, 5762), True, 'import numpy as np\n'), ((5781, 5818), 'numpy.math.sin', 'np.math.sin', (['(angle / 180 * np.math.pi)'], {}), '(angle / 180 * np.math.pi)\n', (5792, 5818), True, 'import numpy as np\n'), ((5981, 6014), 'numpy.array', 'np.array', (['[width / 2, height / 2]'], {}), '([width / 2, height / 2])\n', (5989, 6014), True, 'import numpy as np\n'), ((6030, 6059), 'numpy.dot', 'np.dot', (['box1', 'rotate_matrix.T'], {}), '(box1, rotate_matrix.T)\n', (6036, 6059), True, 'import numpy as np\n'), ((6062, 6105), 'numpy.array', 'np.array', (['[width / 2 + dx, height / 2 + dy]'], {}), '([width / 2 + dx, height / 2 + dy])\n', (6070, 6105), True, 'import numpy as np\n'), ((6632, 6660), 'numpy.expand_dims', 
'np.expand_dims', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (6646, 6660), True, 'import numpy as np\n'), ((5573, 5622), 'numpy.random.uniform', 'np.random.uniform', (['shift_limit[0]', 'shift_limit[1]'], {}), '(shift_limit[0], shift_limit[1])\n', (5590, 5622), True, 'import numpy as np\n'), ((5651, 5700), 'numpy.random.uniform', 'np.random.uniform', (['shift_limit[0]', 'shift_limit[1]'], {}), '(shift_limit[0], shift_limit[1])\n', (5668, 5700), True, 'import numpy as np\n'), ((9095, 9112), 'keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (9109, 9112), True, 'from keras import backend as K\n'), ((9192, 9209), 'keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (9206, 9209), True, 'from keras import backend as K\n'), ((11860, 11875), 'keras.backend.sum', 'K.sum', (['y_true_f'], {}), '(y_true_f)\n', (11865, 11875), True, 'from keras import backend as K\n'), ((11878, 11893), 'keras.backend.sum', 'K.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (11883, 11893), True, 'from keras import backend as K\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict
import numpy as np
import nnabla as nn
import nnabla.functions as NF
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.model_trainers.model_trainer import TrainingVariables, rnn_support
from nnabla_rl.model_trainers.q_value.categorical_dqn_q_trainer import (CategoricalDQNQTrainer,
CategoricalDQNQTrainerConfig)
from nnabla_rl.models import ValueDistributionFunction
from nnabla_rl.utils.misc import create_variables
@dataclass
class CategoricalDDQNQTrainerConfig(CategoricalDQNQTrainerConfig):
    """Configuration for :class:`CategoricalDDQNQTrainer`.

    Currently adds nothing on top of :class:`CategoricalDQNQTrainerConfig`;
    it exists so the Categorical Double-DQN trainer has its own config type.
    """
    pass
class CategoricalDDQNQTrainer(CategoricalDQNQTrainer):
    """Categorical Double-DQN Q-function trainer.

    Differs from Categorical DQN only in how the target distribution is
    built: the greedy next action is selected with the *online* (train)
    network, while the probability mass for that action is evaluated with
    the *target* network (see ``_compute_target``).
    """
    # type declarations to type check with mypy
    # NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
    # See https://mypy.readthedocs.io/en/stable/class_basics.html for details
    _target_function: ValueDistributionFunction
    _prev_train_rnn_states: Dict[str, Dict[str, nn.Variable]]
    _prev_target_rnn_states: Dict[str, Dict[str, nn.Variable]]

    def __init__(self,
                 train_function: ValueDistributionFunction,
                 solvers: Dict[str, nn.solver.Solver],
                 target_function: ValueDistributionFunction,
                 env_info: EnvironmentInfo,
                 config: CategoricalDDQNQTrainerConfig = CategoricalDDQNQTrainerConfig()):
        # Keep references and empty RNN-state caches before the parent
        # __init__ runs, since the parent may build the training graph.
        self._train_function = train_function
        self._target_function = target_function
        self._prev_train_rnn_states = {}
        self._prev_target_rnn_states = {}
        super(CategoricalDDQNQTrainer, self).__init__(train_function, solvers, target_function, env_info, config)

    def support_rnn(self) -> bool:
        """This trainer supports recurrent (RNN) models."""
        return True

    def _compute_target(self, training_variables: TrainingVariables, **kwargs) -> nn.Variable:
        """Build the projected target value distribution (Double-DQN style).

        Returns:
            nn.Variable: target probabilities of shape (batch_size, n_atom).
        """
        batch_size = training_variables.batch_size
        gamma = training_variables.gamma
        reward = training_variables.reward
        non_terminal = training_variables.non_terminal
        s_next = training_variables.s_next
        N = self._target_function._n_atom
        v_max = self._config.v_max
        v_min = self._config.v_min

        # Double-DQN: pick the greedy next action with the ONLINE network ...
        prev_rnn_states = self._prev_train_rnn_states
        train_rnn_states = training_variables.rnn_states
        with rnn_support(self._train_function, prev_rnn_states, train_rnn_states, training_variables, self._config):
            a_next = self._train_function.as_q_function().argmax_q(s_next)

        # ... but evaluate its probability mass with the TARGET network.
        prev_rnn_states = self._prev_target_rnn_states
        with rnn_support(self._target_function, prev_rnn_states, train_rnn_states, training_variables, self._config):
            pj = self._target_function.probs(s_next, a_next)

        # Fixed support z_i = v_min + i * delta_z, broadcast over the batch.
        delta_z = (v_max - v_min) / (N - 1)
        z = np.asarray([v_min + i * delta_z for i in range(N)])
        z = np.broadcast_to(array=z, shape=(batch_size, N))
        z = nn.Variable.from_numpy_array(z)
        # Bellman backup of the support, clipped back into [v_min, v_max].
        target = reward + non_terminal * gamma * z
        Tz = NF.clip_by_value(target, v_min, v_max)
        assert Tz.shape == (batch_size, N)
        # Project the shifted support back onto the fixed atoms.
        mi = self._compute_projection(Tz, pj, N, v_max, v_min)
        return mi

    def _setup_training_variables(self, batch_size: int) -> TrainingVariables:
        """Extend the parent's training variables with the target network's RNN states."""
        training_variables = super()._setup_training_variables(batch_size)

        rnn_states = {}
        if self._target_function.is_recurrent():
            shapes = self._target_function.internal_state_shapes()
            rnn_state_variables = create_variables(batch_size, shapes)
            rnn_states[self._target_function.scope_name] = rnn_state_variables
        # NOTE: rnn_states for train_function is already generated by parent class
        training_variables.rnn_states.update(rnn_states)

        return training_variables
| [
"nnabla_rl.model_trainers.model_trainer.rnn_support",
"nnabla_rl.utils.misc.create_variables",
"nnabla.functions.clip_by_value",
"nnabla.Variable.from_numpy_array",
"numpy.broadcast_to"
] | [((3511, 3558), 'numpy.broadcast_to', 'np.broadcast_to', ([], {'array': 'z', 'shape': '(batch_size, N)'}), '(array=z, shape=(batch_size, N))\n', (3526, 3558), True, 'import numpy as np\n'), ((3571, 3602), 'nnabla.Variable.from_numpy_array', 'nn.Variable.from_numpy_array', (['z'], {}), '(z)\n', (3599, 3602), True, 'import nnabla as nn\n'), ((3667, 3705), 'nnabla.functions.clip_by_value', 'NF.clip_by_value', (['target', 'v_min', 'v_max'], {}), '(target, v_min, v_max)\n', (3683, 3705), True, 'import nnabla.functions as NF\n'), ((2975, 3081), 'nnabla_rl.model_trainers.model_trainer.rnn_support', 'rnn_support', (['self._train_function', 'prev_rnn_states', 'train_rnn_states', 'training_variables', 'self._config'], {}), '(self._train_function, prev_rnn_states, train_rnn_states,\n training_variables, self._config)\n', (2986, 3081), False, 'from nnabla_rl.model_trainers.model_trainer import TrainingVariables, rnn_support\n'), ((3224, 3331), 'nnabla_rl.model_trainers.model_trainer.rnn_support', 'rnn_support', (['self._target_function', 'prev_rnn_states', 'train_rnn_states', 'training_variables', 'self._config'], {}), '(self._target_function, prev_rnn_states, train_rnn_states,\n training_variables, self._config)\n', (3235, 3331), False, 'from nnabla_rl.model_trainers.model_trainer import TrainingVariables, rnn_support\n'), ((4161, 4197), 'nnabla_rl.utils.misc.create_variables', 'create_variables', (['batch_size', 'shapes'], {}), '(batch_size, shapes)\n', (4177, 4197), False, 'from nnabla_rl.utils.misc import create_variables\n')] |
"""
radtraq.plotting.self_consistency
---------------------
Module for plotting self-consistency histograms
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
from radtraq.utils.dataset_utils import get_height_variable_name
def plot_self_consistency(obj, variables=None, thresh=None):
    """
    Function for plotting self-consistency (2-D histogram) comparisons.

    Parameters
    ----------
    obj : xarray.Dataset
        ACT object containing vertical point data
    variables : dict
        Dictionary of variables to plot with format of
        {yvariable: {'variable': x-variable, 'bin_width': [1, 2], 'linreg': True}}
    thresh : dict
        dictionary of variables to threshold the data from. For example
        thresh = {'copol_correlation_coeff': 0.99}

    Returns
    -------
    ax : numpy.ndarray of matplotlib axes
        2-D array of axis handles for additional updates if needed

    """
    # BUGFIX: thresh_vars was only assigned when thresh was not None, which
    # raised a NameError below for the thresh=None default.
    thresh_vars = list(thresh.keys()) if thresh is not None else []
    comp_vars = [variables[k]['variable'] for k in variables.keys()]
    var = list(variables.keys()) + thresh_vars + comp_vars

    # Subset to the needed variables, flatten (time, height) into one
    # dimension and drop missing samples so hist2d gets clean 1-D arrays.
    new_obj = obj[var]
    height_var = get_height_variable_name(new_obj, variable=var[0])
    new_obj = new_obj.stack(z=('time', height_var))
    new_obj = new_obj.dropna(dim='z')
    if thresh is not None:
        for k in thresh:
            new_obj = new_obj.where(new_obj[k] > thresh[k], drop=True)

    # Set up a grid of nc=2 rows by nr=ceil(n/2) columns.  squeeze=False
    # keeps ax two-dimensional even for a single column so ax[i, j]
    # indexing is always valid.
    n_plots = len(variables.keys())
    nc = 2
    nr = int(np.ceil(n_plots / 2.))
    fig, ax = plt.subplots(nc, nr, figsize=(5 * nr, 4.25 * nc), squeeze=False)

    # Cycle through each plot and create comparison
    i = 0
    j = 0
    for k in variables:
        bin_width = variables[k].get('bin_width', [1, 1])
        comp_var = variables[k]['variable']
        # Number of bins derived from the data range and requested bin width.
        xbins = int((new_obj[comp_var].max() - new_obj[comp_var].min()) / bin_width[0])
        ybins = int((new_obj[k].max() - new_obj[k].min()) / bin_width[1])
        ax[i, j].hist2d(new_obj[comp_var], new_obj[k], bins=[xbins, ybins], cmin=1)
        ax[i, j].set_xlabel(new_obj[comp_var].attrs['long_name'])
        ax[i, j].set_ylabel(new_obj[k].attrs['long_name'])
        ax[i, j].set_title(new_obj[k].attrs['long_name'].split(',')[0] + ' vs \n' +
                           new_obj[comp_var].attrs['long_name'])
        if 'linreg' in variables[k]:
            # BUGFIX: annotate the current panel; previously this always drew
            # the regression line and text on ax[0, 0], overplotting panel 1.
            results = scipy.stats.linregress(new_obj[comp_var], new_obj[k])
            text = '%.2f' % results.intercept + ' + ' + '%.2f' % results.slope + 'x'
            ax[i, j].text(new_obj[comp_var].max(), new_obj[k].max(), text, ha='right', va='top')
            ax[i, j].plot(new_obj[comp_var].values, new_obj[comp_var].values * results.slope +
                          results.intercept, 'r')

        # BUGFIX: advance across the nr columns before moving to the next
        # row; wrapping at nc (the row count) walked off the axes grid for
        # more than four variables.
        j += 1
        if j >= nr:
            j = 0
            i += 1

    plt.tight_layout()
    return ax
| [
"scipy.stats.linregress",
"numpy.ceil",
"radtraq.utils.dataset_utils.get_height_variable_name",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] | [((1190, 1240), 'radtraq.utils.dataset_utils.get_height_variable_name', 'get_height_variable_name', (['new_obj'], {'variable': 'var[0]'}), '(new_obj, variable=var[0])\n', (1214, 1240), False, 'from radtraq.utils.dataset_utils import get_height_variable_name\n'), ((1571, 1620), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nc', 'nr'], {'figsize': '(5 * nr, 4.25 * nc)'}), '(nc, nr, figsize=(5 * nr, 4.25 * nc))\n', (1583, 1620), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2936), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2934, 2936), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1556), 'numpy.ceil', 'np.ceil', (['(n_plots / 2.0)'], {}), '(n_plots / 2.0)\n', (1541, 1556), True, 'import numpy as np\n'), ((2459, 2512), 'scipy.stats.linregress', 'scipy.stats.linregress', (['new_obj[comp_var]', 'new_obj[k]'], {}), '(new_obj[comp_var], new_obj[k])\n', (2481, 2512), False, 'import scipy\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ntsnet network wrapper."""
import math
import os
import time
import numpy as np
from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn
from mindspore.ops import functional as F
from mindspore.ops import operations as P
import mindspore.common.dtype as mstype
from mindspore.train.callback import Callback, ModelCheckpoint
from src.resnet import resnet50
from src.config import config
# Hyper-parameters read once from the shared config module.
m_for_scrutinizer = config.m_for_scrutinizer  # number of top part crops fed to the scrutinizer
K = config.topK  # number of proposal regions kept per image after NMS
input_size = config.input_size  # input image shape, e.g. (h, w) -- see generate_default_anchor_maps
num_classes = config.num_classes  # size of the classification output layers
lossLogName = config.lossLogName  # loss-log file name used by LossCallBack
def _fc(in_channel, out_channel):
    """Build a Dense layer with uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) init."""
    bound = 1 / math.sqrt(in_channel)
    # Draw weight before bias so the global NumPy RNG stream is consumed
    # in the same order as elsewhere in this module.
    w_init = np.random.uniform(-bound, bound, (out_channel, in_channel)).astype(np.float32)
    b_init = np.random.uniform(-bound, bound, out_channel).astype(np.float32)
    dense = nn.Dense(in_channel, out_channel, has_bias=True,
                     weight_init=Tensor(w_init), bias_init=Tensor(b_init))
    return dense.to_float(mstype.float32)
def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):
    """Build a Conv2d cell; weight and bias are drawn from
    uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) with fan_in = in_channels * k * k."""
    bound = 1 / math.sqrt(in_channels * kernel_size * kernel_size)
    w_shape = (out_channels, in_channels, kernel_size, kernel_size)
    # Weight is drawn before bias to keep the global NumPy RNG stream
    # identical to the original initialisation order.
    w_init = Tensor(np.random.uniform(-bound, bound, w_shape).astype(np.float32))
    b_init = Tensor(np.random.uniform(-bound, bound, (out_channels,)).astype(np.float32))
    return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                     stride=stride, padding=padding, pad_mode=pad_mode,
                     weight_init=w_init, has_bias=True, bias_init=b_init)
# Default anchor configuration: one entry per feature level (p3/p4/p5),
# each with its stride, base size and a grid of scales x aspect ratios.
_default_anchors_setting = (
    dict(layer='p3', stride=32, size=48, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p4', stride=64, size=96, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p5', stride=128, size=192, scale=[1, 2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
)
def generate_default_anchor_maps(anchors_setting=None, input_shape=input_size):
    """
    generate default anchor

    :param anchors_setting: all information of anchors
    :param input_shape: shape of input images, e.g. (h, w)
    :return: center_anchors: # anchors * 4 (oy, ox, h, w)
             edge_anchors: # anchors * 4 (y0, x0, y1, x1)
             anchor_area: # anchors * 1 (area)
    """
    if anchors_setting is None:
        anchors_setting = _default_anchors_setting

    # Accumulators start empty; each feature level appends its anchors.
    center_anchors = np.zeros((0, 4), dtype=np.float32)
    edge_anchors = np.zeros((0, 4), dtype=np.float32)
    anchor_areas = np.zeros((0,), dtype=np.float32)
    input_shape = np.array(input_shape, dtype=int)

    for anchor_info in anchors_setting:

        stride = anchor_info['stride']
        size = anchor_info['size']
        scales = anchor_info['scale']
        aspect_ratios = anchor_info['aspect_ratio']

        # Feature-map size at this stride.
        # BUGFIX: use the builtin ``int`` dtype -- the ``np.int`` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        output_map_shape = np.ceil(input_shape.astype(np.float32) / stride)
        output_map_shape = output_map_shape.astype(int)
        output_shape = tuple(output_map_shape) + (4,)

        # Anchor centers fall on a regular grid offset by half a stride.
        ostart = stride / 2.
        oy = np.arange(ostart, ostart + stride * output_shape[0], stride)
        oy = oy.reshape(output_shape[0], 1)
        ox = np.arange(ostart, ostart + stride * output_shape[1], stride)
        ox = ox.reshape(1, output_shape[1])
        center_anchor_map_template = np.zeros(output_shape, dtype=np.float32)
        center_anchor_map_template[:, :, 0] = oy
        center_anchor_map_template[:, :, 1] = ox
        for scale in scales:
            for aspect_ratio in aspect_ratios:
                center_anchor_map = center_anchor_map_template.copy()
                # Height/width so that h * w == (size * scale)^2 with the
                # requested h/w aspect ratio.
                center_anchor_map[:, :, 2] = size * scale / float(aspect_ratio) ** 0.5
                center_anchor_map[:, :, 3] = size * scale * float(aspect_ratio) ** 0.5

                # Convert (cy, cx, h, w) to corner format (y0, x0, y1, x1).
                edge_anchor_map = np.concatenate((center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / 2.,
                                                 center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.),
                                                axis=-1)
                anchor_area_map = center_anchor_map[..., 2] * center_anchor_map[..., 3]
                center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4)))
                edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4)))
                anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1)))

    return center_anchors, edge_anchors, anchor_areas
class Navigator(nn.Cell):
    """Navigator network.

    Three stride-reducing conv stages produce feature maps at decreasing
    resolution; 1x1 "tidy" convolutions turn each map into per-anchor
    scores, which are flattened and concatenated along axis 1.
    """

    def __init__(self):
        """Create the down-sampling and scoring convolutions."""
        super(Navigator, self).__init__()
        # Creation order matters: _conv draws its weights from the global
        # NumPy RNG, so reordering these lines would change initialisation.
        self.down1 = _conv(2048, 128, 3, 1, padding=1, pad_mode='pad')
        self.down2 = _conv(128, 128, 3, 2, padding=1, pad_mode='pad')
        self.down3 = _conv(128, 128, 3, 2, padding=1, pad_mode='pad')
        self.ReLU = nn.ReLU()
        self.tidy1 = _conv(128, 6, 1, 1, padding=0, pad_mode='same')
        self.tidy2 = _conv(128, 6, 1, 1, padding=0, pad_mode='same')
        self.tidy3 = _conv(128, 9, 1, 1, padding=0, pad_mode='same')
        self.opConcat = ops.Concat(axis=1)
        self.opReshape = ops.Reshape()

    def construct(self, x):
        """Return concatenated anchor scores of shape (batch, n_anchors, 1)."""
        batch = x.shape[0]
        scale1 = self.ReLU(self.down1(x))
        scale2 = self.ReLU(self.down2(scale1))
        scale3 = self.ReLU(self.down3(scale2))
        score1 = self.opReshape(self.tidy1(scale1), (batch, -1, 1))
        score2 = self.opReshape(self.tidy2(scale2), (batch, -1, 1))
        score3 = self.opReshape(self.tidy3(scale3), (batch, -1, 1))
        return self.opConcat((score1, score2, score3))
class NTS_NET(nn.Cell):
    """NTS-Net: Navigator-Teacher-Scrutinizer network.

    The backbone extracts features from the whole image; the navigator
    scores anchor regions; the top-K regions are cropped, resized and
    re-fed through the backbone; the scrutinizer classifies from the
    combined part+whole features and the teacher from the part features.
    """

    def __init__(self, topK=6, resnet50Path=""):
        """Build backbone, navigator, scrutinizer, teacher and graph ops.

        Args:
            topK (int): number of part regions kept per image.
            resnet50Path (str): optional checkpoint to preload the backbone.
        """
        super(NTS_NET, self).__init__()
        feature_extractor = resnet50(1001)
        if resnet50Path != "":
            param_dict = load_checkpoint(resnet50Path)
            load_param_into_net(feature_extractor, param_dict)
        self.feature_extractor = feature_extractor  # Backbone
        # Replace the backbone's classification head with an num_classes head.
        self.feature_extractor.end_point = _fc(512 * 4, num_classes)
        self.navigator = Navigator()  # Navigator
        self.topK = topK
        self.num_classes = num_classes
        self.scrutinizer = _fc(2048 * (m_for_scrutinizer + 1), num_classes)  # Scrutinizer
        self.teacher = _fc(512 * 4, num_classes)  # Teacher
        _, edge_anchors, _ = generate_default_anchor_maps()
        # Pad 224 px on every image border; anchors are shifted accordingly.
        self.pad_side = 224
        self.Pad_ops = ops.Pad(((0, 0), (0, 0), (self.pad_side, self.pad_side), (self.pad_side, self.pad_side)))
        self.np_edge_anchors = edge_anchors + 224
        self.edge_anchors = Tensor(self.np_edge_anchors, mstype.float32)
        self.opzeros = ops.Zeros()
        self.opones = ops.Ones()
        self.concat_op = ops.Concat(axis=1)
        self.nms = P.NMSWithMask(0.25)
        self.topK_op = ops.TopK(sorted=True)
        self.opReshape = ops.Reshape()
        self.opResizeLinear = ops.ResizeBilinear((224, 224))
        self.transpose = ops.Transpose()
        self.opsCropResize = ops.CropAndResize(method="bilinear_v2")
        # Score assigned to anchors suppressed by NMS so top-K never picks them.
        self.min_float_num = -65536.0
        self.selected_mask_shape = (1614,)  # total number of anchors -- TODO confirm against anchor generation
        self.unchosen_score = Tensor(self.min_float_num * np.ones(self.selected_mask_shape, np.float32),
                                     mstype.float32)
        self.gatherND = ops.GatherNd()
        self.gatherD = ops.GatherD()
        self.squeezeop = P.Squeeze()
        self.select = P.Select()
        self.perm = (1, 2, 0)  # CHW -> HWC for CropAndResize
        self.box_index = self.opzeros(((K,)), mstype.int32)
        self.crop_size = (224, 224)
        self.perm2 = (0, 3, 1, 2)  # NHWC -> NCHW after CropAndResize
        self.m_for_scrutinizer = m_for_scrutinizer
        self.sortop = ops.Sort(descending=True)
        self.stackop = ops.Stack()

    def construct(self, x):
        """Forward pass.

        Returns:
            tuple: (resnet_out, scrutinizer_out, teacher_out, top_k_info)
            where top_k_info holds the navigator scores of the K kept regions.
        """
        resnet_out, rpn_feature, feature = self.feature_extractor(x)
        x_pad = self.Pad_ops(x)
        batch_size = x.shape[0]
        # Navigator is trained through the ranking loss only, not backprop
        # through the backbone features.
        rpn_feature = F.stop_gradient(rpn_feature)
        rpn_score = self.navigator(rpn_feature)
        edge_anchors = self.edge_anchors
        top_k_info = []
        current_img_for_teachers = []
        for i in range(batch_size):
            # using navigator output as scores to nms anchors
            rpn_score_current_img = self.opReshape(rpn_score[i:i + 1:1, ::], (-1, 1))
            bbox_score = self.squeezeop(rpn_score_current_img)
            bbox_score_sorted, bbox_score_sorted_indices = self.sortop(bbox_score)
            bbox_score_sorted_concat = self.opReshape(bbox_score_sorted, (-1, 1))
            edge_anchors_sorted_concat = self.gatherND(edge_anchors,
                                                       self.opReshape(bbox_score_sorted_indices, (1614, 1)))
            bbox = self.concat_op((edge_anchors_sorted_concat, bbox_score_sorted_concat))
            _, _, selected_mask = self.nms(bbox)
            selected_mask = F.stop_gradient(selected_mask)
            bbox_score = self.squeezeop(bbox_score_sorted_concat)
            # Suppressed anchors get min_float_num so top-K skips them.
            scores_using = self.select(selected_mask, bbox_score, self.unchosen_score)
            # select the topk anchors and scores after nms
            _, topK_indices = self.topK_op(scores_using, self.topK)
            topK_indices = self.opReshape(topK_indices, (K, 1))
            bbox_topk = self.gatherND(bbox, topK_indices)
            # Column 4 of bbox is the navigator score for the kept region.
            top_k_info.append(self.opReshape(bbox_topk[::, 4:5:1], (-1,)))
            # crop from x_pad and resize to a fixed size using bilinear
            temp_pad = self.opReshape(x_pad[i:i + 1:1, ::, ::, ::], (3, 896, 896))
            temp_pad = self.transpose(temp_pad, self.perm)
            tensor_image = self.opReshape(temp_pad, (1,) + temp_pad.shape)
            tensor_box = self.gatherND(edge_anchors_sorted_concat, topK_indices)
            # Normalise box coords to [0, 1] for CropAndResize
            # (padded image is 896 px, so divide by 896 - 1).
            tensor_box = tensor_box / 895
            current_img_for_teacher = self.opsCropResize(tensor_image, tensor_box, self.box_index, self.crop_size)
            # the cropped image will be used to extract features and calculate loss
            current_img_for_teacher = self.opReshape(current_img_for_teacher, (-1, 224, 224, 3))
            current_img_for_teacher = self.transpose(current_img_for_teacher, self.perm2)
            current_img_for_teacher = self.opReshape(current_img_for_teacher, (-1, 3, 224, 224))
            current_img_for_teachers.append(current_img_for_teacher)
        feature = self.opReshape(feature, (batch_size, 1, -1))
        top_k_info = self.stackop(top_k_info)
        top_k_info = self.opReshape(top_k_info, (batch_size, self.topK))
        current_img_for_teachers = self.stackop(current_img_for_teachers)
        current_img_for_teachers = self.opReshape(current_img_for_teachers, (batch_size * self.topK, 3, 224, 224))
        current_img_for_teachers = F.stop_gradient(current_img_for_teachers)
        # extract features of topk cropped images
        _, _, pre_teacher_features = self.feature_extractor(current_img_for_teachers)
        pre_teacher_features = self.opReshape(pre_teacher_features, (batch_size, self.topK, 2048))
        # Scrutinizer sees the first m part features plus the whole-image feature.
        pre_scrutinizer_features = pre_teacher_features[::, 0:self.m_for_scrutinizer:1, ::]
        pre_scrutinizer_features = self.opReshape(pre_scrutinizer_features, (batch_size, self.m_for_scrutinizer, 2048))
        pre_scrutinizer_features = self.opReshape(self.concat_op((pre_scrutinizer_features, feature)), (batch_size, -1))
        # using topk cropped images, feed in scrutinizer and teacher, calculate loss
        scrutinizer_out = self.scrutinizer(pre_scrutinizer_features)
        teacher_out = self.teacher(pre_teacher_features)
        return resnet_out, scrutinizer_out, teacher_out, top_k_info
# (batch_size, 200),(batch_size, 200),(batch_size,6, 200),(batch_size,6)
class WithLossCell(nn.Cell):
    """Loss wrapper for NTS-Net.

    Combines four terms: backbone classification loss, scrutinizer
    classification loss, teacher classification loss over the K part
    crops, and a pairwise ranking loss that pushes the navigator scores
    to agree with the teacher's confidence ordering.
    """

    def __init__(self, backbone, loss_fn):
        """Wrap *backbone* (an NTS_NET) with classification loss *loss_fn*."""
        super(WithLossCell, self).__init__(auto_prefix=True)
        self._backbone = backbone
        self._loss_fn = loss_fn
        # on/off values used by OneHot encoding below.
        self.oneTensor = Tensor(1.0, mstype.float32)
        self.zeroTensor = Tensor(0.0, mstype.float32)
        self.opReshape = ops.Reshape()
        self.opOnehot = ops.OneHot()
        self.oplogsoftmax = ops.LogSoftmax()
        self.opZeros = ops.Zeros()
        self.opOnes = ops.Ones()
        self.opRelu = ops.ReLU()
        self.opGatherD = ops.GatherD()
        self.squeezeop = P.Squeeze()
        self.reducesumop = ops.ReduceSum()
        self.oprepeat = ops.repeat_elements
        self.cast = ops.Cast()

    def construct(self, image_data, label):
        """Compute the total NTS-Net training loss for a batch."""
        batch_size = image_data.shape[0]
        origin_label = label
        # Repeat each label K times so it lines up with the K part crops.
        labelx = self.opReshape(label, (-1, 1))
        origin_label_repeatk_2D = self.oprepeat(labelx, rep=K, axis=1)
        origin_label_repeatk = self.opReshape(origin_label_repeatk_2D, (-1,))
        origin_label_repeatk_unsqueeze = self.opReshape(origin_label_repeatk_2D, (-1, 1))
        resnet_out, scrutinizer_out, teacher_out, top_k_info = self._backbone(image_data)
        teacher_out = self.opReshape(teacher_out, (batch_size * K, -1))
        # Negative log-likelihood of the true class per crop, shape (batch, K).
        log_softmax_teacher_out = -1 * self.oplogsoftmax(teacher_out)
        log_softmax_teacher_out_result = self.opGatherD(log_softmax_teacher_out, 1, origin_label_repeatk_unsqueeze)
        log_softmax_teacher_out_result = self.opReshape(log_softmax_teacher_out_result, (batch_size, K))
        oneHotLabel = self.opOnehot(origin_label, num_classes, self.oneTensor, self.zeroTensor)
        # using resnet_out to calculate resnet_real_out_loss
        resnet_real_out_loss = self._loss_fn(resnet_out, oneHotLabel)
        # using scrutinizer_out to calculate scrutinizer_out_loss
        scrutinizer_out_loss = self._loss_fn(scrutinizer_out, oneHotLabel)
        # using teacher_out and top_k_info to calculate ranking loss
        loss = self.opZeros((), mstype.float32)
        num = top_k_info.shape[0]
        for i in range(K):
            # Hinge-style pairwise term: a crop with lower NLL (more
            # confident teacher) should have a higher navigator score.
            log_softmax_teacher_out_inlabel_unsqueeze = self.opReshape(log_softmax_teacher_out_result[::, i:i + 1:1],
                                                                       (-1, 1))
            compareX = log_softmax_teacher_out_result > log_softmax_teacher_out_inlabel_unsqueeze
            pivot = self.opReshape(top_k_info[::, i:i + 1:1], (-1, 1))
            information = 1 - pivot + top_k_info
            loss_p = information * compareX
            loss_p_temp = self.opRelu(loss_p)
            loss_p = self.reducesumop(loss_p_temp)
            loss += loss_p
        rank_loss = loss / num
        oneHotLabel2 = self.opOnehot(origin_label_repeatk, num_classes, self.oneTensor, self.zeroTensor)
        # using teacher_out to calculate teacher_loss
        teacher_loss = self._loss_fn(teacher_out, oneHotLabel2)
        total_loss = resnet_real_out_loss + rank_loss + scrutinizer_out_loss + teacher_loss
        return total_loss

    @property
    def backbone_network(self):
        """Return the wrapped NTS_NET backbone."""
        return self._backbone
class NtsnetModelCheckpoint(ModelCheckpoint):
    """
    The checkpoint callback class.

    It is called to combine with train process and save the model and network parameters after training.

    Note:
        In the distributed training scenario, please specify different directories for each training process
        to save the checkpoint file. Otherwise, the training may fail.

    Args:
        prefix (str): The prefix name of checkpoint files. Default: "CKP".
        directory (str): The path of the folder which will be saved in the checkpoint file. Default: None.
        ckconfig (CheckpointConfig): Checkpoint strategy configuration. Default: None.

    Raises:
        ValueError: If the prefix is invalid.
        TypeError: If the config is not CheckpointConfig type.
    """

    def __init__(self, prefix='CKP', directory=None, ckconfig=None,
                 device_num=1, device_id=0, args=None, run_modelart=False):
        super(NtsnetModelCheckpoint, self).__init__(prefix, directory, ckconfig)
        self.run_modelart = run_modelart
        self.device_num = device_num
        self.device_id = device_id
        self.args = args

    def _save_ckpt(self, cb_params, force_to_save=False):
        # Save via the parent, then optionally mirror the checkpoint to the
        # ModelArts output bucket (only from a single device / device 0).
        super()._save_ckpt(cb_params, force_to_save)
        if self.run_modelart and (self.device_num == 1 or self.device_id == 0):
            import moxing as mox
            # NOTE(review): `cur_file` and `cur_ckpoint_file` are not defined
            # anywhere in this module or method -- this branch will raise
            # NameError when run_modelart is enabled. Confirm where these
            # names should come from (likely the latest checkpoint path).
            mox.file.copy_parallel(src_url=cur_file, dst_url=os.path.join(self.args.train_url, cur_ckpoint_file))
class LossCallBack(Callback):
    """
    Monitor the loss in training.

    If the loss is NAN or INF terminating training.

    Note:
        If per_print_times is 0 do not print loss.

    Args:
        per_print_times (int): Print loss every times. Default: 1.
    """

    def __init__(self, per_print_times=1, rank_id=0, local_output_url="",
                 device_num=1, device_id=0, args=None, run_modelart=False):
        super(LossCallBack, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("print_step must be int and >= 0.")
        self._per_print_times = per_print_times
        self.count = 0  # number of steps accumulated into rpn_loss_sum
        self.rpn_loss_sum = 0  # running sum of the training loss
        self.rpn_cls_loss_sum = 0
        self.rpn_reg_loss_sum = 0
        self.rank_id = rank_id
        self.local_output_url = local_output_url
        self.device_num = device_num
        self.device_id = device_id
        self.args = args
        self.time_stamp_first = time.time()  # wall-clock start, for elapsed-time logging
        self.run_modelart = run_modelart

    def step_end(self, run_context):
        """
        Called after each step finished.

        Args:
            run_context (RunContext): Include some information of the model.
        """
        cb_params = run_context.original_args()
        rpn_loss = cb_params.net_outputs.asnumpy()
        self.count += 1
        self.rpn_loss_sum += float(rpn_loss)
        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
        if self.count >= 1:
            time_stamp_current = time.time()
            # Average of all losses seen so far (count is never reset).
            rpn_loss = self.rpn_loss_sum / self.count
            log_path = os.path.join(self.local_output_url, lossLogName)
            # BUGFIX: use a context manager so the log file is closed even if
            # a write raises; the original open()/close() pair leaked the
            # handle on exceptions.
            with open(log_path, "a+") as loss_file:
                loss_file.write("%lu epoch: %s step: %s ,rpn_loss: %.5f" %
                                (time_stamp_current - self.time_stamp_first, cb_params.cur_epoch_num,
                                 cur_step_in_epoch, rpn_loss))
                loss_file.write("\n")
            if self.run_modelart and (self.device_num == 1 or self.device_id == 0):
                import moxing as mox
                # Mirror the loss log to the ModelArts output bucket.
                mox.file.copy_parallel(src_url=log_path,
                                       dst_url=os.path.join(self.args.train_url, lossLogName))
| [
"mindspore.ops.operations.Squeeze",
"mindspore.ops.ReLU",
"math.sqrt",
"numpy.array",
"mindspore.ops.Pad",
"mindspore.ops.Concat",
"mindspore.ops.Ones",
"numpy.arange",
"mindspore.ops.operations.NMSWithMask",
"mindspore.ops.GatherNd",
"mindspore.ops.Sort",
"mindspore.ops.OneHot",
"mindspore.... | [((2122, 2295), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'pad_mode': 'pad_mode', 'weight_init': 'weights', 'has_bias': '(True)', 'bias_init': 'biass'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, pad_mode=pad_mode, weight_init=weights, has_bias=True,\n bias_init=biass)\n', (2131, 2295), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((3213, 3247), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (3221, 3247), True, 'import numpy as np\n'), ((3267, 3301), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (3275, 3301), True, 'import numpy as np\n'), ((3321, 3353), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.float32'}), '((0,), dtype=np.float32)\n', (3329, 3353), True, 'import numpy as np\n'), ((3372, 3404), 'numpy.array', 'np.array', (['input_shape'], {'dtype': 'int'}), '(input_shape, dtype=int)\n', (3380, 3404), True, 'import numpy as np\n'), ((1322, 1343), 'math.sqrt', 'math.sqrt', (['in_channel'], {}), '(in_channel)\n', (1331, 1343), False, 'import math\n'), ((1866, 1916), 'math.sqrt', 'math.sqrt', (['(in_channels * kernel_size * kernel_size)'], {}), '(in_channels * kernel_size * kernel_size)\n', (1875, 1916), False, 'import math\n'), ((3842, 3902), 'numpy.arange', 'np.arange', (['ostart', '(ostart + stride * output_shape[0])', 'stride'], {}), '(ostart, ostart + stride * output_shape[0], stride)\n', (3851, 3902), True, 'import numpy as np\n'), ((3960, 4020), 'numpy.arange', 'np.arange', (['ostart', '(ostart + stride * output_shape[1])', 'stride'], {}), '(ostart, ostart + stride * output_shape[1], stride)\n', (3969, 4020), True, 'import numpy as np\n'), ((4102, 4142), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'np.float32'}), '(output_shape, 
dtype=np.float32)\n', (4110, 4142), True, 'import numpy as np\n'), ((5648, 5657), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5655, 5657), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((5889, 5907), 'mindspore.ops.Concat', 'ops.Concat', ([], {'axis': '(1)'}), '(axis=1)\n', (5899, 5907), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((5933, 5946), 'mindspore.ops.Reshape', 'ops.Reshape', ([], {}), '()\n', (5944, 5946), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((6631, 6645), 'src.resnet.resnet50', 'resnet50', (['(1001)'], {}), '(1001)\n', (6639, 6645), False, 'from src.resnet import resnet50\n'), ((7303, 7396), 'mindspore.ops.Pad', 'ops.Pad', (['((0, 0), (0, 0), (self.pad_side, self.pad_side), (self.pad_side, self.pad_side)\n )'], {}), '(((0, 0), (0, 0), (self.pad_side, self.pad_side), (self.pad_side,\n self.pad_side)))\n', (7310, 7396), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7471, 7515), 'mindspore.Tensor', 'Tensor', (['self.np_edge_anchors', 'mstype.float32'], {}), '(self.np_edge_anchors, mstype.float32)\n', (7477, 7515), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7539, 7550), 'mindspore.ops.Zeros', 'ops.Zeros', ([], {}), '()\n', (7548, 7550), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7573, 7583), 'mindspore.ops.Ones', 'ops.Ones', ([], {}), '()\n', (7581, 7583), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7609, 7627), 'mindspore.ops.Concat', 'ops.Concat', ([], {'axis': '(1)'}), '(axis=1)\n', (7619, 7627), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7647, 7666), 'mindspore.ops.operations.NMSWithMask', 'P.NMSWithMask', (['(0.25)'], {}), '(0.25)\n', (7660, 7666), True, 'from mindspore.ops 
import operations as P\n'), ((7690, 7711), 'mindspore.ops.TopK', 'ops.TopK', ([], {'sorted': '(True)'}), '(sorted=True)\n', (7698, 7711), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7737, 7750), 'mindspore.ops.Reshape', 'ops.Reshape', ([], {}), '()\n', (7748, 7750), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7781, 7811), 'mindspore.ops.ResizeBilinear', 'ops.ResizeBilinear', (['(224, 224)'], {}), '((224, 224))\n', (7799, 7811), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7837, 7852), 'mindspore.ops.Transpose', 'ops.Transpose', ([], {}), '()\n', (7850, 7852), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((7882, 7921), 'mindspore.ops.CropAndResize', 'ops.CropAndResize', ([], {'method': '"""bilinear_v2"""'}), "(method='bilinear_v2')\n", (7899, 7921), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((8185, 8199), 'mindspore.ops.GatherNd', 'ops.GatherNd', ([], {}), '()\n', (8197, 8199), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((8223, 8236), 'mindspore.ops.GatherD', 'ops.GatherD', ([], {}), '()\n', (8234, 8236), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((8262, 8273), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', ([], {}), '()\n', (8271, 8273), True, 'from mindspore.ops import operations as P\n'), ((8296, 8306), 'mindspore.ops.operations.Select', 'P.Select', ([], {}), '()\n', (8304, 8306), True, 'from mindspore.ops import operations as P\n'), ((8540, 8565), 'mindspore.ops.Sort', 'ops.Sort', ([], {'descending': '(True)'}), '(descending=True)\n', (8548, 8565), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((8589, 8600), 'mindspore.ops.Stack', 'ops.Stack', ([], {}), '()\n', (8598, 8600), False, 'from mindspore 
import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((8816, 8844), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['rpn_feature'], {}), '(rpn_feature)\n', (8831, 8844), True, 'from mindspore.ops import functional as F\n'), ((11632, 11673), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['current_img_for_teachers'], {}), '(current_img_for_teachers)\n', (11647, 11673), True, 'from mindspore.ops import functional as F\n'), ((12903, 12930), 'mindspore.Tensor', 'Tensor', (['(1.0)', 'mstype.float32'], {}), '(1.0, mstype.float32)\n', (12909, 12930), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((12957, 12984), 'mindspore.Tensor', 'Tensor', (['(0.0)', 'mstype.float32'], {}), '(0.0, mstype.float32)\n', (12963, 12984), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13010, 13023), 'mindspore.ops.Reshape', 'ops.Reshape', ([], {}), '()\n', (13021, 13023), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13048, 13060), 'mindspore.ops.OneHot', 'ops.OneHot', ([], {}), '()\n', (13058, 13060), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13089, 13105), 'mindspore.ops.LogSoftmax', 'ops.LogSoftmax', ([], {}), '()\n', (13103, 13105), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13129, 13140), 'mindspore.ops.Zeros', 'ops.Zeros', ([], {}), '()\n', (13138, 13140), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13163, 13173), 'mindspore.ops.Ones', 'ops.Ones', ([], {}), '()\n', (13171, 13173), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13196, 13206), 'mindspore.ops.ReLU', 'ops.ReLU', ([], {}), '()\n', (13204, 13206), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13232, 13245), 
'mindspore.ops.GatherD', 'ops.GatherD', ([], {}), '()\n', (13243, 13245), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13271, 13282), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', ([], {}), '()\n', (13280, 13282), True, 'from mindspore.ops import operations as P\n'), ((13310, 13325), 'mindspore.ops.ReduceSum', 'ops.ReduceSum', ([], {}), '()\n', (13323, 13325), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((13390, 13400), 'mindspore.ops.Cast', 'ops.Cast', ([], {}), '()\n', (13398, 13400), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((18388, 18399), 'time.time', 'time.time', ([], {}), '()\n', (18397, 18399), False, 'import time\n'), ((1537, 1625), 'mindspore.nn.Dense', 'nn.Dense', (['in_channel', 'out_channel'], {'has_bias': '(True)', 'weight_init': 'weight', 'bias_init': 'bias'}), '(in_channel, out_channel, has_bias=True, weight_init=weight,\n bias_init=bias)\n', (1545, 1625), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((6702, 6731), 'mindspore.load_checkpoint', 'load_checkpoint', (['resnet50Path'], {}), '(resnet50Path)\n', (6717, 6731), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((6744, 6794), 'mindspore.load_param_into_net', 'load_param_into_net', (['feature_extractor', 'param_dict'], {}), '(feature_extractor, param_dict)\n', (6763, 6794), False, 'from mindspore import ops, load_checkpoint, load_param_into_net, Tensor, nn\n'), ((9753, 9783), 'mindspore.ops.functional.stop_gradient', 'F.stop_gradient', (['selected_mask'], {}), '(selected_mask)\n', (9768, 9783), True, 'from mindspore.ops import functional as F\n'), ((18955, 18966), 'time.time', 'time.time', ([], {}), '()\n', (18964, 18966), False, 'import time\n'), ((1364, 1421), 'numpy.random.uniform', 'np.random.uniform', (['(-stdv)', 'stdv', '(out_channel, in_channel)'], {}), '(-stdv, 
stdv, (out_channel, in_channel))\n', (1381, 1421), True, 'import numpy as np\n'), ((1460, 1503), 'numpy.random.uniform', 'np.random.uniform', (['(-stdv)', 'stdv', 'out_channel'], {}), '(-stdv, stdv, out_channel)\n', (1477, 1503), True, 'import numpy as np\n'), ((1938, 1975), 'numpy.random.uniform', 'np.random.uniform', (['(-stdv)', 'stdv', 'shape'], {}), '(-stdv, stdv, shape)\n', (1955, 1975), True, 'import numpy as np\n'), ((2048, 2090), 'numpy.random.uniform', 'np.random.uniform', (['(-stdv)', 'stdv', 'shape_bias'], {}), '(-stdv, stdv, shape_bias)\n', (2065, 2090), True, 'import numpy as np\n'), ((4595, 4757), 'numpy.concatenate', 'np.concatenate', (['(center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / 2.0, \n center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.0)'], {'axis': '(-1)'}), '((center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / \n 2.0, center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.0),\n axis=-1)\n', (4609, 4757), True, 'import numpy as np\n'), ((8061, 8106), 'numpy.ones', 'np.ones', (['self.selected_mask_shape', 'np.float32'], {}), '(self.selected_mask_shape, np.float32)\n', (8068, 8106), True, 'import numpy as np\n'), ((19050, 19098), 'os.path.join', 'os.path.join', (['self.local_output_url', 'lossLogName'], {}), '(self.local_output_url, lossLogName)\n', (19062, 19098), False, 'import os\n'), ((17352, 17403), 'os.path.join', 'os.path.join', (['self.args.train_url', 'cur_ckpoint_file'], {}), '(self.args.train_url, cur_ckpoint_file)\n', (17364, 17403), False, 'import os\n'), ((19566, 19614), 'os.path.join', 'os.path.join', (['self.local_output_url', 'lossLogName'], {}), '(self.local_output_url, lossLogName)\n', (19578, 19614), False, 'import os\n'), ((19663, 19709), 'os.path.join', 'os.path.join', (['self.args.train_url', 'lossLogName'], {}), '(self.args.train_url, lossLogName)\n', (19675, 19709), False, 'import os\n')] |
import cv2
import numpy as np
import pickle
import constants
from PIL import Image, ImageTk
from tkinter import messagebox
import time
import util
# Module-level state shared between get_hand_hist() and the render() loop.
cam = None  # cv2.VideoCapture handle, opened in get_hand_hist()
imgCrop = hist = None  # stacked skin-sample patches / calibrated HSV histogram
pic = vstream = raw = None  # pic: frame shown in the Tk label; vstream/raw unused here — TODO confirm against rest of file
def build_squares(img):
    """Draw the hand-placement sampling grid on *img* and collect the pixels.

    A 10x5 grid of 13x13 squares (10 px apart) is sampled from *img*; the
    patches are stacked vertically into one array that the caller turns into
    an HSV histogram.  A single green rectangle outlining the grid area is
    drawn onto *img* as a side effect.

    Returns the vertically stacked BGR patches (ndarray), or None if the
    grid is empty.
    """
    x, y, w, h = 450, 180, 13, 13
    d = 10  # gap between squares
    imgCrop = None
    x1, y1 = x, y  # remember the grid origin for the outline rectangle
    for _row in range(10):
        for _col in range(5):
            # `imgCrop is None` is already a plain bool; the original
            # wrapped it in np.any() for no reason.
            if imgCrop is None:
                imgCrop = img[y:y + h, x:x + w]
            else:
                imgCrop = np.vstack((imgCrop, img[y:y + h, x:x + w]))
            x += w + d
        # NOTE(review): rows after the first restart at x=420 although the
        # grid origin is 450 — looks like a typo, kept to preserve behavior;
        # confirm against the original project.
        x = 420
        y += h + d
    cv2.rectangle(img, (x1, y1), (x1 + (w + d) * 5, y1 + (h + d) * 5), (0, 255, 0), 2)
    return imgCrop
def get_hand_hist():
    """Open the capture device, configure the frame size, and start render().

    Tries camera index 1 first (external camera) and falls back to the
    default device 0 when no frame can be read.
    """
    global cam
    cam = cv2.VideoCapture(1)
    # Idiom fix: `if not ...` instead of comparing the grab flag to False.
    if not cam.read()[0]:
        cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, constants.frame_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, constants.frame_height)
    render()
flagC = False
def render():
    """One frame of the calibration loop; reschedules itself via Tk .after().

    Reads a frame from the global camera, optionally (re)computes the skin
    histogram from the sampling grid, back-projects it to a binary hand
    mask, and pushes the resulting preview image into the Tk label.
    Mutates the module globals imgCrop, hist, pic and flagC, plus several
    flags on the `constants` module.
    """
    global cam, imgCrop, hist, pic, flagC
    img = cam.read()[1]
    img = cv2.flip(img, 1)  # mirror so the preview behaves like a mirror
    bordersize=1
    img=cv2.copyMakeBorder(img, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=0 )
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    thresh = None
    if constants.flagCalibrate:
        # User requested calibration: build an H-S histogram from the
        # grid patches collected by build_squares() on the previous frame.
        constants.flagCalibrate = False
        constants.calibrated = True
        flagC = True
        print("recalibrate")
        hsvCrop = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2HSV)
        hist = cv2.calcHist([hsvCrop], [0, 1], None, [180, 256], [0, 180, 0, 256])
        cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    elif constants.flagSave:
        # User accepted the calibration: persist the histogram and stop.
        constants.flagSave = False
        print("Stopped cam")
        cam.release()
        cv2.destroyAllWindows()
        with open("hist", "wb")as f:
            pickle.dump(hist, f)
        constants.streamState = True
        return
    if flagC:
        # Back-project the calibrated histogram onto the current frame and
        # clean the probability map up into a solid binary mask.
        dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        cv2.filter2D(dst, -1, disc, dst)
        blur = cv2.GaussianBlur(dst, (11, 11), 0)
        blur = cv2.medianBlur(blur, 15)
        ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        thresh = cv2.merge((thresh, thresh, thresh))
        res = cv2.bitwise_and(img, thresh)  # NOTE(review): `res` is never used
    # cv2.imshow("res", thresh)
    if not constants.flagSave:
        imgCrop = build_squares(img)
    # Decide which image to preview: raw camera frame or thresholded mask.
    if constants.streamState or not flagC:
        pic = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        build_squares(pic)
    else:
        cv2.cvtColor(thresh, cv2.COLOR_BGR2RGB)  # NOTE(review): return value discarded — likely meant thresh = cv2.cvtColor(...)
        build_squares(thresh)
        pic = thresh
    # Convert to a Tk-compatible image and keep a reference on the label so
    # it is not garbage-collected.
    timg = Image.fromarray(pic).resize(constants.stream_dimens, Image.ANTIALIAS)
    timgtk = ImageTk.PhotoImage(image = timg)
    constants.lblTypeCalibrateStream.imgtk = timgtk
    constants.lblTypeCalibrateStream.configure(image = timgtk)
constants.lblTypeCalibrateStream.after(1, render) | [
"cv2.rectangle",
"cv2.normalize",
"cv2.filter2D",
"cv2.destroyAllWindows",
"cv2.calcHist",
"cv2.calcBackProject",
"cv2.threshold",
"cv2.medianBlur",
"numpy.vstack",
"constants.lblTypeCalibrateStream.after",
"PIL.ImageTk.PhotoImage",
"cv2.merge",
"numpy.any",
"cv2.cvtColor",
"constants.lb... | [((517, 603), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x1 + (w + d) * 5, y1 + (h + d) * 5)', '(0, 255, 0)', '(2)'], {}), '(img, (x1, y1), (x1 + (w + d) * 5, y1 + (h + d) * 5), (0, 255,\n 0), 2)\n', (530, 603), False, 'import cv2\n'), ((657, 676), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (673, 676), False, 'import cv2\n'), ((956, 972), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (964, 972), False, 'import cv2\n'), ((992, 1130), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img'], {'top': 'bordersize', 'bottom': 'bordersize', 'left': 'bordersize', 'right': 'bordersize', 'borderType': 'cv2.BORDER_CONSTANT', 'value': '(0)'}), '(img, top=bordersize, bottom=bordersize, left=bordersize,\n right=bordersize, borderType=cv2.BORDER_CONSTANT, value=0)\n', (1010, 1130), False, 'import cv2\n'), ((1136, 1172), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1148, 1172), False, 'import cv2\n'), ((2512, 2542), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'timg'}), '(image=timg)\n', (2530, 2542), False, 'from PIL import Image, ImageTk\n'), ((2595, 2651), 'constants.lblTypeCalibrateStream.configure', 'constants.lblTypeCalibrateStream.configure', ([], {'image': 'timgtk'}), '(image=timgtk)\n', (2637, 2651), False, 'import constants\n'), ((2655, 2704), 'constants.lblTypeCalibrateStream.after', 'constants.lblTypeCalibrateStream.after', (['(1)', 'render'], {}), '(1, render)\n', (2693, 2704), False, 'import constants\n'), ((711, 730), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (727, 730), False, 'import cv2\n'), ((1331, 1371), 'cv2.cvtColor', 'cv2.cvtColor', (['imgCrop', 'cv2.COLOR_BGR2HSV'], {}), '(imgCrop, cv2.COLOR_BGR2HSV)\n', (1343, 1371), False, 'import cv2\n'), ((1381, 1448), 'cv2.calcHist', 'cv2.calcHist', (['[hsvCrop]', '[0, 1]', 'None', '[180, 256]', '[0, 180, 0, 256]'], {}), '([hsvCrop], [0, 1], None, [180, 256], [0, 
180, 0, 256])\n', (1393, 1448), False, 'import cv2\n'), ((1451, 1501), 'cv2.normalize', 'cv2.normalize', (['hist', 'hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(hist, hist, 0, 255, cv2.NORM_MINMAX)\n', (1464, 1501), False, 'import cv2\n'), ((1737, 1798), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0, 1]', 'hist', '[0, 180, 0, 256]', '(1)'], {}), '([hsv], [0, 1], hist, [0, 180, 0, 256], 1)\n', (1756, 1798), False, 'import cv2\n'), ((1808, 1862), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(10, 10)'], {}), '(cv2.MORPH_ELLIPSE, (10, 10))\n', (1833, 1862), False, 'import cv2\n'), ((1865, 1897), 'cv2.filter2D', 'cv2.filter2D', (['dst', '(-1)', 'disc', 'dst'], {}), '(dst, -1, disc, dst)\n', (1877, 1897), False, 'import cv2\n'), ((1907, 1941), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['dst', '(11, 11)', '(0)'], {}), '(dst, (11, 11), 0)\n', (1923, 1941), False, 'import cv2\n'), ((1951, 1975), 'cv2.medianBlur', 'cv2.medianBlur', (['blur', '(15)'], {}), '(blur, 15)\n', (1965, 1975), False, 'import cv2\n'), ((1992, 2056), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2005, 2056), False, 'import cv2\n'), ((2068, 2103), 'cv2.merge', 'cv2.merge', (['(thresh, thresh, thresh)'], {}), '((thresh, thresh, thresh))\n', (2077, 2103), False, 'import cv2\n'), ((2112, 2140), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'thresh'], {}), '(img, thresh)\n', (2127, 2140), False, 'import cv2\n'), ((2278, 2314), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2290, 2314), False, 'import cv2\n'), ((2345, 2384), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh', 'cv2.COLOR_BGR2RGB'], {}), '(thresh, cv2.COLOR_BGR2RGB)\n', (2357, 2384), False, 'import cv2\n'), ((351, 374), 'numpy.any', 'np.any', (['(imgCrop is None)'], {}), '(imgCrop is None)\n', (357, 374), True, 'import numpy as 
np\n'), ((1598, 1621), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1619, 1621), False, 'import cv2\n'), ((2432, 2452), 'PIL.Image.fromarray', 'Image.fromarray', (['pic'], {}), '(pic)\n', (2447, 2452), False, 'from PIL import Image, ImageTk\n'), ((435, 478), 'numpy.vstack', 'np.vstack', (['(imgCrop, img[y:y + h, x:x + w])'], {}), '((imgCrop, img[y:y + h, x:x + w]))\n', (444, 478), True, 'import numpy as np\n'), ((1656, 1676), 'pickle.dump', 'pickle.dump', (['hist', 'f'], {}), '(hist, f)\n', (1667, 1676), False, 'import pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ## ###############################################
#
# deconvolucion.py
# Gestiona el proceso de deconvolucion
#
# Autor: <NAME>
# License: MIT
#
# ## ###############################################
from time import time
from time import sleep
import os
import sys
import tifffile
import numpy as np
from progress.bar import Bar, ChargingBar
import src.interfaceTools as it
import src.imageFunctions as imf
from .deconvTF import deconvolveTF
import src.tiff as tif
def deconvolutionTiff(img, psf, iterations):
    """Deconvolve a multichannel (optionally multi-frame) TIFF stack.

    Parameters
    ----------
    img : ndarray
        3-D stack assumed to be (channel, y, x), or a 4-D stack that is
        either (frame, y, x, rgb) when imf.istiffRGB() matches, or
        (z, channel, y, x) otherwise — TODO confirm layouts against tif module.
    psf : ndarray
        Point-spread function with a layout matching ``img``.
    iterations : int
        Iteration count forwarded to deconvolveTF.

    Returns
    -------
    ndarray
        int16 array of ``img.shape`` with each channel normalized.
    """
    # NOTE: the original contained debug leftovers here — an in-loop
    # `from skimage import io` plus a hard-coded load of
    # '/home/charlie/test/psf_0005.tif' that was only printed. Removed.
    deconv_list = np.zeros(img.shape, dtype="int16")
    if img.ndim == 3:
        for c in range(img.shape[0]):
            deconv = deconvolveTF(img[c, :, :], psf[c, :, :], iterations)
            deconv_list[c, :, :] = imf.normalizar(deconv)
            it.printMessage('Channel ' + str(c + 1) + ' deconvolved')
    if img.ndim == 4:
        if imf.istiffRGB(img.shape):
            for c in range(img.shape[0]):
                # BUG FIX: the original passed an extra, undefined `weight`
                # argument (NameError at runtime, and deconvolutionRGB only
                # accepts (img, psf, iterations)).
                deconv = deconvolutionRGB(img[c, :, :, :], psf[c, :, :], iterations)
                it.printMessage('Channel ' + str(c + 1) + ' deconvolved')
                deconv_list[c, :, :, :] = deconv
        else:
            for c in range(img.shape[1]):
                bar = Bar("\nChannel " + str(c + 1) + ' :', max=img.shape[0])
                for z in range(img.shape[0]):
                    deconv = deconvolveTF(img[z, c, :, :], psf[z, c, :, :], iterations)
                    deconv_list[z, c, :, :] = imf.normalizar(deconv)
                    bar.next()
                it.printMessage('Channel ' + str(c + 1) + ' deconvolved')
                bar.finish()
    return deconv_list
def deconvolutionRGB(img, psf, iterations):
    """Deconvolve each of the three color planes of an RGB image.

    Runs deconvolveTF independently on channels 0..2 of ``img`` against the
    matching plane of ``psf`` and normalizes each result, returning a float
    array of ``img.shape``.
    """
    result = np.zeros(img.shape)
    for channel in range(3):
        plane = deconvolveTF(img[:, :, channel], psf[:, :, channel], iterations)
        result[:, :, channel] = imf.normalizar(plane)
    return result
def deconvolution1Frame(img, psf, iterations):
    """Deconvolve a single 2-D frame and return the normalized result.

    Parameters
    ----------
    img, psf : ndarray
        2-D image and matching point-spread function.
    iterations : int
        Iteration count forwarded to deconvolveTF.
    """
    # Cleanup: the original pre-allocated an int16 zeros array that was
    # immediately overwritten, and printed psf.max() as a debug leftover.
    deconv = deconvolveTF(img, psf, iterations)
    # Cast to uint8 before normalizing, matching the original behavior.
    return imf.normalizar(np.uint8(deconv))
def deconvolutionMain(img_tensor, psf_tensor, i, nameFile, metadata):
    """Dispatch deconvolution based on the dimensionality of the input.

    2-D input goes to deconvolution1Frame, 3-D to deconvolutionRGB or
    deconvolutionTiff depending on imf.istiffRGB, and 4-D always to
    deconvolutionTiff.  Prints progress and timing, and returns the
    deconvolved stack as uint16.

    NOTE(review): `metadata` and the computed `savepath` are never used in
    this function, and `global message` refers to a name not assigned here
    — confirm whether these are vestigial.
    """
    global message
    to=time()  # start timestamp for the runtime report below
    path = os.path.dirname(os.path.realpath(sys.argv[0])) #Working directory
    savepath = os.path.join(path,'deconvolutions/Deconvolution_'+nameFile.split('.')[0]+' i-'+str(i)+'.tif')
    #tifffile.imsave(path + '/deconvolutions/'+nameFile.split('.')[0]+'_normalized.tif', np.uint16(img_tensor*(65535/img_tensor.max())), imagej=True)
    print(img_tensor.shape)
    print(psf_tensor.shape)
    it.printMessage('Starting deconvolution')
    if(img_tensor.ndim==2):
        tiffdeconv = deconvolution1Frame(img_tensor,psf_tensor,i)
    if(img_tensor.ndim==3):
        # 3-D data is either a single RGB image or a channel stack.
        if(imf.istiffRGB(img_tensor.shape)):
            tiffdeconv = deconvolutionRGB(img_tensor,psf_tensor,i)
        else:
            tiffdeconv = deconvolutionTiff(img_tensor,psf_tensor,i)
    if(img_tensor.ndim==4):
        tiffdeconv = deconvolutionTiff(img_tensor,psf_tensor,i)
    deconvolution_matrix = np.uint16(tiffdeconv)
    it.printMessage('Deconvolution successful, end of execution')
    (m,s) = it.getFormatTime(time() - to)
    it.printMessage("Runtime: "+str(m)+" minutes, "+str(s)+" seconds")
    return deconvolution_matrix
| [
"numpy.uint8",
"src.imageFunctions.normalizar",
"src.imageFunctions.istiffRGB",
"os.path.realpath",
"numpy.zeros",
"skimage.io.imread",
"src.interfaceTools.printMessage",
"numpy.uint16",
"time.time"
] | [((626, 660), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': '"""int16"""'}), "(img.shape, dtype='int16')\n", (634, 660), True, 'import numpy as np\n'), ((1963, 1982), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1971, 1982), True, 'import numpy as np\n'), ((2286, 2320), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': '"""int16"""'}), "(img.shape, dtype='int16')\n", (2294, 2320), True, 'import numpy as np\n'), ((2666, 2672), 'time.time', 'time', ([], {}), '()\n', (2670, 2672), False, 'from time import time\n'), ((3054, 3095), 'src.interfaceTools.printMessage', 'it.printMessage', (['"""Starting deconvolution"""'], {}), "('Starting deconvolution')\n", (3069, 3095), True, 'import src.interfaceTools as it\n'), ((3480, 3501), 'numpy.uint16', 'np.uint16', (['tiffdeconv'], {}), '(tiffdeconv)\n', (3489, 3501), True, 'import numpy as np\n'), ((3505, 3566), 'src.interfaceTools.printMessage', 'it.printMessage', (['"""Deconvolution successful, end of execution"""'], {}), "('Deconvolution successful, end of execution')\n", (3520, 3566), True, 'import src.interfaceTools as it\n'), ((1176, 1200), 'src.imageFunctions.istiffRGB', 'imf.istiffRGB', (['img.shape'], {}), '(img.shape)\n', (1189, 1200), True, 'import src.imageFunctions as imf\n'), ((2120, 2142), 'src.imageFunctions.normalizar', 'imf.normalizar', (['deconv'], {}), '(deconv)\n', (2134, 2142), True, 'import src.imageFunctions as imf\n'), ((2419, 2435), 'numpy.uint8', 'np.uint8', (['deconv'], {}), '(deconv)\n', (2427, 2435), True, 'import numpy as np\n'), ((2698, 2727), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2714, 2727), False, 'import os\n'), ((3211, 3242), 'src.imageFunctions.istiffRGB', 'imf.istiffRGB', (['img_tensor.shape'], {}), '(img_tensor.shape)\n', (3224, 3242), True, 'import src.imageFunctions as imf\n'), ((817, 861), 'skimage.io.imread', 'io.imread', (['"""/home/charlie/test/psf_0005.tif"""'], {}), 
"('/home/charlie/test/psf_0005.tif')\n", (826, 861), False, 'from skimage import io\n'), ((1015, 1037), 'src.imageFunctions.normalizar', 'imf.normalizar', (['deconv'], {}), '(deconv)\n', (1029, 1037), True, 'import src.imageFunctions as imf\n'), ((3594, 3600), 'time.time', 'time', ([], {}), '()\n', (3598, 3600), False, 'from time import time\n'), ((1674, 1696), 'src.imageFunctions.normalizar', 'imf.normalizar', (['deconv'], {}), '(deconv)\n', (1688, 1696), True, 'import src.imageFunctions as imf\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from glob import glob
import nibabel as nib
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import monai
from monai.data import create_test_image_3d, list_data_collate, decollate_batch
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import (
Activations,
AsChannelFirstd,
AsDiscrete,
Compose,
LoadImaged,
RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
EnsureTyped,
EnsureType,
)
from monai.visualize import plot_2d_or_3d_image
def main(tempdir):
    """Train a 3-D UNet on synthetic data and validate with mean Dice.

    Generates 40 synthetic image/segmentation NIfTI pairs in *tempdir*,
    trains for 5 epochs on the first 20 pairs, validates every 2 epochs on
    the last 20 via sliding-window inference, saves the best model weights
    to 'best_metric_model_segmentation3d_dict.pth', and logs losses,
    metrics, and sample volumes to TensorBoard.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
    # First 20 pairs train, last 20 validate.
    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])]
    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadImaged(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            EnsureTyped(keys=["img", "seg"]),
        ]
    )
    # Validation skips the random crop/rotation augmentations.
    val_transforms = Compose(
        [
            LoadImaged(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            EnsureTyped(keys=["img", "seg"]),
        ]
    )
    # define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate)
    # Sanity check: print the shape of the first training batch.
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["seg"].shape)
    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=4,
        collate_fn=list_data_collate,
        pin_memory=torch.cuda.is_available(),
    )
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
    dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
    # Post-processing for validation outputs: sigmoid then binarize.
    post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold_values=True)])
    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.UNet(
        spatial_dims=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)
    # start a typical PyTorch training
    val_interval = 2  # validate every 2 epochs
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                val_images = None
                val_labels = None
                val_outputs = None
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
                    # Sliding-window inference covers the full volume with
                    # the same (96, 96, 96) ROI used for training crops.
                    roi_size = (96, 96, 96)
                    sw_batch_size = 4
                    val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
                    val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
                    # compute metric for current iteration
                    dice_metric(y_pred=val_outputs, y=val_labels)
                # aggregate the final mean dice result
                metric = dice_metric.aggregate().item()
                # reset the status for next validation round
                dice_metric.reset()
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_segmentation3d_dict.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_mean_dice", metric, epoch + 1)
                # plot the last model output as GIF image in TensorBoard with the corresponding image and label
                plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
                plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
                plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
main(tempdir)
| [
"monai.losses.DiceLoss",
"monai.utils.misc.first",
"monai.transforms.EnsureType",
"torch.cuda.is_available",
"monai.transforms.LoadImaged",
"monai.data.create_test_image_3d",
"monai.visualize.plot_2d_or_3d_image",
"monai.config.print_config",
"torch.utils.tensorboard.SummaryWriter",
"monai.network... | [((1267, 1294), 'monai.config.print_config', 'monai.config.print_config', ([], {}), '()\n', (1292, 1294), False, 'import monai\n'), ((1299, 1357), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1318, 1357), False, 'import logging\n'), ((2987, 3051), 'monai.data.Dataset', 'monai.data.Dataset', ([], {'data': 'train_files', 'transform': 'train_transforms'}), '(data=train_files, transform=train_transforms)\n', (3005, 3051), False, 'import monai\n'), ((3186, 3265), 'torch.utils.data.DataLoader', 'DataLoader', (['check_ds'], {'batch_size': '(2)', 'num_workers': '(4)', 'collate_fn': 'list_data_collate'}), '(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate)\n', (3196, 3265), False, 'from torch.utils.data import DataLoader\n'), ((3283, 3319), 'monai.utils.misc.first', 'monai.utils.misc.first', (['check_loader'], {}), '(check_loader)\n', (3305, 3319), False, 'import monai\n'), ((3432, 3496), 'monai.data.Dataset', 'monai.data.Dataset', ([], {'data': 'train_files', 'transform': 'train_transforms'}), '(data=train_files, transform=train_transforms)\n', (3450, 3496), False, 'import monai\n'), ((3869, 3929), 'monai.data.Dataset', 'monai.data.Dataset', ([], {'data': 'val_files', 'transform': 'val_transforms'}), '(data=val_files, transform=val_transforms)\n', (3887, 3929), False, 'import monai\n'), ((3947, 4024), 'torch.utils.data.DataLoader', 'DataLoader', (['val_ds'], {'batch_size': '(1)', 'num_workers': '(4)', 'collate_fn': 'list_data_collate'}), '(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)\n', (3957, 4024), False, 'from torch.utils.data import DataLoader\n'), ((4043, 4116), 'monai.metrics.DiceMetric', 'DiceMetric', ([], {'include_background': '(True)', 'reduction': '"""mean"""', 'get_not_nans': '(False)'}), "(include_background=True, reduction='mean', get_not_nans=False)\n", (4053, 4116), False, 'from monai.metrics 
import DiceMetric\n'), ((4583, 4618), 'monai.losses.DiceLoss', 'monai.losses.DiceLoss', ([], {'sigmoid': '(True)'}), '(sigmoid=True)\n', (4604, 4618), False, 'import monai\n'), ((4858, 4873), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (4871, 4873), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1545, 1615), 'monai.data.create_test_image_3d', 'create_test_image_3d', (['(128)', '(128)', '(128)'], {'num_seg_classes': '(1)', 'channel_dim': '(-1)'}), '(128, 128, 128, num_seg_classes=1, channel_dim=-1)\n', (1565, 1615), False, 'from monai.data import create_test_image_3d, list_data_collate, decollate_batch\n'), ((7886, 7915), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7913, 7915), False, 'import tempfile\n'), ((1649, 1658), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1655, 1658), True, 'import numpy as np\n'), ((1680, 1721), 'os.path.join', 'os.path.join', (['tempdir', 'f"""img{i:d}.nii.gz"""'], {}), "(tempdir, f'img{i:d}.nii.gz')\n", (1692, 1721), False, 'import os\n'), ((1757, 1766), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1763, 1766), True, 'import numpy as np\n'), ((1788, 1829), 'os.path.join', 'os.path.join', (['tempdir', 'f"""seg{i:d}.nii.gz"""'], {}), "(tempdir, f'seg{i:d}.nii.gz')\n", (1800, 1829), False, 'import os\n'), ((1857, 1893), 'os.path.join', 'os.path.join', (['tempdir', '"""img*.nii.gz"""'], {}), "(tempdir, 'img*.nii.gz')\n", (1869, 1893), False, 'import os\n'), ((1919, 1955), 'os.path.join', 'os.path.join', (['tempdir', '"""seg*.nii.gz"""'], {}), "(tempdir, 'seg*.nii.gz')\n", (1931, 1955), False, 'import os\n'), ((2242, 2273), 'monai.transforms.LoadImaged', 'LoadImaged', ([], {'keys': "['img', 'seg']"}), "(keys=['img', 'seg'])\n", (2252, 2273), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2287, 2339), 
'monai.transforms.AsChannelFirstd', 'AsChannelFirstd', ([], {'keys': "['img', 'seg']", 'channel_dim': '(-1)'}), "(keys=['img', 'seg'], channel_dim=-1)\n", (2302, 2339), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2353, 2380), 'monai.transforms.ScaleIntensityd', 'ScaleIntensityd', ([], {'keys': '"""img"""'}), "(keys='img')\n", (2368, 2380), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2394, 2515), 'monai.transforms.RandCropByPosNegLabeld', 'RandCropByPosNegLabeld', ([], {'keys': "['img', 'seg']", 'label_key': '"""seg"""', 'spatial_size': '[96, 96, 96]', 'pos': '(1)', 'neg': '(1)', 'num_samples': '(4)'}), "(keys=['img', 'seg'], label_key='seg', spatial_size=[\n 96, 96, 96], pos=1, neg=1, num_samples=4)\n", (2416, 2515), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2554, 2619), 'monai.transforms.RandRotate90d', 'RandRotate90d', ([], {'keys': "['img', 'seg']", 'prob': '(0.5)', 'spatial_axes': '[0, 2]'}), "(keys=['img', 'seg'], prob=0.5, spatial_axes=[0, 2])\n", (2567, 2619), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2633, 2665), 'monai.transforms.EnsureTyped', 'EnsureTyped', ([], {'keys': "['img', 'seg']"}), "(keys=['img', 'seg'])\n", (2644, 2665), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2735, 2766), 'monai.transforms.LoadImaged', 'LoadImaged', ([], {'keys': 
"['img', 'seg']"}), "(keys=['img', 'seg'])\n", (2745, 2766), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2780, 2832), 'monai.transforms.AsChannelFirstd', 'AsChannelFirstd', ([], {'keys': "['img', 'seg']", 'channel_dim': '(-1)'}), "(keys=['img', 'seg'], channel_dim=-1)\n", (2795, 2832), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2846, 2873), 'monai.transforms.ScaleIntensityd', 'ScaleIntensityd', ([], {'keys': '"""img"""'}), "(keys='img')\n", (2861, 2873), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((2887, 2919), 'monai.transforms.EnsureTyped', 'EnsureTyped', ([], {'keys': "['img', 'seg']"}), "(keys=['img', 'seg'])\n", (2898, 2919), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((3785, 3810), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3808, 3810), False, 'import torch\n'), ((4143, 4155), 'monai.transforms.EnsureType', 'EnsureType', ([], {}), '()\n', (4153, 4155), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((4157, 4182), 'monai.transforms.Activations', 'Activations', ([], {'sigmoid': '(True)'}), '(sigmoid=True)\n', (4168, 4182), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((4184, 4217), 
'monai.transforms.AsDiscrete', 'AsDiscrete', ([], {'threshold_values': '(True)'}), '(threshold_values=True)\n', (4194, 4217), False, 'from monai.transforms import Activations, AsChannelFirstd, AsDiscrete, Compose, LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, EnsureTyped, EnsureType\n'), ((4303, 4328), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4326, 4328), False, 'import torch\n'), ((4353, 4500), 'monai.networks.nets.UNet', 'monai.networks.nets.UNet', ([], {'spatial_dims': '(3)', 'in_channels': '(1)', 'out_channels': '(1)', 'channels': '(16, 32, 64, 128, 256)', 'strides': '(2, 2, 2, 2)', 'num_res_units': '(2)'}), '(spatial_dims=3, in_channels=1, out_channels=1,\n channels=(16, 32, 64, 128, 256), strides=(2, 2, 2, 2), num_res_units=2)\n', (4377, 4500), False, 'import monai\n'), ((5838, 5853), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5851, 5853), False, 'import torch\n'), ((7483, 7555), 'monai.visualize.plot_2d_or_3d_image', 'plot_2d_or_3d_image', (['val_images', '(epoch + 1)', 'writer'], {'index': '(0)', 'tag': '"""image"""'}), "(val_images, epoch + 1, writer, index=0, tag='image')\n", (7502, 7555), False, 'from monai.visualize import plot_2d_or_3d_image\n'), ((7572, 7644), 'monai.visualize.plot_2d_or_3d_image', 'plot_2d_or_3d_image', (['val_labels', '(epoch + 1)', 'writer'], {'index': '(0)', 'tag': '"""label"""'}), "(val_labels, epoch + 1, writer, index=0, tag='label')\n", (7591, 7644), False, 'from monai.visualize import plot_2d_or_3d_image\n'), ((7661, 7735), 'monai.visualize.plot_2d_or_3d_image', 'plot_2d_or_3d_image', (['val_outputs', '(epoch + 1)', 'writer'], {'index': '(0)', 'tag': '"""output"""'}), "(val_outputs, epoch + 1, writer, index=0, tag='output')\n", (7680, 7735), False, 'from monai.visualize import plot_2d_or_3d_image\n'), ((6218, 6286), 'monai.inferers.sliding_window_inference', 'sliding_window_inference', (['val_images', 'roi_size', 'sw_batch_size', 'model'], {}), '(val_images, 
roi_size, sw_batch_size, model)\n', (6242, 6286), False, 'from monai.inferers import sliding_window_inference\n'), ((6345, 6373), 'monai.data.decollate_batch', 'decollate_batch', (['val_outputs'], {}), '(val_outputs)\n', (6360, 6373), False, 'from monai.data import create_test_image_3d, list_data_collate, decollate_batch\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Maximum Likelihood Amplitude Estimation algorithm."""
from typing import Optional, List, Union, Tuple
import logging
import numpy as np
from scipy.optimize import brute
from scipy.stats import norm, chi2
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit.aqua import AquaError
from qiskit.aqua.utils.circuit_factory import CircuitFactory
from qiskit.aqua.utils.validation import validate_min
from .ae_algorithm import AmplitudeEstimationAlgorithm
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class MaximumLikelihoodAmplitudeEstimation(AmplitudeEstimationAlgorithm):
    """The Maximum Likelihood Amplitude Estimation algorithm.

    This class implements a quantum amplitude estimation (QAE) algorithm without phase
    estimation, according to https://arxiv.org/abs/1904.10246. In comparison to the original
    QAE algorithm (https://arxiv.org/abs/quant-ph/0005055), this implementation relies solely
    on different powers of the Grover algorithm and does not require ancilla qubits.
    Finally, the estimate is determined via a maximum likelihood estimation, which is why this
    class is named MaximumLikelihoodAmplitudeEstimation.
    """

    def __init__(self, num_oracle_circuits: int,
                 a_factory: Optional[CircuitFactory] = None,
                 q_factory: Optional[CircuitFactory] = None,
                 i_objective: Optional[int] = None,
                 likelihood_evals: Optional[int] = None) -> None:
        r"""
        Args:
            num_oracle_circuits: The number of circuits applying different powers of the Grover
                oracle Q. The (`num_oracle_circuits` + 1) executed circuits will be
                `[id, Q^2^0, ..., Q^2^{num_oracle_circuits-1}] A |0>`, where A is the problem
                unitary encoded in the argument `a_factory`.
                Has a minimum value of 1.
            a_factory: The CircuitFactory subclass object representing the problem unitary.
            q_factory: The CircuitFactory subclass object representing.
                an amplitude estimation sample (based on a_factory)
            i_objective: The index of the objective qubit, i.e. the qubit marking 'good' solutions
                with the state \|1> and 'bad' solutions with the state \|0>
            likelihood_evals: The number of gridpoints for the maximum search of the likelihood
                function
        """
        validate_min('num_oracle_circuits', num_oracle_circuits, 1)
        super().__init__(a_factory, q_factory, i_objective)
        # powers of Q to run: one circuit with no Q at all, then Q^1, Q^2, Q^4, ..., Q^2^(m-1)
        self._evaluation_schedule = [0] + [2**j for j in range(num_oracle_circuits)]
        self._likelihood_evals = likelihood_evals
        # default number of evaluations is max(10^4, pi/2 * 10^3 * 2^(num_oracle_circuits))
        if likelihood_evals is None:
            default = 10000
            self._likelihood_evals = np.maximum(default,
                                                  int(np.pi / 2 * 1000 * 2 ** num_oracle_circuits))
        self._circuits = []  # filled by construct_circuits()
        self._ret = {}  # result dictionary, populated during _run()

    @property
    def _num_qubits(self) -> int:
        """Return the number of qubits needed in the circuit.
        Returns:
            The total number of qubits.
        """
        if self.a_factory is None:  # if A factory is not set, no qubits are specified
            return 0
        num_ancillas = self.q_factory.required_ancillas()
        num_qubits = self.a_factory.num_target_qubits + num_ancillas
        return num_qubits

    def construct_circuits(self, measurement: bool = False) -> List[QuantumCircuit]:
        """Construct the Amplitude Estimation w/o QPE quantum circuits.
        Args:
            measurement: Boolean flag to indicate if measurement should be included in the circuits.
        Returns:
            A list with the QuantumCircuit objects for the algorithm.
        """
        # keep track of the Q-oracle queries
        self._ret['num_oracle_queries'] = 0
        # construct first part of circuit
        q = QuantumRegister(self.a_factory.num_target_qubits, 'q')
        qc_0 = QuantumCircuit(q, name='qc_a')  # 0 applications of Q, only a single A operator
        # get number of ancillas: enough for whichever of A and Q needs more
        num_ancillas = np.maximum(self.a_factory.required_ancillas(),
                                 self.q_factory.required_ancillas())
        q_aux = None
        # pylint: disable=comparison-with-callable
        if num_ancillas > 0:
            q_aux = QuantumRegister(num_ancillas, 'aux')
            qc_0.add_register(q_aux)
        # add classical register if needed (only the objective qubit is measured)
        if measurement:
            c = ClassicalRegister(1)
            qc_0.add_register(c)
        self.a_factory.build(qc_0, q, q_aux)
        # build one circuit per power in the evaluation schedule, all sharing the A-prefix
        self._circuits = []
        for k in self._evaluation_schedule:
            qc_k = qc_0.copy(name='qc_a_q_%s' % k)
            if k != 0:
                self.q_factory.build_power(qc_k, q, k, q_aux)
            if measurement:
                # real hardware can currently not handle operations after measurements, which might
                # happen if the circuit gets transpiled, hence we're adding a safeguard-barrier
                qc_k.barrier()
                qc_k.measure(q[self.i_objective], c[0])
            self._circuits += [qc_k]
        return self._circuits

    def _evaluate_statevectors(self,
                               statevectors: Union[List[List[complex]], List[np.ndarray]]
                               ) -> List[float]:
        """For each statevector compute the probability that |1> is measured in the objective qubit.
        Args:
            statevectors: A list of statevectors.
        Returns:
            The corresponding probabilities.
        """
        probabilities = []
        for sv in statevectors:
            p_k = 0
            for i, a in enumerate(sv):
                p = np.abs(a)**2
                # bitstring of basis-state index i, reversed so that b[j] addresses qubit j
                b = ('{0:%sb}' % self._num_qubits).format(i)[::-1]
                if b[self.i_objective] == '1':
                    p_k += p
            probabilities += [p_k]
        return probabilities

    def _get_hits(self) -> Tuple[List[int], List[int]]:
        """Get the good and total counts.
        Returns:
            A pair of two lists, ([1-counts per experiment], [shots per experiment]).
        Raises:
            AquaError: If self.run() has not been called yet.
        """
        one_hits = []  # h_k: how often 1 has been measured, for a power Q^(m_k)
        all_hits = []  # N_k: how often has been measured at a power Q^(m_k)
        try:
            if self.quantum_instance.is_statevector:
                # statevector mode: exact probabilities play the role of hits out of 1 "shot"
                probabilities = self._evaluate_statevectors(self._ret['statevectors'])
                one_hits = probabilities
                all_hits = np.ones_like(one_hits)
            else:
                for c in self._ret['counts']:
                    one_hits += [c.get('1', 0)]  # return 0 if no key '1' found
                    all_hits += [sum(c.values())]
        except KeyError:
            raise AquaError('Call run() first!')
        return one_hits, all_hits

    def _safe_min(self, array, default=0):
        # min() of an empty array would raise, so fall back to `default`
        if len(array) == 0:
            return default
        return np.min(array)

    def _safe_max(self, array, default=(np.pi / 2)):
        # max() of an empty array would raise, so fall back to `default`
        if len(array) == 0:
            return default
        return np.max(array)

    def _compute_fisher_information(self, a: Optional[float] = None,
                                    num_sum_terms: Optional[int] = None,
                                    observed: bool = False) -> float:
        """Compute the Fisher information.
        Args:
            a: The amplitude `a`. Can be omitted if `run` was called already, then the estimate
                of the algorithm is used.
            num_sum_terms: The number of sum terms to be included in the calculation of the
                Fisher information. By default all values are included.
            observed: If True, compute the observed Fisher information, otherwise the theoretical
                one.
        Returns:
            The computed Fisher information, or np.inf if statevector simulation was used.
        Raises:
            KeyError: Call run() first!
        """
        # Set the value a. Use `est_a` if provided.
        if a is None:
            try:
                a = self._ret['value']
            except KeyError:
                raise KeyError('Call run() first!')
        # Corresponding angle to the value a (only use real part of 'a')
        theta_a = np.arcsin(np.sqrt(np.real(a)))
        # Get the number of hits (Nk) and one-hits (hk)
        one_hits, all_hits = self._get_hits()
        # Include all sum terms or just up to a certain term?
        evaluation_schedule = self._evaluation_schedule
        if num_sum_terms is not None:
            evaluation_schedule = evaluation_schedule[:num_sum_terms]
            # not necessary since zip goes as far as shortest list:
            # all_hits = all_hits[:num_sum_terms]
            # one_hits = one_hits[:num_sum_terms]
        # Compute the Fisher information
        fisher_information = None
        if observed:
            # Note, that the observed Fisher information is very unreliable in this algorithm!
            d_logL = 0
            for Nk, hk, mk in zip(all_hits, one_hits, evaluation_schedule):
                tan = np.tan((2 * mk + 1) * theta_a)
                d_logL += (2 * mk + 1) * (hk / tan + (Nk - hk) * tan)
            d_logL /= np.sqrt(a * (1 - a))
            fisher_information = d_logL**2 / len(all_hits)
        else:
            fisher_information = \
                1 / (a * (1 - a)) * sum(Nk * (2 * mk + 1)**2 for Nk, mk in zip(all_hits,
                                                                               evaluation_schedule))
        return fisher_information

    def _fisher_confint(self, alpha: float = 0.05, observed: bool = False) -> List[float]:
        """Compute the `alpha` confidence interval based on the Fisher information.
        Args:
            alpha: The level of the confidence interval (must be <= 0.5), default to 0.05.
            observed: If True, use observed Fisher information.
        Returns:
            float: The alpha confidence interval based on the Fisher information
        Raises:
            AssertionError: Call run() first!
        """
        # Get the (observed) Fisher information
        fisher_information = None
        try:
            fisher_information = self._ret['fisher_information']
        except KeyError:
            raise AssertionError("Call run() first!")
        if observed:
            fisher_information = self._compute_fisher_information(observed=True)
        # Normal approximation: estimate +/- z_{1-alpha/2} / sqrt(Fisher information)
        normal_quantile = norm.ppf(1 - alpha / 2)
        confint = np.real(self._ret['value']) + \
            normal_quantile / np.sqrt(fisher_information) * np.array([-1, 1])
        # map the interval bounds through the a_factory's estimation mapping
        mapped_confint = [self.a_factory.value_to_estimation(bound) for bound in confint]
        return mapped_confint

    def _likelihood_ratio_confint(self, alpha: float = 0.05,
                                  nevals: Optional[int] = None) -> List[float]:
        """Compute the likelihood-ratio confidence interval.
        Args:
            alpha: The level of the confidence interval (< 0.5), defaults to 0.05.
            nevals: The number of evaluations to find the intersection with the loglikelihood
                function. Defaults to an adaptive value based on the maximal power of Q.
        Returns:
            The alpha-likelihood-ratio confidence interval.
        """
        if nevals is None:
            nevals = self._likelihood_evals

        def loglikelihood(theta, one_counts, all_counts):
            # log-likelihood of the observed counts for a candidate angle theta
            logL = 0
            for i, k in enumerate(self._evaluation_schedule):
                logL += np.log(np.sin((2 * k + 1) * theta) ** 2) * one_counts[i]
                logL += np.log(np.cos((2 * k + 1) * theta) ** 2) * (all_counts[i] - one_counts[i])
            return logL

        one_counts, all_counts = self._get_hits()
        eps = 1e-15  # to avoid invalid value in log
        thetas = np.linspace(0 + eps, np.pi / 2 - eps, nevals)
        values = np.zeros(len(thetas))
        for i, t in enumerate(thetas):
            values[i] = loglikelihood(t, one_counts, all_counts)
        loglik_mle = loglikelihood(self._ret['theta'], one_counts, all_counts)
        # threshold below the maximum, derived from the chi-squared quantile with 1 dof
        chi2_quantile = chi2.ppf(1 - alpha, df=1)
        thres = loglik_mle - chi2_quantile / 2
        # the (outer) LR confidence interval
        above_thres = thetas[values >= thres]
        # it might happen that the `above_thres` array is empty,
        # to still provide a valid result use safe_min/max which
        # then yield [0, pi/2]
        confint = [self._safe_min(above_thres, default=0),
                   self._safe_max(above_thres, default=(np.pi / 2))]
        # interval is in theta-space; map back via a = sin^2(theta), then to the estimation
        mapped_confint = [self.a_factory.value_to_estimation(np.sin(bound)**2) for bound in confint]
        return mapped_confint

    def confidence_interval(self, alpha: float, kind: str = 'fisher') -> List[float]:
        # pylint: disable=wrong-spelling-in-docstring
        """Compute the `alpha` confidence interval using the method `kind`.
        The confidence level is (1 - `alpha`) and supported kinds are 'fisher',
        'likelihood_ratio' and 'observed_fisher' with shorthand
        notations 'fi', 'lr' and 'oi', respectively.
        Args:
            alpha: The confidence level.
            kind: The method to compute the confidence interval. Defaults to 'fisher', which
                computes the theoretical Fisher information.
        Returns:
            The specified confidence interval.
        Raises:
            AquaError: If `run()` hasn't been called yet.
            NotImplementedError: If the method `kind` is not supported.
        """
        # check if AE did run already
        if 'estimation' not in self._ret.keys():
            raise AquaError('Call run() first!')
        # if statevector simulator the estimate is exact
        if self._quantum_instance.is_statevector:
            return 2 * [self._ret['estimation']]
        if kind in ['likelihood_ratio', 'lr']:
            return self._likelihood_ratio_confint(alpha)
        if kind in ['fisher', 'fi']:
            return self._fisher_confint(alpha, observed=False)
        if kind in ['observed_fisher', 'observed_information', 'oi']:
            return self._fisher_confint(alpha, observed=True)
        raise NotImplementedError('CI `{}` is not implemented.'.format(kind))

    def _compute_mle_safe(self):
        """Compute the MLE via a grid-search.
        This is a stable approach if sufficient gridpoints are used.
        """
        one_hits, all_hits = self._get_hits()
        # search range
        eps = 1e-15  # to avoid invalid value in log
        search_range = [0 + eps, np.pi / 2 - eps]

        def loglikelihood(theta):
            # logL contains the first `it` terms of the full loglikelihood
            # (negated, since scipy.optimize.brute minimizes)
            logL = 0
            for i, k in enumerate(self._evaluation_schedule):
                logL += np.log(np.sin((2 * k + 1) * theta) ** 2) * one_hits[i]
                logL += np.log(np.cos((2 * k + 1) * theta) ** 2) * (all_hits[i] - one_hits[i])
            return -logL

        est_theta = brute(loglikelihood, [search_range], Ns=self._likelihood_evals)[0]
        return est_theta

    def _run_mle(self) -> float:
        """Compute the maximum likelihood estimator (MLE) for the angle theta.
        Returns:
            The MLE for the angle theta, related to the amplitude a via a = sin^2(theta)
        """
        # TODO implement a **reliable**, fast method to find the maximum of the likelihood function
        return self._compute_mle_safe()

    def _run(self) -> dict:
        # check if A factory has been set
        if self.a_factory is None:
            raise AquaError("a_factory must be set!")
        if self._quantum_instance.is_statevector:
            # run circuit on statevector simulator
            self.construct_circuits(measurement=False)
            ret = self._quantum_instance.execute(self._circuits)
            # get statevectors and construct MLE input
            statevectors = [np.asarray(ret.get_statevector(circuit)) for circuit in self._circuits]
            self._ret['statevectors'] = statevectors
            # to count the number of Q-oracle calls (don't count shots)
            shots = 1
        else:
            # run circuit on QASM simulator
            self.construct_circuits(measurement=True)
            ret = self._quantum_instance.execute(self._circuits)
            # get counts and construct MLE input
            self._ret['counts'] = [ret.get_counts(circuit) for circuit in self._circuits]
            # to count the number of Q-oracle calls
            shots = self._quantum_instance._run_config.shots
        # run maximum likelihood estimation and construct results
        self._ret['theta'] = self._run_mle()
        self._ret['value'] = np.sin(self._ret['theta'])**2
        self._ret['estimation'] = self.a_factory.value_to_estimation(self._ret['value'])
        self._ret['fisher_information'] = self._compute_fisher_information()
        self._ret['num_oracle_queries'] = shots * sum(k for k in self._evaluation_schedule)
        confidence_interval = self._fisher_confint(alpha=0.05)
        self._ret['95%_confidence_interval'] = confidence_interval
        return self._ret
| [
"logging.getLogger",
"numpy.sqrt",
"numpy.array",
"scipy.stats.chi2.ppf",
"numpy.sin",
"numpy.max",
"numpy.real",
"numpy.linspace",
"numpy.min",
"qiskit.QuantumCircuit",
"qiskit.aqua.utils.validation.validate_min",
"numpy.abs",
"scipy.stats.norm.ppf",
"numpy.cos",
"qiskit.aqua.AquaError"... | [((1005, 1032), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1022, 1032), False, 'import logging\n'), ((2970, 3029), 'qiskit.aqua.utils.validation.validate_min', 'validate_min', (['"""num_oracle_circuits"""', 'num_oracle_circuits', '(1)'], {}), "('num_oracle_circuits', num_oracle_circuits, 1)\n", (2982, 3029), False, 'from qiskit.aqua.utils.validation import validate_min\n'), ((4560, 4614), 'qiskit.QuantumRegister', 'QuantumRegister', (['self.a_factory.num_target_qubits', '"""q"""'], {}), "(self.a_factory.num_target_qubits, 'q')\n", (4575, 4614), False, 'from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit\n'), ((4630, 4660), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['q'], {'name': '"""qc_a"""'}), "(q, name='qc_a')\n", (4644, 4660), False, 'from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit\n'), ((7746, 7759), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (7752, 7759), True, 'import numpy as np\n'), ((7884, 7897), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (7890, 7897), True, 'import numpy as np\n'), ((11280, 11303), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - alpha / 2)'], {}), '(1 - alpha / 2)\n', (11288, 11303), False, 'from scipy.stats import norm, chi2\n'), ((12665, 12710), 'numpy.linspace', 'np.linspace', (['(0 + eps)', '(np.pi / 2 - eps)', 'nevals'], {}), '(0 + eps, np.pi / 2 - eps, nevals)\n', (12676, 12710), True, 'import numpy as np\n'), ((12958, 12983), 'scipy.stats.chi2.ppf', 'chi2.ppf', (['(1 - alpha)'], {'df': '(1)'}), '(1 - alpha, df=1)\n', (12966, 12983), False, 'from scipy.stats import norm, chi2\n'), ((5006, 5042), 'qiskit.QuantumRegister', 'QuantumRegister', (['num_ancillas', '"""aux"""'], {}), "(num_ancillas, 'aux')\n", (5021, 5042), False, 'from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit\n'), ((5164, 5184), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(1)'], {}), '(1)\n', (5181, 5184), False, 'from 
qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit\n'), ((10037, 10057), 'numpy.sqrt', 'np.sqrt', (['(a * (1 - a))'], {}), '(a * (1 - a))\n', (10044, 10057), True, 'import numpy as np\n'), ((11322, 11349), 'numpy.real', 'np.real', (["self._ret['value']"], {}), "(self._ret['value'])\n", (11329, 11349), True, 'import numpy as np\n'), ((14499, 14529), 'qiskit.aqua.AquaError', 'AquaError', (['"""Call run() first!"""'], {}), "('Call run() first!')\n", (14508, 14529), False, 'from qiskit.aqua import AquaError\n'), ((15853, 15916), 'scipy.optimize.brute', 'brute', (['loglikelihood', '[search_range]'], {'Ns': 'self._likelihood_evals'}), '(loglikelihood, [search_range], Ns=self._likelihood_evals)\n', (15858, 15916), False, 'from scipy.optimize import brute\n'), ((16441, 16476), 'qiskit.aqua.AquaError', 'AquaError', (['"""a_factory must be set!"""'], {}), "('a_factory must be set!')\n", (16450, 16476), False, 'from qiskit.aqua import AquaError\n'), ((17577, 17603), 'numpy.sin', 'np.sin', (["self._ret['theta']"], {}), "(self._ret['theta'])\n", (17583, 17603), True, 'import numpy as np\n'), ((7305, 7327), 'numpy.ones_like', 'np.ones_like', (['one_hits'], {}), '(one_hits)\n', (7317, 7327), True, 'import numpy as np\n'), ((7566, 7596), 'qiskit.aqua.AquaError', 'AquaError', (['"""Call run() first!"""'], {}), "('Call run() first!')\n", (7575, 7596), False, 'from qiskit.aqua import AquaError\n'), ((9089, 9099), 'numpy.real', 'np.real', (['a'], {}), '(a)\n', (9096, 9099), True, 'import numpy as np\n'), ((9913, 9943), 'numpy.tan', 'np.tan', (['((2 * mk + 1) * theta_a)'], {}), '((2 * mk + 1) * theta_a)\n', (9919, 9943), True, 'import numpy as np\n'), ((11414, 11431), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (11422, 11431), True, 'import numpy as np\n'), ((6411, 6420), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (6417, 6420), True, 'import numpy as np\n'), ((11384, 11411), 'numpy.sqrt', 'np.sqrt', (['fisher_information'], {}), 
'(fisher_information)\n', (11391, 11411), True, 'import numpy as np\n'), ((13474, 13487), 'numpy.sin', 'np.sin', (['bound'], {}), '(bound)\n', (13480, 13487), True, 'import numpy as np\n'), ((12370, 12397), 'numpy.sin', 'np.sin', (['((2 * k + 1) * theta)'], {}), '((2 * k + 1) * theta)\n', (12376, 12397), True, 'import numpy as np\n'), ((12451, 12478), 'numpy.cos', 'np.cos', (['((2 * k + 1) * theta)'], {}), '((2 * k + 1) * theta)\n', (12457, 12478), True, 'import numpy as np\n'), ((15664, 15691), 'numpy.sin', 'np.sin', (['((2 * k + 1) * theta)'], {}), '((2 * k + 1) * theta)\n', (15670, 15691), True, 'import numpy as np\n'), ((15743, 15770), 'numpy.cos', 'np.cos', (['((2 * k + 1) * theta)'], {}), '((2 * k + 1) * theta)\n', (15749, 15770), True, 'import numpy as np\n')] |
"""
File: list_manipulation.py
Project: analysis
Last Modified: 2022-7-2
Created Date: 2022-7-2
Copyright (c) 2021
Author: AHMA project (Univ Rennes, CNRS, Inria, IRISA)
"""
################################################################################
import argparse
import numpy as np
import logging
import random
import glob, os
from sklearn.model_selection import train_test_split
from tabulate import tabulate
from tqdm import tqdm
################################################################################
class bcolors:
    ################################################################################
    # class bcolors
    #     collection of ANSI escape sequences used to colorize terminal output;
    #     print one of these before the text and `endc` afterwards to reset
    ################################################################################
    # standard foreground colors
    black       = '\033[30m'
    red         = '\033[31m'
    green       = '\033[32m'
    orange      = '\033[33m'
    blue        = '\033[34m'
    purple      = '\033[35m'
    cyan        = '\033[36m'
    lightgrey   = '\033[37m'
    # bright foreground colors
    darkgrey    = '\033[90m'
    lightred    = '\033[91m'
    lightgreen  = '\033[92m'
    yellow      = '\033[93m'
    lightyellow = '\u001b[33;1m'
    lightblue   = '\033[94m'
    pink        = '\033[95m'
    lightcyan   = '\033[96m'
    # text effects and resets
    endc        = '\033[0m'
    bold        = '\033[1m'
    underline   = '\033[4m'
    whitebg     = '\u001b[47;1m'
    resetbg     = '\u001b[0m'
################################################################################
def change_directory (path_lists, new_dir):
    ################################################################################
    # change_directory
    #    redirect the stored trace paths: every file name in the train/val/test
    #    lists keeps its basename but is re-rooted under new_dir, then the lists
    #    are written back to the same file
    #
    # input:
    #    + path_lists: file containing the lists (saved with np.save, pickled)
    #    + new_dir: new directory where the traces are now stored
    ################################################################################
    loaded = np.load (path_lists, allow_pickle = True)
    x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test = loaded
    ## rewrite each file list in place: same basename, new directory
    ## (if the extension must be changed as well, rebuild the basename here)
    for filelist in (x_train_filelist, x_val_filelist, x_test_filelist):
        for idx, filepath in enumerate (filelist):
            filelist [idx] = new_dir + '/' + os.path.basename (filepath)
    np.save (path_lists,
             [x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test],
             allow_pickle = True)
################################################################################
def get_tag (f):
    ################################################################################
    # get_tag
    #    [from neural-network/utils]
    #    extract the tag of a trace file: the basename without its last
    #    '-'-separated field (e.g. '/d/aes-key0-003.npy' -> 'aes-key0');
    #    a basename with no '-' yields the empty string
    #
    # input:
    #    + f: path of one file
    #
    # output:
    #    + the tag (str)
    ################################################################################
    pieces = os.path.basename (f).split ("-")
    return "-".join (pieces [:-1])
################################################################################
def display_tabular (table, header):
    ################################################################################
    # display_tabular
    #    pretty-print the given rows as a table on stdout (via tabulate)
    #
    # inputs:
    #    + table: list of rows to display
    #    + header: column names of the tabular
    ################################################################################
    rendered = tabulate (table, headers= header)
    print (rendered)
################################################################################
def display_list (x_train, x_val, x_test, y_train, y_val, y_test):
################################################################################
# display_list
#   display the content of the train/val/test split as a colored table:
#   one row per unique label (with global percentages) and, nested under
#   each label, one row per unique tag (with global and local percentages)
#
# inputs:
#   + x_{train, val, test}: filenames, used to derive the tags
#   + y_{train, val, test}: labels of each list
################################################################################
    y_unique = np.unique (np.array (list (y_train) + list (y_val) + list (y_test)))
    # per-split tag arrays, parallel to x_train / x_val / x_test
    tags = [np.array ([get_tag (f) for f in list (x_train)]),
            np.array ([get_tag (f) for f in list (x_val)]),
            np.array ([get_tag (f) for f in list (x_test)])]
    lines = []
    # running totals: ['-', 'total', train, val, train+val, test, total]
    last_line = ['-', 'total', 0, 0, 0, 0, 0]
    # indices (into `lines`) of label rows and of their nested tag rows,
    # kept so the percentage pass below can find them again
    y_idx_lines = []
    tags_idx_lines = []
    for i in range (len (y_unique)):
        line = [i, f'{bcolors.bold}{y_unique [i]}{bcolors.endc}']
        count = 0
        current_tags = []
        ## train
        idx = np.where (np.array (y_train) == y_unique [i])[0]
        line.append (len (idx))
        last_line [2] += len (idx)
        count += len (idx)
        current_tags.append (tags [0][idx])
        ## val
        idx = np.where (np.array (y_val) == y_unique [i]) [0]
        line.append (len (idx))
        last_line [3] += len (idx)
        count += len (idx)
        current_tags.append (tags [1][idx])
        ## train + val
        line.append (count)
        last_line [4] += count
        ## test
        idx = np.where (np.array (y_test) == y_unique [i]) [0]
        line.append (len (idx))
        last_line [5] += len (idx)
        count += len (idx)
        current_tags.append (tags [2][idx])
        ## total
        line.append (count)
        last_line [6] += count
        lines.append (line)
        y_idx_lines.append (len (lines) - 1)
        tmp_tags_idx_lines = []
        # nested rows: one per tag appearing under the current label
        current_unique_tags = np.unique (np.concatenate ((current_tags [0], current_tags [1], current_tags [2])))
        for j in range (len (current_unique_tags)):
            line = [f'{i}-{j}', f'{current_unique_tags [j]}']
            tag_count = 0
            ## train
            idx = np.where (np.array (tags [0]) == current_unique_tags [j])[0]
            line.append (len (idx))
            tag_count += len (idx)
            ## val
            idx = np.where (np.array (tags [1]) == current_unique_tags [j]) [0]
            line.append (len (idx))
            ## train + val
            tag_count += len (idx)
            line.append (tag_count)
            ## test
            idx = np.where (np.array (tags [2]) == current_unique_tags [j]) [0]
            line.append (len (idx))
            ## total
            tag_count += len (idx)
            line.append (tag_count)
            lines.append (line)
            tmp_tags_idx_lines.append (len (lines) - 1)
        tags_idx_lines.append (tmp_tags_idx_lines)
    ## add the percentages (rewrites the numeric cells as colored strings)
    lines_array = np.array (np.array (lines)[:, 2:], dtype = np.uint32)
    for i in range (lines_array.shape [1]): ## for each column
        for j in range (len (y_idx_lines)): ## for each label
            current_percentage = 100*lines_array [y_idx_lines [j], i]/lines_array [y_idx_lines, i].sum ()
            lines [y_idx_lines [j]][i + 2] = f'{bcolors.lightblue}{lines [y_idx_lines [j]][i + 2]}{bcolors.endc} {bcolors.lightgreen}[{current_percentage:.2f}]{bcolors.endc}'
            for k in range (len (tags_idx_lines [j])): # for each tag
                # local = share within the label; global = share of the whole column
                current_percentage_local = 100*lines_array [tags_idx_lines [j][k], i]/lines_array [tags_idx_lines [j], i].sum ()
                current_percentage_global = 100*lines_array [tags_idx_lines [j][k], i]/lines_array [y_idx_lines, i].sum ()
                lines [tags_idx_lines [j][k]][i + 2] = f'{bcolors.blue}{lines [tags_idx_lines [j][k]][i + 2]}{bcolors.endc}'\
                    +f' {bcolors.green}[{current_percentage_global:.2f}]{bcolors.endc}'\
                    +f' {bcolors.yellow}[{current_percentage_local:.2f}]{bcolors.endc}'
    for j in range (len (y_idx_lines)):
        lines [y_idx_lines [j]][0] = f'{bcolors.whitebg} {lines [y_idx_lines [j]][0]} {bcolors.resetbg}'
    for i in range (2, len (last_line) - 1):
        last_line [i] = f'{bcolors.blue}{last_line [i]}{bcolors.endc} {bcolors.green}[{100*last_line [i]/last_line [-1]:.2f}]{bcolors.endc}'
    last_line [-1] = f'{bcolors.lightred}{last_line [-1]}{bcolors.endc}'
    lines.append (last_line)
    print(tabulate (lines, headers= [f'{bcolors.bold}idx{bcolors.endc}\nsub-idx',
                                    f'{bcolors.bold}label{bcolors.endc}\ntag',
                                    f'{bcolors.bold}train nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]',
                                    f'{bcolors.bold}val nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]',
                                    f'{bcolors.bold}test nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]',
                                    f'{bcolors.bold}total nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]'], tablefmt="grid"))
# ################################################################################
# def display_list (y_train, y_val, y_test):
# ################################################################################
# # display_list
# # display the the content of a list
# #
# # inputs:
# # + y_{train, val, test} labels of a list
# ################################################################################
# y_unique = np.unique (y_train + y_val + y_test)
# lines = []
# for i in range (len (y_unique)):
# line = [i, y_unique [i]]
# count = 0
# idx = np.where (np.array (y_train) == y_unique [i])[0]
# line.append (len (idx))
# count += len (idx)
# idx = np.where (np.array (y_val) == y_unique [i]) [0]
# line.append (len (idx))
# count += len (idx)
# line.append (count)
# idx = np.where (np.array (y_test) == y_unique [i]) [0]
# line.append (len (idx))
# count += len (idx)
# line.append (count)
# lines.append (line)
# print(tabulate (lines, headers= ['idx', 'label', 'train', 'val', 'train + val', 'test', 'total']))
################################################################################
def compute_main_list (data, extension, nb_of_traces_per_label):
################################################################################
# compute_main_list
#   [from neural-network/utils]
#   Label the data and separate it in train, validation and test datasets.
# inputs:
#   - data: path to the directory containing all data, or an already
#     computed list of files
#   - extension: type of file in the data directory (ignored when `data`
#     is already a list)
#   - nb_of_traces_per_label: nb of traces per label (None: keep all)
#
# outputs:
#  - lists: {filelist, labels} x {learning, validating, testing}
################################################################################
    # accept either a directory to scan recursively or a pre-built list
    if (not isinstance (data, list)):
        filelist = glob.glob (data + "/**/*.%s"%extension, recursive = True)
    else:
        filelist = data
    ## sanity check: drop empty files so downstream loading cannot fail
    clean_file = []
    empty_file = []
    for i in tqdm (filelist, desc = 'sanity check', leave = False):
        if (os.stat (i).st_size == 0):
            empty_file.append (i)
        else:
            clean_file.append (i)
    if empty_file:
        print (f'[EMPTY FILES]: {empty_file} ({len (empty_file)})')
    filelist = clean_file
    random.shuffle (filelist)
    # get labels from the file names
    y = np.array ([get_tag (f) for f in filelist])
    # if a limit is needed, keep at most nb_of_traces_per_label files per label
    if (nb_of_traces_per_label is not None):
        unique_y = np.unique (y)
        new_y = []
        new_filelist = []
        filelist = np.array (filelist)
        for u in unique_y:
            idx = np.where (y == u)[0]
            new_y += list (y [idx [:nb_of_traces_per_label]])
            new_filelist += list (filelist [idx [:nb_of_traces_per_label]])
        y = new_y
        filelist = new_filelist
    # 80/20 split into train/test, then 80/20 of train into train/val
    x_train_filelist, x_test_filelist, y_train, y_test\
        = train_test_split (filelist, y, test_size=0.2)
    x_train_filelist, x_val_filelist, y_train, y_val\
        = train_test_split (x_train_filelist, y_train, test_size=0.2)
    return x_train_filelist, x_val_filelist, x_test_filelist,\
        y_train, y_val, y_test
################################################################################
def parse_data (filelist, tagmap):
################################################################################
# parse_data
#   [from neural-network/utils]
#   Label the data and separate it in train and test dataset.
#   It is possible to provide a path to a file containing a tag map.
#   Each line of this file is comma-separated: tag,label,dataset[,count]
#   Dataset value are 0: not used, 1: train only, 2: test only, 3: train and test
#   The optional count limits how many files of that tag are kept.
# inputs:
#   - filelist: list of the filename
#   - tagmap: tagmap used for the labeling
#
# outputs:
#  - x_train_filelist, x_test_filelist, x_trainandtest_filelist: files lists
#  - y_train, y_test, y_trainandtest: labels
################################################################################
    random.shuffle (filelist)
    label = {}
    group = {}
    count = {}
    # `with` guarantees the tag map is closed even when a line is malformed
    # (the original open()/close() pair leaked the handle on error)
    with open(tagmap) as tm:
        for l in tm:
            try:
                # 3-field line: tag, label, group
                tag, l, g = l.strip("\n").split(",")
            except ValueError:
                # 4-field line: tag, label, group, count (max files to keep);
                # narrowed from a bare `except:` that swallowed everything
                tag, l, g, c = l.strip("\n").split(",")
                count[tag] = int(c)
            g = int(g)
            label[tag] = l
            group[tag] = g
    #compute for each file its group and label
    x_train_filelist, x_test_filelist, x_trainandtest_filelist = [], [], []
    y_train, y_test, y_trainandtest = [], [], []
    for f in filelist:
        tag = get_tag(f)
        # skip files whose per-tag quota is exhausted
        if tag in count:
            if count[tag] == 0:
                continue
        if tag in group.keys():
            g = group[tag]
            if g == 1:
                x_train_filelist.append(f)
                y_train.append(label[tag])
            elif g == 2:
                x_test_filelist.append(f)
                y_test.append(label[tag])
            elif g == 3:
                x_trainandtest_filelist.append(f)
                y_trainandtest.append(label[tag])
            else:
                # group 0 (or unknown): not used, quota untouched
                continue
            if tag in count: count[tag] -= 1
    return x_train_filelist, x_test_filelist, x_trainandtest_filelist,\
        y_train, y_test, y_trainandtest
################################################################################
if __name__ == '__main__':
################################################################################
# CLI driver: builds/loads the main {train, val, test} file lists, optionally
# relabels them through a tag map, displays them, and saves the result.
    parser = argparse.ArgumentParser()
    parser.add_argument ('--raw', action = 'store', type = str,
                         dest = 'path_raw',
                         help = 'Absolute path to the raw data directory')
    parser.add_argument ('--tagmap', action = 'store',
                         type = str, dest = 'path_tagmap',
                         help = 'Absolute path to a file containing the tag map')
    parser.add_argument ('--save', action = 'store',
                         type = str, dest = 'path_save',
                         help = 'Absolute path to a file to save the lists')
    parser.add_argument ('--main-lists', action = 'store',
                         type = str, dest = 'path_main_lists',
                         help = 'Absolute path to a file containing the main lists')
    parser.add_argument ('--extension', default='dat',
                         type = str, dest = 'extension',
                         help = 'extensio of the raw traces ')
    parser.add_argument ('--log-level', default=logging.INFO,
                         type=lambda x: getattr (logging, x),
                         help = "Configure the logging level: DEBUG|INFO|WARNING|ERROR|FATAL")
    parser.add_argument ('--lists', action = 'store', type = str,
                         dest = 'path_lists',
                         help = 'Absolute path to a file containing lists')
    parser.add_argument ('--new_dir', action = 'store', type = str,
                         dest = 'path_new_dir',
                         help = 'Absolute path to the raw data, to change in a given file lists')
    parser.add_argument('--nb_of_traces_per_label', action='store', type=int,
                        default=None,
                        dest='nb_of_traces_per_label',
                        help='number of traces to keep per label')
    args, unknown = parser.parse_known_args ()
    assert len (unknown) == 0, f"[WARNING] Unknown arguments:\n{unknown}\n"
    logging.basicConfig (level = args.log_level)
    # below INFO (i.e. DEBUG) the script is chatty: dump parsed arguments
    if (logging.root.level < logging.INFO):
        print ("argument:")
        for arg in vars(args):
            print (f"{arg} : {getattr (args, arg)}")
    main_list = False
    ## change the directory of a list
    if ((args.path_lists is not None) and (args.path_new_dir is not None)):
        change_directory (args.path_lists, args.path_new_dir)
    ## generate main list (no tag needed)
    if (args.path_raw is not None and args.path_tagmap is None):
        # split testing and learning
        x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test \
            = compute_main_list (args.path_raw, extension = args.extension,
                                 nb_of_traces_per_label = args.nb_of_traces_per_label)
        main_list = True
        if (logging.root.level < logging.INFO):
            print ('main generated list:')
            display_list (x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test)
    ## otherwise it is loaded
    elif (args.path_main_lists is not None):
        [x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test]\
            = np.load (args.path_main_lists, allow_pickle = True)
        main_list = True
        if (logging.root.level < logging.INFO):
            print ('main loaded list:')
            display_list (x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test)
    ## create list from the main list
    if (args.path_tagmap is not None and main_list):
        # relabel train+val through the tag map, fold group-3 files into train
        x_train_filelist, x_del_0, x_trainandtest_filelist,\
        y_train, y_del_0, y_trainandtest = parse_data (x_train_filelist + x_val_filelist,
                                                       args.path_tagmap)
        x_train_filelist = x_train_filelist + x_trainandtest_filelist
        y_train = y_train + y_trainandtest
        # create the validation
        x_train_filelist, x_val_filelist, y_train, y_val\
            = train_test_split (x_train_filelist, y_train, test_size=0.2)
        # compute testing
        x_del_1, x_test_filelist, x_trainandtest_filelist,\
        y_del_1, y_test, y_trainandtest = parse_data (x_test_filelist,
                                                      args.path_tagmap)
        x_test_filelist = x_test_filelist + x_trainandtest_filelist
        y_test = y_test+ y_trainandtest
        if (logging.root.level < logging.INFO):
            print ('new list:')
            display_list (x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test)
        # if (len (x_del_0) != 0):
        #     print (f'deleted from training: {len (x_del_0)}')
        #     display_list (y_del_0, [], [])
        # if (len (x_del_1) != 0):
        #     print (f'deleted from testing: {len (x_del_1)}')
        #     display_list ([], [], y_del_1)
    ## generate list from raw data and tagmap
    elif (args.path_tagmap is not None):
        # get the list of files
        filelist = glob.glob (args.path_raw + "/**/*.%s"%args.extension, recursive = True)
        # create lists
        x_train_filelist, x_test_filelist, x_trainandtest_filelist,\
        y_train, y_test, y_trainandtest = parse_data (filelist, args.path_tagmap)
        # split x_trainandtest (group-3 files go partly to train, partly to test)
        if (len (x_trainandtest_filelist) > 0):
            print (f'{len (x_trainandtest_filelist)}, {np.unique (y_trainandtest, return_counts = True)[1].sum ()}')
            x_train_filelist_tmp, x_test_filelist_tmp, y_train_tmp, y_test_tmp\
                = train_test_split(x_trainandtest_filelist, y_trainandtest, test_size=0.2)
            x_train_filelist += x_train_filelist_tmp
            x_test_filelist += x_test_filelist_tmp
            y_train += y_train_tmp
            y_test += y_test_tmp
        # generate validating set
        x_train_filelist, x_val_filelist, y_train, y_val = train_test_split (x_train_filelist, y_train, test_size=0.2)
        if (logging.root.level < logging.INFO):
            print ('generated list:')
            display_list (x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test)
    ## display a provided list
    elif ((args.path_lists is not None) and (logging.root.level < logging.INFO)):
        [x_train_filelist, x_val_filelist, x_test_filelist,
         y_train, y_val, y_test] = np.load (args.path_lists, allow_pickle = True)
        display_list (x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test)
    ## save the computed lists
    if (args.path_save is not None):
        np.save (args.path_save,
                 [x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test],
                 allow_pickle = True)
| [
"logging.basicConfig",
"tabulate.tabulate",
"random.shuffle",
"argparse.ArgumentParser",
"numpy.unique",
"sklearn.model_selection.train_test_split",
"numpy.where",
"tqdm.tqdm",
"numpy.array",
"glob.glob",
"os.path.basename",
"numpy.concatenate",
"os.stat",
"numpy.load",
"numpy.save"
] | [((1868, 1906), 'numpy.load', 'np.load', (['path_lists'], {'allow_pickle': '(True)'}), '(path_lists, allow_pickle=True)\n', (1875, 1906), True, 'import numpy as np\n'), ((2805, 2924), 'numpy.save', 'np.save', (['path_lists', '[x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test]'], {'allow_pickle': '(True)'}), '(path_lists, [x_train_filelist, x_val_filelist, x_test_filelist,\n y_train, y_val, y_test], allow_pickle=True)\n', (2812, 2924), True, 'import numpy as np\n'), ((11340, 11388), 'tqdm.tqdm', 'tqdm', (['filelist'], {'desc': '"""sanity check"""', 'leave': '(False)'}), "(filelist, desc='sanity check', leave=False)\n", (11344, 11388), False, 'from tqdm import tqdm\n'), ((11657, 11681), 'random.shuffle', 'random.shuffle', (['filelist'], {}), '(filelist)\n', (11671, 11681), False, 'import random\n'), ((12273, 12317), 'sklearn.model_selection.train_test_split', 'train_test_split', (['filelist', 'y'], {'test_size': '(0.2)'}), '(filelist, y, test_size=0.2)\n', (12289, 12317), False, 'from sklearn.model_selection import train_test_split\n'), ((12388, 12446), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train_filelist', 'y_train'], {'test_size': '(0.2)'}), '(x_train_filelist, y_train, test_size=0.2)\n', (12404, 12446), False, 'from sklearn.model_selection import train_test_split\n'), ((13408, 13432), 'random.shuffle', 'random.shuffle', (['filelist'], {}), '(filelist)\n', (13422, 13432), False, 'import random\n'), ((14871, 14896), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14894, 14896), False, 'import argparse\n'), ((16843, 16884), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'args.log_level'}), '(level=args.log_level)\n', (16862, 16884), False, 'import logging\n'), ((3828, 3859), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': 'header'}), '(table, headers=header)\n', (3836, 3859), False, 'from tabulate import tabulate\n'), ((8662, 9163), 'tabulate.tabulate', 
'tabulate', (['lines'], {'headers': '[f"""{bcolors.bold}idx{bcolors.endc}\nsub-idx""",\n f"""{bcolors.bold}label{bcolors.endc}\ntag""",\n f"""{bcolors.bold}train nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}val nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}test nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}total nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ]', 'tablefmt': '"""grid"""'}), '(lines, headers=[f"""{bcolors.bold}idx{bcolors.endc}\nsub-idx""",\n f"""{bcolors.bold}label{bcolors.endc}\ntag""",\n f"""{bcolors.bold}train nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}val nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}test nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ,\n f"""{bcolors.bold}total nbr [gobal %] {bcolors.endc}\nnbr [global %] [local %]"""\n ], tablefmt=\'grid\')\n', (8670, 9163), False, 'from tabulate import tabulate\n'), ((11174, 11230), 'glob.glob', 'glob.glob', (["(data + '/**/*.%s' % extension)"], {'recursive': '(True)'}), "(data + '/**/*.%s' % extension, recursive=True)\n", (11183, 11230), False, 'import glob, os\n'), ((11849, 11861), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (11858, 11861), True, 'import numpy as np\n'), ((11927, 11945), 'numpy.array', 'np.array', (['filelist'], {}), '(filelist)\n', (11935, 11945), True, 'import numpy as np\n'), ((18850, 18908), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train_filelist', 'y_train'], {'test_size': '(0.2)'}), '(x_train_filelist, y_train, test_size=0.2)\n', (18866, 18908), False, 'from sklearn.model_selection import train_test_split\n'), ((21472, 21595), 'numpy.save', 'np.save', (['args.path_save', '[x_train_filelist, x_val_filelist, x_test_filelist, y_train, y_val, y_test]'], {'allow_pickle': '(True)'}), '(args.path_save, [x_train_filelist, 
x_val_filelist, x_test_filelist,\n y_train, y_val, y_test], allow_pickle=True)\n', (21479, 21595), True, 'import numpy as np\n'), ((2004, 2041), 'os.path.basename', 'os.path.basename', (['x_train_filelist[i]'], {}), '(x_train_filelist[i])\n', (2020, 2041), False, 'import glob, os\n'), ((2302, 2337), 'os.path.basename', 'os.path.basename', (['x_val_filelist[i]'], {}), '(x_val_filelist[i])\n', (2318, 2337), False, 'import glob, os\n'), ((2596, 2632), 'os.path.basename', 'os.path.basename', (['x_test_filelist[i]'], {}), '(x_test_filelist[i])\n', (2612, 2632), False, 'import glob, os\n'), ((5909, 5976), 'numpy.concatenate', 'np.concatenate', (['(current_tags[0], current_tags[1], current_tags[2])'], {}), '((current_tags[0], current_tags[1], current_tags[2]))\n', (5923, 5976), True, 'import numpy as np\n'), ((7077, 7092), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (7085, 7092), True, 'import numpy as np\n'), ((18033, 18081), 'numpy.load', 'np.load', (['args.path_main_lists'], {'allow_pickle': '(True)'}), '(args.path_main_lists, allow_pickle=True)\n', (18040, 18081), True, 'import numpy as np\n'), ((19887, 19957), 'glob.glob', 'glob.glob', (["(args.path_raw + '/**/*.%s' % args.extension)"], {'recursive': '(True)'}), "(args.path_raw + '/**/*.%s' % args.extension, recursive=True)\n", (19896, 19957), False, 'import glob, os\n'), ((20772, 20830), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train_filelist', 'y_train'], {'test_size': '(0.2)'}), '(x_train_filelist, y_train, test_size=0.2)\n', (20788, 20830), False, 'from sklearn.model_selection import train_test_split\n'), ((11407, 11417), 'os.stat', 'os.stat', (['i'], {}), '(i)\n', (11414, 11417), False, 'import glob, os\n'), ((11992, 12008), 'numpy.where', 'np.where', (['(y == u)'], {}), '(y == u)\n', (12000, 12008), True, 'import numpy as np\n'), ((20429, 20501), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_trainandtest_filelist', 'y_trainandtest'], {'test_size': 
'(0.2)'}), '(x_trainandtest_filelist, y_trainandtest, test_size=0.2)\n', (20445, 20501), False, 'from sklearn.model_selection import train_test_split\n'), ((21243, 21286), 'numpy.load', 'np.load', (['args.path_lists'], {'allow_pickle': '(True)'}), '(args.path_lists, allow_pickle=True)\n', (21250, 21286), True, 'import numpy as np\n'), ((3374, 3393), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (3390, 3393), False, 'import glob, os\n'), ((4926, 4943), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4934, 4943), True, 'import numpy as np\n'), ((5153, 5168), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (5161, 5168), True, 'import numpy as np\n'), ((5479, 5495), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (5487, 5495), True, 'import numpy as np\n'), ((6199, 6216), 'numpy.array', 'np.array', (['tags[0]'], {}), '(tags[0])\n', (6207, 6216), True, 'import numpy as np\n'), ((6378, 6395), 'numpy.array', 'np.array', (['tags[1]'], {}), '(tags[1])\n', (6386, 6395), True, 'import numpy as np\n'), ((6659, 6676), 'numpy.array', 'np.array', (['tags[2]'], {}), '(tags[2])\n', (6667, 6676), True, 'import numpy as np\n'), ((20268, 20313), 'numpy.unique', 'np.unique', (['y_trainandtest'], {'return_counts': '(True)'}), '(y_trainandtest, return_counts=True)\n', (20277, 20313), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
#
# Example to compare the faces in two images.
# <NAME>
# 2015/09/29
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import itertools
import os
import numpy as np
np.set_printoptions(precision=2)
import openface
# Model locations: relative to this script, under ../models/{dlib,openface}
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
# Command-line interface: input images plus model paths and options
parser = argparse.ArgumentParser()
parser.add_argument('imgs', type=str, nargs='+', help="Input images.")
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
                    help="Default image dimension.", default=96)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
    print("Argument parsing and loading libraries took {} seconds.".format(
        time.time() - start))
# Load the dlib aligner and the OpenFace Torch network (timed when verbose)
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
if args.verbose:
    print("Loading the dlib and OpenFace models took {} seconds.".format(
        time.time() - start))
def getRep(imgPath):
    """Compute the OpenFace embedding of the largest face in *imgPath*.

    Reads the image with OpenCV, detects and aligns the largest face via
    dlib, then runs the Torch network forward pass. Raises Exception when
    the image cannot be read, no face is detected, or alignment fails.
    Per-stage timings are printed when ``--verbose`` is set.
    """
    if args.verbose:
        print("Processing {}.".format(imgPath))
    image_bgr = cv2.imread(imgPath)
    if image_bgr is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    if args.verbose:
        print("  + Original size: {}".format(image_rgb.shape))
    t0 = time.time()
    face_box = align.getLargestFaceBoundingBox(image_rgb)
    if face_box is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
    if args.verbose:
        print("  + Face detection took {} seconds.".format(time.time() - t0))
    t0 = time.time()
    face_chip = align.align(args.imgDim, image_rgb, face_box,
                            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if face_chip is None:
        raise Exception("Unable to align image: {}".format(imgPath))
    if args.verbose:
        print("  + Face alignment took {} seconds.".format(time.time() - t0))
    t0 = time.time()
    embedding = net.forward(face_chip)
    if args.verbose:
        print("  + OpenFace forward pass took {} seconds.".format(time.time() - t0))
        print("Representation:")
        print(embedding)
        print("-----\n")
    return embedding
# Compare every pair of input images by the squared L2 distance
# between their OpenFace embeddings.
for (imgA, imgB) in itertools.combinations(args.imgs, 2):
    diff = getRep(imgA) - getRep(imgB)
    print("Comparing {} with {}.".format(imgA, imgB))
    print("  + Squared l2 distance between representations: {:0.3f}".format(np.dot(diff, diff)))
| [
"openface.TorchNeuralNet",
"cv2.imread",
"argparse.ArgumentParser",
"os.path.join",
"itertools.combinations",
"os.path.realpath",
"numpy.dot",
"openface.AlignDlib",
"cv2.cvtColor",
"time.time",
"numpy.set_printoptions"
] | [((734, 745), 'time.time', 'time.time', ([], {}), '()\n', (743, 745), False, 'import time\n'), ((829, 861), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (848, 861), True, 'import numpy as np\n'), ((950, 987), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""models"""'], {}), "(fileDir, '..', 'models')\n", (962, 987), False, 'import os\n'), ((1004, 1034), 'os.path.join', 'os.path.join', (['modelDir', '"""dlib"""'], {}), "(modelDir, 'dlib')\n", (1016, 1034), False, 'import os\n'), ((1055, 1089), 'os.path.join', 'os.path.join', (['modelDir', '"""openface"""'], {}), "(modelDir, 'openface')\n", (1067, 1089), False, 'import os\n'), ((1102, 1127), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1125, 1127), False, 'import argparse\n'), ((1893, 1904), 'time.time', 'time.time', ([], {}), '()\n', (1902, 1904), False, 'import time\n'), ((1914, 1956), 'openface.AlignDlib', 'openface.AlignDlib', (['args.dlibFacePredictor'], {}), '(args.dlibFacePredictor)\n', (1932, 1956), False, 'import openface\n'), ((1964, 2019), 'openface.TorchNeuralNet', 'openface.TorchNeuralNet', (['args.networkModel', 'args.imgDim'], {}), '(args.networkModel, args.imgDim)\n', (1987, 2019), False, 'import openface\n'), ((3444, 3480), 'itertools.combinations', 'itertools.combinations', (['args.imgs', '(2)'], {}), '(args.imgs, 2)\n', (3466, 3480), False, 'import itertools\n'), ((910, 936), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (926, 936), False, 'import os\n'), ((2255, 2274), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (2265, 2274), False, 'import cv2\n'), ((2382, 2421), 'cv2.cvtColor', 'cv2.cvtColor', (['bgrImg', 'cv2.COLOR_BGR2RGB'], {}), '(bgrImg, cv2.COLOR_BGR2RGB)\n', (2394, 2421), False, 'import cv2\n'), ((2522, 2533), 'time.time', 'time.time', ([], {}), '()\n', (2531, 2533), False, 'import time\n'), ((2793, 2804), 'time.time', 'time.time', ([], {}), 
'()\n', (2802, 2804), False, 'import time\n'), ((3166, 3177), 'time.time', 'time.time', ([], {}), '()\n', (3175, 3177), False, 'import time\n'), ((1324, 1391), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (1336, 1391), False, 'import os\n'), ((1508, 1558), 'os.path.join', 'os.path.join', (['openfaceModelDir', '"""nn4.small2.v1.t7"""'], {}), "(openfaceModelDir, 'nn4.small2.v1.t7')\n", (1520, 1558), False, 'import os\n'), ((3651, 3663), 'numpy.dot', 'np.dot', (['d', 'd'], {}), '(d, d)\n', (3657, 3663), True, 'import numpy as np\n'), ((1860, 1871), 'time.time', 'time.time', ([], {}), '()\n', (1869, 1871), False, 'import time\n'), ((2122, 2133), 'time.time', 'time.time', ([], {}), '()\n', (2131, 2133), False, 'import time\n'), ((2756, 2767), 'time.time', 'time.time', ([], {}), '()\n', (2765, 2767), False, 'import time\n'), ((3129, 3140), 'time.time', 'time.time', ([], {}), '()\n', (3138, 3140), False, 'import time\n'), ((3303, 3314), 'time.time', 'time.time', ([], {}), '()\n', (3312, 3314), False, 'import time\n')] |
from collections import OrderedDict
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from models.text_detect.craft import CRAFT
from backend.text_detect.craft_utils import (
adjustResultCoordinates,
getDetBoxes,
)
from backend.text_detect.imgproc import (
normalizeMeanVariance,
resize_aspect_ratio,
)
def scale(x, s):
    """Return ``x`` multiplied by the scaling factor ``s``.

    Parameters
    ----------
    x : float
        Value to scale.
    s : float
        Scaling factor.

    Returns
    -------
    float
        The scaled value.
    """
    x *= s
    return x
def rescale_coordinate(k, factors):
    """Map a bounding box from the resized-image coordinate space back to
    the original image coordinate space.

    Parameters
    ----------
    k : tuple
        (x1, y1, x2, y2) where (x1, y1) -> lt and (x2, y2) -> rb in
        PDFMiner coordinate space.
    factors : tuple
        (scaling_factor_x, scaling_factor_y, pdf_y); the first two are
        the scaling factors, pdf_y is the pdf height (not used here).

    Returns
    -------
    tuple
        (x1, y1, x2, y2) as ints, where (x1, y1) -> lt and
        (x2, y2) -> rb in OpenCV coordinate space.
    """
    left, top, right, bottom = k
    sx, sy, _pdf_y = factors
    left = scale(left, sx)
    top = scale(top, sy)
    right = scale(right, sx)
    bottom = scale(bottom, sy)
    return (int(left), int(top), int(right), int(bottom))
def copyStateDict(state_dict):
    """Return a copy of *state_dict* with any leading ``module`` key
    component stripped.

    ``torch.nn.DataParallel`` prefixes every parameter name with
    ``module.``; removing it makes the checkpoint loadable by an
    unwrapped model. Unlike the original, an empty mapping is returned
    unchanged instead of raising ``IndexError``.

    Parameters
    ----------
    state_dict : Mapping[str, Any]
        Model parameters keyed by dotted names.

    Returns
    -------
    OrderedDict
        Same values; the first dotted component of each key is dropped
        when the first key starts with ``"module"``.
    """
    # peek at the first key without materializing the whole key list;
    # "" makes an empty state dict fall through harmlessly
    first_key = next(iter(state_dict), "")
    start_idx = 1 if first_key.startswith("module") else 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = ".".join(k.split(".")[start_idx:])
        new_state_dict[name] = v
    return new_state_dict
def test_net(
    canvas_size,
    mag_ratio,
    net,
    image,
    text_threshold,
    link_threshold,
    low_text,
    poly,
    device,
    estimate_num_chars=False,
):
    """Run the CRAFT network on one image and post-process its output.

    Returns (y, boxes, polys, delta_h, delta_w): the raw network output,
    detected boxes (paired with a character-count estimate when
    estimate_num_chars is set), polygons (falling back to the box when no
    polygon was found), and the padding deltas from the resize step.
    """
    # resize (keeps aspect ratio; delta_h/delta_w are the added padding)
    img_resized, target_ratio, size_heatmap, delta_h, delta_w = resize_aspect_ratio(
        image, canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=mag_ratio
    )
    ratio_h = ratio_w = 1 / target_ratio
    # preprocessing
    x = normalizeMeanVariance(img_resized)
    x = torch.from_numpy(x).permute(2, 0, 1)  # [h, w, c] to [c, h, w]
    x = Variable(x.unsqueeze(0))  # [c, h, w] to [b, c, h, w]
    x = x.to(device)
    # forward pass (inference only, no gradients)
    with torch.no_grad():
        y, feature = net(x)
    # make score and link map (channel 0: text score, channel 1: link score)
    score_text = y[0, :, :, 0].cpu().data.numpy()
    score_link = y[0, :, :, 1].cpu().data.numpy()
    # Post-processing
    boxes, polys, mapper = getDetBoxes(
        score_text,
        score_link,
        text_threshold,
        link_threshold,
        low_text,
        poly,
        estimate_num_chars,
    )
    # coordinate adjustment: back to the original image scale
    boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
    polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
    if estimate_num_chars:
        boxes = list(boxes)
        polys = list(polys)
    for k in range(len(polys)):
        if estimate_num_chars:
            # pair each box with its estimated character count
            boxes[k] = (boxes[k], mapper[k])
        if polys[k] is None:
            polys[k] = boxes[k]
    return y, boxes, polys, delta_h, delta_w
def get_detector(trained_model, device="cpu"):
    """Build a CRAFT text detector and load its weights.

    Parameters
    ----------
    trained_model : str
        Path to the serialized CRAFT state dict.
    device : str
        Torch device string (default ``"cpu"``).

    Returns
    -------
    torch.nn.Module
        The detector wrapped in ``DataParallel``, moved to *device* and
        switched to eval mode.
    """
    net = CRAFT()
    # the original had byte-identical "cpu" and non-"cpu" branches;
    # map_location=device already covers both cases, so one load suffices
    net.load_state_dict(
        copyStateDict(torch.load(trained_model, map_location=device))
    )
    net = torch.nn.DataParallel(net).to(device)
    cudnn.benchmark = False
    net.eval()
    return net
def get_textbox(
    detector,
    image,
    canvas_size,
    mag_ratio,
    text_threshold,
    link_threshold,
    low_text,
    poly,
    device,
    optimal_num_chars=None,
):
    """Detect text regions in *image* with the CRAFT *detector*.

    When *optimal_num_chars* is given, candidate polygons are re-ordered
    by how close their estimated character count is to it.

    Returns (y, result): the raw network output and a list of flattened
    int32 polygon coordinate arrays.
    """
    estimate_num_chars = optimal_num_chars is not None
    y, bboxes, polys, delta_h, delta_w = test_net(
        canvas_size,
        mag_ratio,
        detector,
        image,
        text_threshold,
        link_threshold,
        low_text,
        poly,
        device,
        estimate_num_chars,
    )
    # bboxes, polys (no_box, 4, 2), dtype=float32
    if estimate_num_chars:
        # each entry is (polygon, char_count); keep polygons sorted by
        # closeness of char_count to the requested optimum
        polys = [
            p for p, _ in sorted(polys, key=lambda x: abs(optimal_num_chars - x[1]))
        ]
    # flatten each polygon to a 1-D int32 array; the original loop used an
    # unused enumerate index and shadowed the `poly` parameter
    result = [np.array(box).astype(np.int32).reshape(-1) for box in polys]
    return y, result
| [
"collections.OrderedDict",
"models.text_detect.craft.CRAFT",
"backend.text_detect.craft_utils.getDetBoxes",
"torch.load",
"torch.nn.DataParallel",
"backend.text_detect.craft_utils.adjustResultCoordinates",
"torch.from_numpy",
"backend.text_detect.imgproc.resize_aspect_ratio",
"numpy.array",
"backe... | [((1720, 1733), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1731, 1733), False, 'from collections import OrderedDict\n'), ((2133, 2229), 'backend.text_detect.imgproc.resize_aspect_ratio', 'resize_aspect_ratio', (['image', 'canvas_size'], {'interpolation': 'cv2.INTER_LINEAR', 'mag_ratio': 'mag_ratio'}), '(image, canvas_size, interpolation=cv2.INTER_LINEAR,\n mag_ratio=mag_ratio)\n', (2152, 2229), False, 'from backend.text_detect.imgproc import normalizeMeanVariance, resize_aspect_ratio\n'), ((2310, 2344), 'backend.text_detect.imgproc.normalizeMeanVariance', 'normalizeMeanVariance', (['img_resized'], {}), '(img_resized)\n', (2331, 2344), False, 'from backend.text_detect.imgproc import normalizeMeanVariance, resize_aspect_ratio\n'), ((2754, 2861), 'backend.text_detect.craft_utils.getDetBoxes', 'getDetBoxes', (['score_text', 'score_link', 'text_threshold', 'link_threshold', 'low_text', 'poly', 'estimate_num_chars'], {}), '(score_text, score_link, text_threshold, link_threshold,\n low_text, poly, estimate_num_chars)\n', (2765, 2861), False, 'from backend.text_detect.craft_utils import adjustResultCoordinates, getDetBoxes\n'), ((2962, 3010), 'backend.text_detect.craft_utils.adjustResultCoordinates', 'adjustResultCoordinates', (['boxes', 'ratio_w', 'ratio_h'], {}), '(boxes, ratio_w, ratio_h)\n', (2985, 3010), False, 'from backend.text_detect.craft_utils import adjustResultCoordinates, getDetBoxes\n'), ((3023, 3071), 'backend.text_detect.craft_utils.adjustResultCoordinates', 'adjustResultCoordinates', (['polys', 'ratio_w', 'ratio_h'], {}), '(polys, ratio_w, ratio_h)\n', (3046, 3071), False, 'from backend.text_detect.craft_utils import adjustResultCoordinates, getDetBoxes\n'), ((3429, 3436), 'models.text_detect.craft.CRAFT', 'CRAFT', ([], {}), '()\n', (3434, 3436), False, 'from models.text_detect.craft import CRAFT\n'), ((2528, 2543), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2541, 2543), False, 'import torch\n'), ((2353, 2372), 
'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2369, 2372), False, 'import torch\n'), ((3517, 3563), 'torch.load', 'torch.load', (['trained_model'], {'map_location': 'device'}), '(trained_model, map_location=device)\n', (3527, 3563), False, 'import torch\n'), ((3640, 3686), 'torch.load', 'torch.load', (['trained_model'], {'map_location': 'device'}), '(trained_model, map_location=device)\n', (3650, 3686), False, 'import torch\n'), ((3712, 3738), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (3733, 3738), False, 'import torch\n'), ((4564, 4577), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (4572, 4577), True, 'import numpy as np\n')] |
import torch
import glob, os
from models.unet import UNet_clean
from collections import OrderedDict
from dess_utils.data_utils import imagesc
import numpy as np
import pandas as pd
def get_dcm(dcm_path):
    """Load every slice file under ``dcm_path`` into a normalized volume tensor.

    All files matching ``dcm_path + '*'`` are loaded with ``np.load`` in sorted
    (filename) order and stacked along a new leading axis. Intensities are
    clamped to 400, scaled to [0, 1] by the volume maximum, and the single
    channel is replicated three times.

    Returns:
        torch.FloatTensor of shape (num_slices, 3, H, W).
    """
    slice_files = sorted(glob.glob(dcm_path + '*'))
    volume = np.stack([np.load(f) for f in slice_files], 0)
    # Clamp bright outliers, then normalize by the (clamped) maximum.
    volume = np.minimum(volume, 400)
    scaled = torch.from_numpy(volume / volume.max())
    # Replicate the single channel into three channels.
    three_channel = torch.cat([scaled.unsqueeze(1)] * 3, 1)
    return three_channel.type(torch.FloatTensor)
def get_seg(checkpoint, dcm, lightning=False):
    """Segment a volume slice-by-slice with a UNet restored from ``checkpoint``.

    Args:
        checkpoint: A state dict, or (if ``lightning``) a Lightning checkpoint
            whose ``'state_dict'`` keys carry a ``'net.'`` prefix to strip.
        dcm: Tensor of slices, indexed along axis 0, fed to the net one at a time.
        lightning: Whether ``checkpoint`` is a Lightning-style checkpoint.

    Returns:
        np.ndarray of per-pixel class indices (argmax over the channel axis),
        concatenated over all slices.
    """
    if lightning:
        # Strip the "net." prefix Lightning adds to every parameter name.
        weights = OrderedDict(
            (key.split('net.')[1], value)
            for key, value in checkpoint['state_dict'].items())
    else:
        weights = checkpoint
    # The first conv's output channels encode the number of classes.
    num_classes = weights['Conv.weight'].shape[0]
    net = UNet_clean(output_ch=num_classes, backbone='vgg11', depth=5)
    net.load_state_dict(weights)
    net.cuda()
    per_slice = []
    for idx in range(dcm.shape[0]):
        batch = dcm[idx:idx + 1, ::].cuda()
        logits, = net(batch)
        labels = np.argmax(logits.cpu().detach().numpy(), 1)
        per_slice.append(labels)
    return np.concatenate(per_slice, 0)
if __name__ == '__main__':
    # Per-subject crop boundaries; columns 1..6 are used as slice bounds below.
    crop = pd.read_csv('data/testing/SAG_3D_DESS_LEFT_00.csv')
    dcm_path = 'data/testing/SAG_3D_DESS_LEFT/'
    seg_dir = dcm_path[:-1] + '_seg/'
    if not os.path.isdir(seg_dir):
        os.mkdir(seg_dir)
    for subject_dir in glob.glob(dcm_path + '*'):
        subject = subject_dir.split('/')[-1]
        # Look up this subject's crop row by numeric ID.
        crop_idx = crop.loc[crop['ID'] == int(subject)].values[0, :]
        volume = get_dcm(dcm_path + str(subject) + '/')
        volume = volume[crop_idx[1]:crop_idx[2], :, crop_idx[3]:crop_idx[4], crop_idx[5]:crop_idx[6]]
        checkpoint = torch.load('checkpoints/clean_femur_tibia_cartilage.pth')
        seg = get_seg(checkpoint, volume).astype(np.uint8)
        np.save(seg_dir + subject + '.npy', seg)
| [
"models.unet.UNet_clean",
"pandas.read_csv",
"torch.load",
"numpy.argmax",
"os.path.isdir",
"os.mkdir",
"numpy.concatenate",
"numpy.save",
"numpy.expand_dims",
"numpy.load",
"glob.glob"
] | [((214, 239), 'glob.glob', 'glob.glob', (["(dcm_path + '*')"], {}), "(dcm_path + '*')\n", (223, 239), False, 'import glob, os\n'), ((356, 378), 'numpy.concatenate', 'np.concatenate', (['dcm', '(0)'], {}), '(dcm, 0)\n', (370, 378), True, 'import numpy as np\n'), ((826, 918), 'models.unet.UNet_clean', 'UNet_clean', ([], {'output_ch': "state_dict_new['Conv.weight'].shape[0]", 'backbone': '"""vgg11"""', 'depth': '(5)'}), "(output_ch=state_dict_new['Conv.weight'].shape[0], backbone=\n 'vgg11', depth=5)\n", (836, 918), False, 'from models.unet import UNet_clean\n'), ((1169, 1191), 'numpy.concatenate', 'np.concatenate', (['seg', '(0)'], {}), '(seg, 0)\n', (1183, 1191), True, 'import numpy as np\n'), ((1247, 1298), 'pandas.read_csv', 'pd.read_csv', (['"""data/testing/SAG_3D_DESS_LEFT_00.csv"""'], {}), "('data/testing/SAG_3D_DESS_LEFT_00.csv')\n", (1258, 1298), True, 'import pandas as pd\n'), ((1456, 1481), 'glob.glob', 'glob.glob', (["(dcm_path + '*')"], {}), "(dcm_path + '*')\n", (1465, 1481), False, 'import glob, os\n'), ((294, 304), 'numpy.load', 'np.load', (['x'], {}), '(x)\n', (301, 304), True, 'import numpy as np\n'), ((1121, 1136), 'numpy.argmax', 'np.argmax', (['y', '(1)'], {}), '(y, 1)\n', (1130, 1136), True, 'import numpy as np\n'), ((1358, 1396), 'os.path.isdir', 'os.path.isdir', (["(dcm_path[:-1] + '_seg/')"], {}), "(dcm_path[:-1] + '_seg/')\n", (1371, 1396), False, 'import glob, os\n'), ((1406, 1439), 'os.mkdir', 'os.mkdir', (["(dcm_path[:-1] + '_seg/')"], {}), "(dcm_path[:-1] + '_seg/')\n", (1414, 1439), False, 'import glob, os\n'), ((1870, 1926), 'numpy.save', 'np.save', (["(dcm_path[:-1] + '_seg/' + subject + '.npy')", 'seg'], {}), "(dcm_path[:-1] + '_seg/' + subject + '.npy', seg)\n", (1877, 1926), True, 'import numpy as np\n'), ((324, 344), 'numpy.expand_dims', 'np.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (338, 344), True, 'import numpy as np\n'), ((1781, 1838), 'torch.load', 'torch.load', (['"""checkpoints/clean_femur_tibia_cartilage.pth"""'], 
{}), "('checkpoints/clean_femur_tibia_cartilage.pth')\n", (1791, 1838), False, 'import torch\n')] |
import unittest
import numpy as np
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.experimental.links.model.fcis import ProposalTargetCreator
from chainercv.utils import generate_random_bbox
from chainercv.utils import mask_to_bbox
class TestProposalTargetCreator(unittest.TestCase):
    """Shape, type, and sampling-ratio checks for ProposalTargetCreator."""

    n_sample = 128
    n_class = 21
    pos_ratio = 0.25
    mask_size = 21

    def setUp(self):
        num_rois = 1024
        num_masks = 10
        img_size = (392, 512)
        self.roi = generate_random_bbox(num_rois, img_size, 16, 250)
        self.mask = np.random.uniform(
            size=(num_masks, img_size[0], img_size[1])) > 0.5
        self.label = np.random.randint(
            0, self.n_class - 1, size=(num_masks,), dtype=np.int32)
        self.proposal_target_creator = ProposalTargetCreator(
            n_sample=self.n_sample,
            pos_ratio=self.pos_ratio)

    def check_proposal_target_creator(
            self, roi, mask, label, proposal_target_creator):
        xp = cuda.get_array_module(roi)
        bbox = mask_to_bbox(mask)
        sample_roi, gt_roi_mask, gt_roi_label, gt_roi_loc = \
            proposal_target_creator(
                roi, mask, label, bbox, mask_size=self.mask_size)

        # Every output must be an array on the same device as the input.
        for output in (sample_roi, gt_roi_loc, gt_roi_mask, gt_roi_label):
            self.assertIsInstance(output, xp.ndarray)

        sample_roi = cuda.to_cpu(sample_roi)
        gt_roi_loc = cuda.to_cpu(gt_roi_loc)
        gt_roi_mask = cuda.to_cpu(gt_roi_mask)
        gt_roi_label = cuda.to_cpu(gt_roi_label)

        # Output shapes are fixed by n_sample and mask_size.
        self.assertEqual(sample_roi.shape, (self.n_sample, 4))
        self.assertEqual(gt_roi_loc.shape, (self.n_sample, 4))
        self.assertEqual(
            gt_roi_mask.shape, (self.n_sample, self.mask_size, self.mask_size))
        self.assertEqual(gt_roi_label.shape, (self.n_sample,))

        # Every sampled RoI gets a valid label; positives respect pos_ratio.
        np.testing.assert_equal(np.sum(gt_roi_label >= 0), self.n_sample)
        n_positive = np.sum(gt_roi_label >= 1)
        n_negative = np.sum(gt_roi_label == 0)
        self.assertLessEqual(n_positive, self.n_sample * self.pos_ratio)
        self.assertLessEqual(n_negative, self.n_sample - n_positive)

    def test_proposal_target_creator_cpu(self):
        self.check_proposal_target_creator(
            self.roi, self.mask, self.label,
            self.proposal_target_creator)

    @attr.gpu
    def test_proposal_target_creator_gpu(self):
        self.check_proposal_target_creator(
            cuda.to_gpu(self.roi),
            cuda.to_gpu(self.mask),
            cuda.to_gpu(self.label),
            self.proposal_target_creator)
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| [
"chainercv.experimental.links.model.fcis.ProposalTargetCreator",
"chainercv.utils.generate_random_bbox",
"chainer.testing.run_module",
"chainer.backends.cuda.get_array_module",
"numpy.random.randint",
"chainer.backends.cuda.to_cpu",
"numpy.sum",
"numpy.random.uniform",
"chainercv.utils.mask_to_bbox"... | [((2788, 2826), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2806, 2826), False, 'from chainer import testing\n'), ((542, 588), 'chainercv.utils.generate_random_bbox', 'generate_random_bbox', (['n_roi', 'img_size', '(16)', '(250)'], {}), '(n_roi, img_size, 16, 250)\n', (562, 588), False, 'from chainercv.utils import generate_random_bbox\n'), ((708, 778), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.n_class - 1)'], {'size': '(n_mask,)', 'dtype': 'np.int32'}), '(0, self.n_class - 1, size=(n_mask,), dtype=np.int32)\n', (725, 778), True, 'import numpy as np\n'), ((832, 903), 'chainercv.experimental.links.model.fcis.ProposalTargetCreator', 'ProposalTargetCreator', ([], {'n_sample': 'self.n_sample', 'pos_ratio': 'self.pos_ratio'}), '(n_sample=self.n_sample, pos_ratio=self.pos_ratio)\n', (853, 903), False, 'from chainercv.experimental.links.model.fcis import ProposalTargetCreator\n'), ((1044, 1070), 'chainer.backends.cuda.get_array_module', 'cuda.get_array_module', (['roi'], {}), '(roi)\n', (1065, 1070), False, 'from chainer.backends import cuda\n'), ((1086, 1104), 'chainercv.utils.mask_to_bbox', 'mask_to_bbox', (['mask'], {}), '(mask)\n', (1098, 1104), False, 'from chainercv.utils import mask_to_bbox\n'), ((1532, 1555), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['sample_roi'], {}), '(sample_roi)\n', (1543, 1555), False, 'from chainer.backends import cuda\n'), ((1577, 1600), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['gt_roi_loc'], {}), '(gt_roi_loc)\n', (1588, 1600), False, 'from chainer.backends import cuda\n'), ((1623, 1647), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['gt_roi_mask'], {}), '(gt_roi_mask)\n', (1634, 1647), False, 'from chainer.backends import cuda\n'), ((1671, 1696), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['gt_roi_label'], {}), '(gt_roi_label)\n', (1682, 1696), False, 'from chainer.backends import cuda\n'), 
((2154, 2179), 'numpy.sum', 'np.sum', (['(gt_roi_label >= 1)'], {}), '(gt_roi_label >= 1)\n', (2160, 2179), True, 'import numpy as np\n'), ((2196, 2221), 'numpy.sum', 'np.sum', (['(gt_roi_label == 0)'], {}), '(gt_roi_label == 0)\n', (2202, 2221), True, 'import numpy as np\n'), ((609, 667), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_mask, img_size[0], img_size[1])'}), '(size=(n_mask, img_size[0], img_size[1]))\n', (626, 667), True, 'import numpy as np\n'), ((2096, 2121), 'numpy.sum', 'np.sum', (['(gt_roi_label >= 0)'], {}), '(gt_roi_label >= 0)\n', (2102, 2121), True, 'import numpy as np\n'), ((2648, 2669), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.roi'], {}), '(self.roi)\n', (2659, 2669), False, 'from chainer.backends import cuda\n'), ((2683, 2705), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.mask'], {}), '(self.mask)\n', (2694, 2705), False, 'from chainer.backends import cuda\n'), ((2719, 2742), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.label'], {}), '(self.label)\n', (2730, 2742), False, 'from chainer.backends import cuda\n')] |
from aerosandbox.numpy import sin, cos, linalg
from aerosandbox.numpy.array import array
import numpy as _onp
from typing import Union, List
def rotation_matrix_2D(
        angle,
        as_array: bool = True,
):
    """Return the 2D rotation matrix for a counterclockwise rotation by ``angle``.

    Args:
        angle: Rotation angle, in radians.
        as_array: If True, return an array-like; otherwise a plain list of lists.

    Returns: The 2x2 rotation matrix.
    """
    sine = sin(angle)
    cosine = cos(angle)
    rows = [
        [cosine, -sine],
        [sine, cosine],
    ]
    return array(rows) if as_array else rows
def rotation_matrix_3D(
        angle: Union[float, _onp.ndarray],
        axis: Union[_onp.ndarray, List, str],
        as_array: bool = True,
        axis_already_normalized: bool = False
):
    """Return the rotation matrix for a rotation by ``angle`` about ``axis``.

    Implements the axis-angle rotation matrix
    (https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle).

    Args:
        angle: Rotation angle, in radians (right-hand rule); may be vectorized.
        axis: The rotation axis: one of the strings "x", "y", "z", or a
            3-component vector (axis[0] holds the x-components, etc.).
        as_array: If True, return an array-like; otherwise a list of lists.
            Flag this False if you intend to use the function vectorized.
        axis_already_normalized: Skip axis normalization for speed if True.

    Returns: The 3x3 rotation matrix, with type according to ``as_array``.
    """
    s = sin(angle)
    c = cos(angle)

    if isinstance(axis, str):
        # Principal-axis rotations, keyed by axis name.
        named = {
            "x": [[1, 0, 0],
                  [0, c, -s],
                  [0, s, c]],
            "y": [[c, 0, s],
                  [0, 1, 0],
                  [-s, 0, c]],
            "z": [[c, -s, 0],
                  [s, c, 0],
                  [0, 0, 1]],
        }
        key = axis.lower()
        if key not in named:
            raise ValueError("If `axis` is a string, it must be `x`, `y`, or `z`.")
        rot = named[key]
    else:
        ux, uy, uz = axis[0], axis[1], axis[2]
        if not axis_already_normalized:
            length = (ux ** 2 + uy ** 2 + uz ** 2) ** 0.5
            ux, uy, uz = ux / length, uy / length, uz / length
        m = 1 - c  # shared (1 - cos) factor
        rot = [
            [c + ux * ux * m, ux * uy * m - uz * s, ux * uz * m + uy * s],
            [uy * ux * m + uz * s, c + uy * uy * m, uy * uz * m - ux * s],
            [uz * ux * m - uy * s, uz * uy * m + ux * s, c + uz * uz * m],
        ]

    return array(rot) if as_array else rot
def rotation_matrix_from_euler_angles(
        roll_angle: Union[float, _onp.ndarray] = 0,
        pitch_angle: Union[float, _onp.ndarray] = 0,
        yaw_angle: Union[float, _onp.ndarray] = 0,
        as_array: bool = True
):
    """Return the rotation matrix for a standard (yaw, pitch, roll) Euler rotation.

    The rotations are composed as R = R_z(yaw) @ R_y(pitch) @ R_x(roll):
    first roll about x, then pitch about y, then yaw about z. Pre-multiply a
    body-axes vector to transform it to earth axes:

        >>> vector_earth = rotation_matrix_from_euler_angles(np.pi / 4, np.pi / 4, np.pi / 4) @ vector_body

    See notes: http://planning.cs.uiuc.edu/node102.html

    Args:
        roll_angle: Rotation about the x-axis. [radians]
        pitch_angle: Rotation about the y-axis. [radians]
        yaw_angle: Rotation about the z-axis. [radians]
        as_array: If True, return an array-like; otherwise a list of lists.

    Returns: The 3x3 rotation matrix.
    """
    sin_yaw, cos_yaw = sin(yaw_angle), cos(yaw_angle)
    sin_pitch, cos_pitch = sin(pitch_angle), cos(pitch_angle)
    sin_roll, cos_roll = sin(roll_angle), cos(roll_angle)

    rot = [
        [cos_yaw * cos_pitch,
         cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll,
         cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll],
        [sin_yaw * cos_pitch,
         sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll,
         sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll],
        [-sin_pitch,
         cos_pitch * sin_roll,
         cos_pitch * cos_roll],
    ]

    return array(rot) if as_array else rot
def is_valid_rotation_matrix(
        a: _onp.ndarray,
        tol=1e-9
) -> bool:
    """Return whether ``a`` satisfies the properties of a rotation matrix.

    Specifically, tests for:
    * Volume preservation and right-handedness (determinant == +1)
    * Orthogonality of the output reference frame (a.T @ a == identity)

    Args:
        a: The array-like to be tested.
        tol: Tolerance for the comparisons; accounts for floating-point error.

    Returns: A boolean of whether the array-like is a valid rotation matrix.
    """

    def approx_equal(x, y):
        # Symmetric absolute-tolerance comparison.
        return (x > y - tol) and (x < y + tol)

    # det == +1 iff the transform preserves volume and handedness
    # (det == -1 would indicate a reflection).
    if not approx_equal(linalg.det(a), 1):
        return False

    # For an orthogonal matrix, a.T @ a must equal the identity.
    # all(...) short-circuits on the first failing entry, unlike the
    # previous flag-variable loop that always scanned every element.
    eye_approx = a.T @ a
    eye = _onp.eye(a.shape[0])
    return all(
        approx_equal(eye_approx[i, j], eye[i, j])
        for i in range(eye.shape[0])
        for j in range(eye.shape[1])
    )
| [
"numpy.eye",
"aerosandbox.numpy.linalg.det",
"aerosandbox.numpy.array.array",
"aerosandbox.numpy.sin",
"aerosandbox.numpy.cos"
] | [((533, 543), 'aerosandbox.numpy.sin', 'sin', (['angle'], {}), '(angle)\n', (536, 543), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((552, 562), 'aerosandbox.numpy.cos', 'cos', (['angle'], {}), '(angle)\n', (555, 562), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((1831, 1841), 'aerosandbox.numpy.sin', 'sin', (['angle'], {}), '(angle)\n', (1834, 1841), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((1850, 1860), 'aerosandbox.numpy.cos', 'cos', (['angle'], {}), '(angle)\n', (1853, 1860), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4264, 4278), 'aerosandbox.numpy.sin', 'sin', (['yaw_angle'], {}), '(yaw_angle)\n', (4267, 4278), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4288, 4302), 'aerosandbox.numpy.cos', 'cos', (['yaw_angle'], {}), '(yaw_angle)\n', (4291, 4302), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4312, 4328), 'aerosandbox.numpy.sin', 'sin', (['pitch_angle'], {}), '(pitch_angle)\n', (4315, 4328), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4338, 4354), 'aerosandbox.numpy.cos', 'cos', (['pitch_angle'], {}), '(pitch_angle)\n', (4341, 4354), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4364, 4379), 'aerosandbox.numpy.sin', 'sin', (['roll_angle'], {}), '(roll_angle)\n', (4367, 4379), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((4389, 4404), 'aerosandbox.numpy.cos', 'cos', (['roll_angle'], {}), '(roll_angle)\n', (4392, 4404), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((5315, 5328), 'aerosandbox.numpy.linalg.det', 'linalg.det', (['a'], {}), '(a)\n', (5325, 5328), False, 'from aerosandbox.numpy import sin, cos, linalg\n'), ((5430, 5450), 'numpy.eye', '_onp.eye', (['a.shape[0]'], {}), '(a.shape[0])\n', (5438, 5450), True, 'import numpy as _onp\n'), ((645, 655), 'aerosandbox.numpy.array.array', 'array', (['rot'], {}), '(rot)\n', (650, 655), False, 'from aerosandbox.numpy.array import array\n'), 
((3024, 3034), 'aerosandbox.numpy.array.array', 'array', (['rot'], {}), '(rot)\n', (3029, 3034), False, 'from aerosandbox.numpy.array import array\n'), ((4623, 4633), 'aerosandbox.numpy.array.array', 'array', (['rot'], {}), '(rot)\n', (4628, 4633), False, 'from aerosandbox.numpy.array import array\n')] |
from PIL import Image, ImageDraw, ImageFont
import numpy as np
def DrawRegion(text, font, fontcolor, shadowcolor, shadow_radius=1, spread=0):
    """Render ``text`` with a stroked shadow onto a black RGB canvas and a mask.

    The canvas is sized to the text plus room for the shadow stroke and the
    requested ``spread`` padding. The same text is drawn twice on each canvas:
    once stroked (shadow / mask outline), once plain on top.

    Returns:
        (text_array, mask_array): RGB image and single-channel (0/255) mask,
        both as numpy arrays.
    """
    # Size the canvas: text extent plus shadow stroke and spread on both sides.
    padding = 2 * shadow_radius + 2 * spread
    text_width, text_height = font.getsize(text)
    text_width += padding
    text_height += padding

    text_img = Image.new("RGB", (text_width, text_height), (0, 0, 0))
    mask_img = Image.new("L", (text_width, text_height), 0)

    shadow_xy = (spread - shadow_radius, spread - shadow_radius)
    body_xy = (spread, spread)

    # Draw shadow (stroked) then body text on both the image and the mask.
    for canvas, shadow_fill, body_fill in (
            (text_img, shadowcolor, fontcolor),
            (mask_img, 255, 255),
    ):
        pen = ImageDraw.Draw(canvas)
        pen.text(shadow_xy, text, font=font, fill=shadow_fill, stroke_width=shadow_radius)
        pen.text(body_xy, text, font=font, fill=body_fill)

    return np.array(text_img), np.array(mask_img)
| [
"numpy.array",
"PIL.Image.new",
"PIL.ImageDraw.Draw"
] | [((386, 440), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(text_width, text_height)', '(0, 0, 0)'], {}), "('RGB', (text_width, text_height), (0, 0, 0))\n", (395, 440), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((453, 497), 'PIL.Image.new', 'Image.new', (['"""L"""', '(text_width, text_height)', '(0)'], {}), "('L', (text_width, text_height), 0)\n", (462, 497), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((535, 559), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['ret_text'], {}), '(ret_text)\n', (549, 559), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((573, 597), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['ret_mask'], {}), '(ret_mask)\n', (587, 597), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1016, 1034), 'numpy.array', 'np.array', (['ret_text'], {}), '(ret_text)\n', (1024, 1034), True, 'import numpy as np\n'), ((1036, 1054), 'numpy.array', 'np.array', (['ret_mask'], {}), '(ret_mask)\n', (1044, 1054), True, 'import numpy as np\n')] |
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
from typing import Dict, Any, Optional
from typing import Sequence
import numpy as np
import pandas as pd
import pytest
import cirq
import cirq.experiments.random_quantum_circuit_generation as rqcg
from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits
def test_simulate_2q_xeb_circuits():
    """Simulated XEB probabilities are normalized and match across serial/pool runs."""
    q0, q1 = cirq.LineQubit.range(2)
    circuits = []
    for _ in range(2):
        circuits.append(
            rqcg.random_rotations_between_two_qubit_circuit(
                q0, q1, depth=50,
                two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
            )
        )
    cycle_depths = np.arange(3, 50, 9)

    df = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths)
    assert len(df) == len(cycle_depths) * len(circuits)
    for (circuit_i, cycle_depth), row in df.iterrows():
        assert 0 <= circuit_i < len(circuits)
        assert cycle_depth in cycle_depths
        # Two qubits -> 4 outcome probabilities, summing to one.
        assert len(row['pure_probs']) == 4
        assert np.isclose(np.sum(row['pure_probs']), 1)

    # The multiprocessing path must produce the same dataframe.
    with multiprocessing.Pool() as pool:
        df2 = simulate_2q_xeb_circuits(circuits, cycle_depths, pool=pool)
    pd.testing.assert_frame_equal(df, df2)
def test_simulate_circuit_length_validation():
    """Circuits shorter than the deepest requested cycle depth must be rejected."""
    q0, q1 = cirq.LineQubit.range(2)

    def make_short_circuit():
        return rqcg.random_rotations_between_two_qubit_circuit(
            q0,
            q1,
            depth=10,  # not long enough!
            two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
        )

    circuits = [make_short_circuit() for _ in range(2)]
    cycle_depths = np.arange(3, 50, 9)

    with pytest.raises(ValueError, match='.*not long enough.*'):
        _ = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths)
def _ref_simulate_2q_xeb_circuit(task: Dict[str, Any]):
    """Helper function for simulating a given (circuit, cycle_depth)."""
    cycle_depth = task['cycle_depth']
    circuit = task['circuit']

    # Each cycle is two moments, plus one extra moment.
    n_moments = cycle_depth * 2 + 1
    assert n_moments <= len(circuit)

    truncated = cirq.resolve_parameters_once(
        circuit[:n_moments], param_resolver=task['param_resolver'])
    result = cirq.Simulator().simulate(truncated)
    pure_probs = cirq.state_vector_to_probabilities(result.final_state_vector)

    return {
        'circuit_i': task['circuit_i'],
        'cycle_depth': cycle_depth,
        'pure_probs': pure_probs,
    }
def _ref_simulate_2q_xeb_circuits(
    circuits: Sequence['cirq.Circuit'],
    cycle_depths: Sequence[int],
    param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
    pool: Optional['multiprocessing.pool.Pool'] = None,
):
    """Reference implementation for `simulate_2q_xeb_circuits` that
    does each circuit independently instead of using intermediate states.

    You can also try editing the helper function to use QSimSimulator() for
    benchmarking. This simulator does not support intermediate states, so
    you can't use it with the new functionality.
    https://github.com/quantumlib/qsim/issues/101
    """
    # One task per (cycle_depth, circuit) pair, cycle depth varying slowest.
    tasks = [
        {
            'circuit_i': circuit_i,
            'cycle_depth': cycle_depth,
            'circuit': circuit,
            'param_resolver': param_resolver,
        }
        for cycle_depth in cycle_depths
        for circuit_i, circuit in enumerate(circuits)
    ]

    if pool is None:
        records = [_ref_simulate_2q_xeb_circuit(task) for task in tasks]
    else:
        records = pool.map(_ref_simulate_2q_xeb_circuit, tasks, chunksize=4)

    frame = pd.DataFrame(records).set_index(['circuit_i', 'cycle_depth'])
    return frame.sort_index()
@pytest.mark.parametrize('multiprocess', (True, False))
def test_incremental_simulate(multiprocess):
    """Incremental simulation must match the per-circuit reference implementation."""
    q0, q1 = cirq.LineQubit.range(2)
    circuits = [
        rqcg.random_rotations_between_two_qubit_circuit(
            q0, q1, depth=100, two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b)
        )
        for _ in range(20)
    ]
    cycle_depths = np.arange(3, 100, 9)
    pool = multiprocessing.Pool() if multiprocess else None

    start = time.perf_counter()
    df_ref = _ref_simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths, pool=pool)
    end1 = time.perf_counter()
    df = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths, pool=pool)
    end2 = time.perf_counter()
    if pool is not None:
        pool.terminate()
    print("\nnew:", end2 - end1, "old:", end1 - start)

    pd.testing.assert_frame_equal(df_ref, df)

    # For a simulator without exact reproducibility (e.g. qsim), swap the
    # exact frame comparison above for approximate equality:
    # assert len(df_ref) == len(df)
    # assert df_ref.columns == df.columns
    # for (i1, row1), (i2, row2) in zip(df_ref.iterrows(), df.iterrows()):
    #     assert i1 == i2
    #     np.testing.assert_allclose(row1['pure_probs'], row2['pure_probs'], atol=5e-5)
| [
"cirq.LineQubit.range",
"cirq.resolve_parameters_once",
"time.perf_counter",
"cirq.experiments.xeb_simulation.simulate_2q_xeb_circuits",
"pytest.mark.parametrize",
"cirq.Simulator",
"numpy.sum",
"pytest.raises",
"multiprocessing.Pool",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"... | [((4296, 4350), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multiprocess"""', '(True, False)'], {}), "('multiprocess', (True, False))\n", (4319, 4350), False, 'import pytest\n'), ((940, 963), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (960, 963), False, 'import cirq\n'), ((1188, 1207), 'numpy.arange', 'np.arange', (['(3)', '(50)', '(9)'], {}), '(3, 50, 9)\n', (1197, 1207), True, 'import numpy as np\n'), ((1218, 1288), 'cirq.experiments.xeb_simulation.simulate_2q_xeb_circuits', 'simulate_2q_xeb_circuits', ([], {'circuits': 'circuits', 'cycle_depths': 'cycle_depths'}), '(circuits=circuits, cycle_depths=cycle_depths)\n', (1242, 1288), False, 'from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits\n'), ((1710, 1748), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['df', 'df2'], {}), '(df, df2)\n', (1739, 1748), True, 'import pandas as pd\n'), ((1811, 1834), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (1831, 1834), False, 'import cirq\n'), ((2116, 2135), 'numpy.arange', 'np.arange', (['(3)', '(50)', '(9)'], {}), '(3, 50, 9)\n', (2125, 2135), True, 'import numpy as np\n'), ((2697, 2766), 'cirq.resolve_parameters_once', 'cirq.resolve_parameters_once', (['tcircuit'], {'param_resolver': 'param_resolver'}), '(tcircuit, param_resolver=param_resolver)\n', (2725, 2766), False, 'import cirq\n'), ((2783, 2799), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (2797, 2799), False, 'import cirq\n'), ((2888, 2927), 'cirq.state_vector_to_probabilities', 'cirq.state_vector_to_probabilities', (['psi'], {}), '(psi)\n', (2922, 2927), False, 'import cirq\n'), ((4409, 4432), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (4429, 4432), False, 'import cirq\n'), ((4659, 4679), 'numpy.arange', 'np.arange', (['(3)', '(100)', '(9)'], {}), '(3, 100, 9)\n', (4668, 4679), True, 'import numpy as np\n'), ((4783, 4802), 'time.perf_counter', 
'time.perf_counter', ([], {}), '()\n', (4800, 4802), False, 'import time\n'), ((4914, 4933), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4931, 4933), False, 'import time\n'), ((4944, 5030), 'cirq.experiments.xeb_simulation.simulate_2q_xeb_circuits', 'simulate_2q_xeb_circuits', ([], {'circuits': 'circuits', 'cycle_depths': 'cycle_depths', 'pool': 'pool'}), '(circuits=circuits, cycle_depths=cycle_depths, pool\n =pool)\n', (4968, 5030), False, 'from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits\n'), ((5037, 5056), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5054, 5056), False, 'import time\n'), ((5167, 5208), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['df_ref', 'df'], {}), '(df_ref, df)\n', (5196, 5208), True, 'import pandas as pd\n'), ((1599, 1621), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (1619, 1621), False, 'import multiprocessing\n'), ((1645, 1704), 'cirq.experiments.xeb_simulation.simulate_2q_xeb_circuits', 'simulate_2q_xeb_circuits', (['circuits', 'cycle_depths'], {'pool': 'pool'}), '(circuits, cycle_depths, pool=pool)\n', (1669, 1704), False, 'from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits\n'), ((2145, 2199), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*not long enough.*"""'}), "(ValueError, match='.*not long enough.*')\n", (2158, 2199), False, 'import pytest\n'), ((2213, 2283), 'cirq.experiments.xeb_simulation.simulate_2q_xeb_circuits', 'simulate_2q_xeb_circuits', ([], {'circuits': 'circuits', 'cycle_depths': 'cycle_depths'}), '(circuits=circuits, cycle_depths=cycle_depths)\n', (2237, 2283), False, 'from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits\n'), ((4717, 4739), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (4737, 4739), False, 'import multiprocessing\n'), ((1559, 1584), 'numpy.sum', 'np.sum', (["row['pure_probs']"], {}), "(row['pure_probs'])\n", (1565, 1584), True, 
'import numpy as np\n'), ((1105, 1126), 'cirq.SQRT_ISWAP', 'cirq.SQRT_ISWAP', (['a', 'b'], {}), '(a, b)\n', (1120, 1126), False, 'import cirq\n'), ((2032, 2053), 'cirq.SQRT_ISWAP', 'cirq.SQRT_ISWAP', (['a', 'b'], {}), '(a, b)\n', (2047, 2053), False, 'import cirq\n'), ((4218, 4239), 'pandas.DataFrame', 'pd.DataFrame', (['records'], {}), '(records)\n', (4230, 4239), True, 'import pandas as pd\n'), ((4575, 4596), 'cirq.SQRT_ISWAP', 'cirq.SQRT_ISWAP', (['a', 'b'], {}), '(a, b)\n', (4590, 4596), False, 'import cirq\n')] |
import numpy
from skimage.data import camera
from dexp.processing.interpolation.warp import warp
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
from dexp.utils.timeit import timeit
def demo_warp_2d_numpy():
    """Run the 2D warp demo on the numpy backend (may be unimplemented)."""
    try:
        with NumpyBackend():
            _demo_warp_2d()
    except NotImplementedError:
        # The numpy code path does not exist yet; report and move on.
        print("Numpy version not yet implemented")
def demo_warp_2d_cupy():
    """Run the 2D warp demo on the CuPy backend, skipping if CuPy is absent."""
    try:
        # Constructing the backend raises if the cupy module is missing.
        with CupyBackend():
            _demo_warp_2d()
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def _demo_warp_2d(grid_size=8):
    """Warp a test image with a random vector field, invert the warp, and
    display image / field / warped / dewarped in a napari viewer."""
    image = camera().astype(numpy.float32) / 255
    image = image[0:477, 0:507]  # crop to a non-square, odd-sized region

    magnitude = 15
    field_shape = (grid_size,) * 2 + (2,)
    vector_field = numpy.random.uniform(low=-magnitude, high=+magnitude, size=field_shape)

    with timeit("warp"):
        warped_image = warp(image, vector_field, vector_field_upsampling=4)
    with timeit("dewarped"):
        # Applying the negated field approximately undoes the warp.
        dewarped_image = warp(warped_image, -vector_field, vector_field_upsampling=4)

    from napari import Viewer, gui_qt

    with gui_qt():

        def _c(array):
            return Backend.to_numpy(array)

        viewer = Viewer()
        viewer.add_image(_c(image), name="image")
        viewer.add_image(_c(vector_field), name="vector_field")
        viewer.add_image(_c(warped_image), name="warped")
        viewer.add_image(_c(dewarped_image), name="dewarped")
if __name__ == "__main__":
    # Run the CuPy-backed demo by default; the numpy variant is not yet implemented.
    demo_warp_2d_cupy()
    # demo_warp_2d_numpy()
| [
"napari.Viewer",
"napari.gui_qt",
"dexp.utils.backends.CupyBackend",
"dexp.utils.backends.Backend.to_numpy",
"dexp.utils.backends.NumpyBackend",
"numpy.random.uniform",
"skimage.data.camera",
"dexp.utils.timeit.timeit",
"dexp.processing.interpolation.warp.warp"
] | [((723, 810), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-magnitude)', 'high': '(+magnitude)', 'size': '((grid_size,) * 2 + (2,))'}), '(low=-magnitude, high=+magnitude, size=(grid_size,) * 2 +\n (2,))\n', (743, 810), False, 'import numpy\n'), ((817, 831), 'dexp.utils.timeit.timeit', 'timeit', (['"""warp"""'], {}), "('warp')\n", (823, 831), False, 'from dexp.utils.timeit import timeit\n'), ((850, 902), 'dexp.processing.interpolation.warp.warp', 'warp', (['image', 'vector_field'], {'vector_field_upsampling': '(4)'}), '(image, vector_field, vector_field_upsampling=4)\n', (854, 902), False, 'from dexp.processing.interpolation.warp import warp\n'), ((913, 931), 'dexp.utils.timeit.timeit', 'timeit', (['"""dewarped"""'], {}), "('dewarped')\n", (919, 931), False, 'from dexp.utils.timeit import timeit\n'), ((952, 1006), 'dexp.processing.interpolation.warp.warp', 'warp', (['warped', '(-vector_field)'], {'vector_field_upsampling': '(4)'}), '(warped, -vector_field, vector_field_upsampling=4)\n', (956, 1006), False, 'from dexp.processing.interpolation.warp import warp\n'), ((1056, 1064), 'napari.gui_qt', 'gui_qt', ([], {}), '()\n', (1062, 1064), False, 'from napari import Viewer, gui_qt\n'), ((1151, 1159), 'napari.Viewer', 'Viewer', ([], {}), '()\n', (1157, 1159), False, 'from napari import Viewer, gui_qt\n'), ((252, 266), 'dexp.utils.backends.NumpyBackend', 'NumpyBackend', ([], {}), '()\n', (264, 266), False, 'from dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n'), ((428, 441), 'dexp.utils.backends.CupyBackend', 'CupyBackend', ([], {}), '()\n', (439, 441), False, 'from dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n'), ((1109, 1132), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['array'], {}), '(array)\n', (1125, 1132), False, 'from dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n'), ((615, 623), 'skimage.data.camera', 'camera', ([], {}), '()\n', (621, 623), False, 'from skimage.data import 
camera\n')] |
# Copyright 2020 The TensorFlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint-as: python3
"""Tests for ANN layers."""
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_recommenders.layers import ann
class AnnTest(tf.test.TestCase):

  def test_brute_force(self):
    """Checks a BruteForce index round-trips through SavedModel.

    Builds an index over random candidates, queries it, saves/reloads the
    model, queries again, and asserts the results are identical.
    """
    num_candidates, num_queries = (1000, 4)

    rng = np.random.RandomState(42)
    candidates = rng.normal(size=(num_candidates, 4)).astype(np.float32)
    query = rng.normal(size=(num_queries, 4)).astype(np.float32)
    # `np.str` was a deprecated alias for the builtin `str` (removed in
    # NumPy 1.24); use `str` directly.
    candidate_names = np.arange(num_candidates).astype(str)

    index = ann.BruteForce(query_model=lambda x: x)
    index.index(candidates, candidate_names)

    # Repeated calls exercise any internal caching before serialization.
    for _ in range(100):
      pre_serialization_results = index(query[:2])

    with tempfile.TemporaryDirectory() as tmp:
      path = os.path.join(tmp, "query_model")
      index.save(path)
      loaded = tf.keras.models.load_model(path)

    for _ in range(100):
      post_serialization_results = loaded(tf.constant(query[:2]))

    self.assertAllEqual(post_serialization_results, pre_serialization_results)


if __name__ == "__main__":
  tf.test.main()
| [
"tempfile.TemporaryDirectory",
"numpy.arange",
"os.path.join",
"tensorflow_recommenders.layers.ann.BruteForce",
"tensorflow.test.main",
"tensorflow.keras.models.load_model",
"tensorflow.constant",
"numpy.random.RandomState"
] | [((1658, 1672), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1670, 1672), True, 'import tensorflow as tf\n'), ((888, 913), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (909, 913), True, 'import numpy as np\n'), ((1128, 1167), 'tensorflow_recommenders.layers.ann.BruteForce', 'ann.BruteForce', ([], {'query_model': '(lambda x: x)'}), '(query_model=lambda x: x)\n', (1142, 1167), False, 'from tensorflow_recommenders.layers import ann\n'), ((1300, 1329), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1327, 1329), False, 'import tempfile\n'), ((1351, 1383), 'os.path.join', 'os.path.join', (['tmp', '"""query_model"""'], {}), "(tmp, 'query_model')\n", (1363, 1383), False, 'import os\n'), ((1422, 1454), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['path'], {}), '(path)\n', (1448, 1454), True, 'import tensorflow as tf\n'), ((1074, 1099), 'numpy.arange', 'np.arange', (['num_candidates'], {}), '(num_candidates)\n', (1083, 1099), True, 'import numpy as np\n'), ((1523, 1545), 'tensorflow.constant', 'tf.constant', (['query[:2]'], {}), '(query[:2])\n', (1534, 1545), True, 'import tensorflow as tf\n')] |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import os
import numpy
from dcase_util.datasets import AcousticSceneDataset
from dcase_util.containers import MetaDataContainer, MetaDataItem
from dcase_util.utils import Path
class DCASE2013_Scenes_DevelopmentSet(AcousticSceneDataset):
    """DCASE2013 Acoustic scenes 2013 development dataset
    This dataset was used in DCASE2013 - Task 1, Acoustic scene classification
    """
    def __init__(self,
                 storage_name='DCASE2013-acoustic-scenes-development',
                 data_path=None,
                 included_content_types=None,
                 **kwargs):
        """
        Constructor
        Parameters
        ----------
        storage_name : str
            Name to be used when storing dataset on disk
            Default value 'DCASE2013-acoustic-scenes-development'
        data_path : str
            Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
            is used.
            Default value None
        included_content_types : list of str or str
            Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
            'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
            Default value None
        """
        # Collect the explicit arguments together with the fixed dataset
        # description and hand everything to the base class constructor.
        kwargs['included_content_types'] = included_content_types
        kwargs['data_path'] = data_path
        kwargs['storage_name'] = storage_name
        kwargs['dataset_group'] = 'scene'
        kwargs['dataset_meta'] = {
            'authors': '<NAME>, <NAME>, <NAME>, and <NAME>',
            'title': 'IEEE AASP CASA Challenge - Public Dataset for Scene Classification Task',
            'url': 'https://archive.org/details/dcase2013_scene_classification',
            'audio_source': 'Field recording',
            'audio_type': 'Natural',
            'recording_device_model': 'Unknown',
            'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        }
        kwargs['crossvalidation_folds'] = 5
        # Single downloadable package containing audio, metadata and docs.
        kwargs['package_list'] = [
            {
                'content_type': ['audio', 'meta', 'documentation'],
                'remote_file': 'https://archive.org/download/dcase2013_scene_classification/scenes_stereo.zip',
                'remote_bytes': 361748263,
                'remote_md5': 'abdefde136d84de33b0f20a0f13a6b97',
                'filename': 'scenes_stereo.zip'
            }
        ]
        kwargs['audio_paths'] = [
            'scenes_stereo'
        ]
        super(DCASE2013_Scenes_DevelopmentSet, self).__init__(**kwargs)

    def prepare(self):
        """Prepare dataset for the usage.

        Builds the meta data file from the audio file names (if missing) and
        generates the cross-validation setup files (if any are missing) using
        a stratified 70/30 train/test shuffle split per fold.

        Returns
        -------
        self
        """
        if not self.meta_container.exists():
            # Derive meta data directly from the audio file names.
            meta_data = MetaDataContainer()
            for filename in self.audio_files:
                raw_path, raw_filename = os.path.split(filename)
                relative_path = self.absolute_to_relative_path(raw_path)
                meta_data.append(
                    MetaDataItem(
                        {
                            'filename': os.path.join(relative_path, raw_filename),
                            # Scene label = base file name without extension
                            # and with the last two characters dropped
                            # (presumably a per-recording id suffix -- TODO
                            # confirm against the dataset's file naming).
                            'scene_label': os.path.splitext(os.path.split(filename)[1])[0][:-2],
                        }
                    )
                )
            meta_data.save(
                filename=self.meta_file
            )
            self.load_meta()

        # Check whether every fold already has its setup files on disk.
        all_folds_found = True
        for fold in self.folds():
            train_filename = self.evaluation_setup_filename(
                setup_part='train',
                fold=fold
            )
            test_filename = self.evaluation_setup_filename(
                setup_part='test',
                fold=fold
            )
            if not os.path.isfile(train_filename):
                all_folds_found = False
            if not os.path.isfile(test_filename):
                all_folds_found = False

        if not all_folds_found:
            Path().makedirs(
                path=self.evaluation_setup_path
            )

            classes = []
            files = []
            for item in self.meta:
                classes.append(item.scene_label)
                files.append(item.filename)
            files = numpy.array(files)

            # Imported lazily: sklearn is only needed when the fold setup
            # files have to be generated.
            from sklearn.model_selection import StratifiedShuffleSplit
            sss = StratifiedShuffleSplit(
                n_splits=self.crossvalidation_folds,
                test_size=0.3,
                random_state=0
            )

            fold = 1
            for train_index, test_index in sss.split(X=numpy.zeros(len(classes)), y=classes):
                train_files = files[train_index]
                test_files = files[test_index]

                train_filename = self.evaluation_setup_filename(
                    setup_part='train',
                    fold=fold
                )
                test_filename = self.evaluation_setup_filename(
                    setup_part='test',
                    fold=fold
                )
                eval_filename = self.evaluation_setup_filename(
                    setup_part='evaluate',
                    fold=fold
                )

                # Create meta containers and save them

                # Train list: file names with scene labels.
                train_meta = MetaDataContainer(
                    filename=train_filename
                )
                for filename in train_files:
                    train_meta += self.meta_container.filter(
                        filename=filename
                    )
                train_meta.save()

                # Test list: file names only (labels withheld).
                test_meta = MetaDataContainer(
                    filename=test_filename
                )
                for filename in test_files:
                    test_meta.append(
                        MetaDataItem(
                            {
                                'filename': self.absolute_to_relative_path(filename)
                            }
                        )
                    )
                test_meta.save()

                # Evaluate list: same files as test, with their labels.
                eval_meta = MetaDataContainer(
                    filename=eval_filename
                )
                for filename in test_files:
                    eval_meta += self.meta_container.filter(
                        filename=filename
                    )
                eval_meta.save()

                fold += 1

        # Load meta and cross validation
        self.load()
        return self
class DCASE2013_Scenes_EvaluationSet(AcousticSceneDataset):
    """DCASE2013 Acoustic scenes 2013 evaluation dataset
    This dataset was used in DCASE2013 - Task 1, Acoustic scene classification
    """
    def __init__(self,
                 storage_name='DCASE2013-acoustic-scenes-evaluation',
                 data_path=None,
                 included_content_types=None,
                 **kwargs):
        """
        Constructor
        Parameters
        ----------
        storage_name : str
            Name to be used when storing dataset on disk
            Default value 'DCASE2013-acoustic-scenes-evaluation'
        data_path : str
            Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
            is used.
            Default value None
        included_content_types : list of str or str
            Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
            'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
            Default value None
        """
        # Collect the explicit arguments together with the fixed dataset
        # description and hand everything to the base class constructor.
        kwargs['included_content_types'] = included_content_types
        kwargs['data_path'] = data_path
        kwargs['storage_name'] = storage_name
        kwargs['dataset_group'] = 'scene'
        kwargs['dataset_meta'] = {
            'authors': '<NAME>, <NAME>, <NAME>, and <NAME>',
            'title': 'IEEE AASP CASA Challenge - Private Dataset for Scene Classification Task',
            'url': 'https://archive.org/details/dcase2013_scene_classification_testset',
            'audio_source': 'Field recording',
            'audio_type': 'Natural',
            'recording_device_model': 'Unknown',
            'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        }
        kwargs['crossvalidation_folds'] = 5
        # Two packages: the audio archive and a separate filename-key CSV.
        kwargs['package_list'] = [
            {
                'content_type': ['audio', 'documentation'],
                'remote_file': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
                'remote_bytes': 371994727,
                'remote_md5': None,
                'filename': 'scenes_stereo_testset.zip'
            },
            {
                'content_type': ['meta'],
                'remote_file': 'https://archive.org/download/dcase2013_scene_classification_testset/dcase2013_task1_filenamekey.csv',
                'remote_bytes': 8572,
                'remote_md5': None,
                'filename': 'dcase2013_task1_filenamekey.csv'
            }
        ]
        kwargs['audio_paths'] = [
            'audio'
        ]
        super(DCASE2013_Scenes_EvaluationSet, self).__init__(**kwargs)

    def prepare(self):
        """Prepare dataset for the usage.

        Moves the downloaded wav files under 'audio/' with a 'test_' prefix
        (so they cannot clash with DCASE2013_Scenes_DevelopmentSet files),
        builds the meta data file (if missing), and generates the
        cross-validation setup files (if any are missing).

        Returns
        -------
        self
        """
        Path(os.path.join(self.local_path, 'audio')).create()
        for filename in Path(os.path.join(self.local_path)).file_list(extensions='wav'):
            # Rename files so that they do not overlap with ones in DCASE2013_Scenes_DevelopmentSet
            if not os.path.split(filename)[1].startswith('test_'):
                base, file = os.path.split(filename)
                os.rename(
                    filename,
                    os.path.join(base, 'audio', 'test_' + file)
                )
        # Reset cached file list (presumably recomputed lazily by the base
        # class) so the renames above are picked up.
        self.files = None

        if not self.meta_container.exists():
            # Derive meta data directly from the audio file names.
            meta_data = MetaDataContainer()
            for filename in self.audio_files:
                raw_path, raw_filename = os.path.split(filename)
                relative_path = self.absolute_to_relative_path(raw_path)
                meta_data.append(
                    MetaDataItem(
                        {
                            'filename': os.path.join(relative_path, raw_filename),
                            # Base name minus extension, last two characters
                            # and the added 'test_' prefix gives the label.
                            'scene_label': os.path.splitext(os.path.split(filename)[1])[0][:-2].replace('test_', ''),
                        }
                    )
                )
            meta_data.save(
                filename=self.meta_file
            )
            self.load_meta()

        # Check whether every fold already has its setup files on disk.
        all_folds_found = True
        for fold in self.folds():
            train_filename = self.evaluation_setup_filename(
                setup_part='train',
                fold=fold
            )
            test_filename = self.evaluation_setup_filename(
                setup_part='test',
                fold=fold
            )
            if not os.path.isfile(train_filename):
                all_folds_found = False
            if not os.path.isfile(test_filename):
                all_folds_found = False

        if not all_folds_found:
            Path().makedirs(
                path=self.evaluation_setup_path
            )

            classes = []
            files = []
            for item in self.meta:
                classes.append(item.scene_label)
                files.append(item.filename)
            files = numpy.array(files)

            # Imported lazily: sklearn is only needed when the fold setup
            # files have to be generated.
            from sklearn.model_selection import StratifiedShuffleSplit
            sss = StratifiedShuffleSplit(
                n_splits=self.crossvalidation_folds,
                test_size=0.3,
                random_state=0
            )

            fold = 1
            for train_index, test_index in sss.split(X=numpy.zeros(len(classes)), y=classes):
                train_files = files[train_index]
                test_files = files[test_index]

                train_filename = self.evaluation_setup_filename(
                    setup_part='train',
                    fold=fold
                )
                test_filename = self.evaluation_setup_filename(
                    setup_part='test',
                    fold=fold
                )
                eval_filename = self.evaluation_setup_filename(
                    setup_part='evaluate',
                    fold=fold
                )

                # Create meta containers and save them

                # Train list: file names with scene labels.
                train_meta = MetaDataContainer(
                    filename=train_filename
                )
                for filename in train_files:
                    train_meta += self.meta_container.filter(
                        filename=filename
                    )
                train_meta.save()

                # Test list: file names only (labels withheld).
                test_meta = MetaDataContainer(
                    filename=test_filename
                )
                for filename in test_files:
                    test_meta.append(
                        MetaDataItem(
                            {
                                'filename': self.absolute_to_relative_path(filename)
                            }
                        )
                    )
                test_meta.save()

                # Evaluate list: same files as test, with their labels.
                eval_meta = MetaDataContainer(
                    filename=eval_filename
                )
                for filename in test_files:
                    eval_meta += self.meta_container.filter(
                        filename=filename
                    )
                eval_meta.save()

                fold += 1

        # Load meta and cross validation
        self.load()
        return self
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"dcase_util.utils.Path",
"os.path.join",
"dcase_util.containers.MetaDataContainer",
"os.path.split",
"os.path.isfile",
"numpy.array"
] | [((2931, 2950), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {}), '()\n', (2948, 2950), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((4434, 4452), 'numpy.array', 'numpy.array', (['files'], {}), '(files)\n', (4445, 4452), False, 'import numpy\n'), ((4543, 4637), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'self.crossvalidation_folds', 'test_size': '(0.3)', 'random_state': '(0)'}), '(n_splits=self.crossvalidation_folds, test_size=0.3,\n random_state=0)\n', (4565, 4637), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((10202, 10221), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {}), '()\n', (10219, 10221), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((11726, 11744), 'numpy.array', 'numpy.array', (['files'], {}), '(files)\n', (11737, 11744), False, 'import numpy\n'), ((11835, 11929), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'self.crossvalidation_folds', 'test_size': '(0.3)', 'random_state': '(0)'}), '(n_splits=self.crossvalidation_folds, test_size=0.3,\n random_state=0)\n', (11857, 11929), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((3039, 3062), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (3052, 3062), False, 'import os\n'), ((3949, 3979), 'os.path.isfile', 'os.path.isfile', (['train_filename'], {}), '(train_filename)\n', (3963, 3979), False, 'import os\n'), ((4041, 4070), 'os.path.isfile', 'os.path.isfile', (['test_filename'], {}), '(test_filename)\n', (4055, 4070), False, 'import os\n'), ((5479, 5521), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'train_filename'}), '(filename=train_filename)\n', (5496, 5521), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((5819, 5860), 
'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'test_filename'}), '(filename=test_filename)\n', (5836, 5860), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((6304, 6345), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'eval_filename'}), '(filename=eval_filename)\n', (6321, 6345), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((9943, 9966), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (9956, 9966), False, 'import os\n'), ((10310, 10333), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (10323, 10333), False, 'import os\n'), ((11241, 11271), 'os.path.isfile', 'os.path.isfile', (['train_filename'], {}), '(train_filename)\n', (11255, 11271), False, 'import os\n'), ((11333, 11362), 'os.path.isfile', 'os.path.isfile', (['test_filename'], {}), '(test_filename)\n', (11347, 11362), False, 'import os\n'), ((12771, 12813), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'train_filename'}), '(filename=train_filename)\n', (12788, 12813), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((13111, 13152), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'test_filename'}), '(filename=test_filename)\n', (13128, 13152), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((13595, 13636), 'dcase_util.containers.MetaDataContainer', 'MetaDataContainer', ([], {'filename': 'eval_filename'}), '(filename=eval_filename)\n', (13612, 13636), False, 'from dcase_util.containers import MetaDataContainer, MetaDataItem\n'), ((4157, 4163), 'dcase_util.utils.Path', 'Path', ([], {}), '()\n', (4161, 4163), False, 'from dcase_util.utils import Path\n'), ((9608, 9646), 'os.path.join', 'os.path.join', (['self.local_path', '"""audio"""'], {}), "(self.local_path, 'audio')\n", (9620, 9646), False, 'import 
os\n'), ((9687, 9716), 'os.path.join', 'os.path.join', (['self.local_path'], {}), '(self.local_path)\n', (9699, 9716), False, 'import os\n'), ((10044, 10087), 'os.path.join', 'os.path.join', (['base', '"""audio"""', "('test_' + file)"], {}), "(base, 'audio', 'test_' + file)\n", (10056, 10087), False, 'import os\n'), ((11449, 11455), 'dcase_util.utils.Path', 'Path', ([], {}), '()\n', (11453, 11455), False, 'from dcase_util.utils import Path\n'), ((3271, 3312), 'os.path.join', 'os.path.join', (['relative_path', 'raw_filename'], {}), '(relative_path, raw_filename)\n', (3283, 3312), False, 'import os\n'), ((9866, 9889), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (9879, 9889), False, 'import os\n'), ((10542, 10583), 'os.path.join', 'os.path.join', (['relative_path', 'raw_filename'], {}), '(relative_path, raw_filename)\n', (10554, 10583), False, 'import os\n'), ((3374, 3397), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (3387, 3397), False, 'import os\n'), ((10645, 10668), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (10658, 10668), False, 'import os\n')] |
from headers import *
from colorama import Fore, Back, Style
import numpy as np
class paddle:
    """Player paddle for the game board.

    Keeps a centre position plus pre-built character blocks: a 3x15 dash
    bar (``place_paddle``), a 4x15 variant whose top row has red '|' ends
    (``place_paddle2``), and a 4x15 blank block used for erasing.
    """

    def __init__(self):
        # Centre of the paddle on the board.
        self.__pos_x = 74
        self.__pos_y = 40
        self.__vel_x = 0
        self.__vel_y = 0
        # 3x15 body, filled with dashes by create().
        self.__body = np.zeros((3, 15), dtype='<U20')
        # 4x15 blank block used to clear the paddle's footprint.
        self.__empty = np.zeros((4, 15), dtype='<U20')
        self.__empty[:] = ' '
        # Single-dash template tiled into the body rows.
        self.__x = np.array(['-'])
        # 4x15 variant built lazily by place_paddle2().
        self.__body2 = np.zeros((4, 15), dtype='<U20')

    def create(self, grid):
        """Fill all three body rows with '-' (grid argument is unused)."""
        dash_row = np.tile(self.__x, 15)
        for row in range(3):
            self.__body[row] = dash_row

    def place_paddle(self, grid):
        """Stamp the 3-row dash body onto the grid, centred on the position."""
        cx = self.__pos_x
        cy = self.__pos_y
        grid[cy - 1:cy + 2, cx - 7:cx + 8] = self.__body

    def place_paddle2(self, grid):
        """Stamp the 4-row variant (red '|' ends on the top row) onto the grid."""
        cx = self.__pos_x
        cy = self.__pos_y
        red_bar = Fore.RED + '|'
        self.__body2[0] = [red_bar] * 3 + [' '] * 9 + [red_bar] * 3
        dash_row = np.tile(self.__x, 15)
        for row in (1, 2, 3):
            self.__body2[row] = dash_row
        grid[cy - 1:cy + 3, cx - 7:cx + 8] = self.__body2

    def get_pos_x(self):
        """Return the paddle centre x-coordinate."""
        return self.__pos_x

    def get_pos_y(self):
        """Return the paddle centre y-coordinate."""
        return self.__pos_y

    def erase_paddle(self, grid):
        '''Erases mando off the board, reduces lives
        '''
        cx = self.__pos_x
        cy = self.__pos_y
        grid[cy - 1:cy + 3, cx - 7:cx + 8] = self.__empty

    def set_values(self, x):
        '''sets appropriate values of mando and returns 1 if in path of obstacle
        '''
        # A sentinel of -100 means "leave the position unchanged".
        if x != -100:
            self.__pos_x += x
        # Clamp the centre into the playable horizontal range.
        if self.__pos_x > length - 8:
            self.__pos_x = length - 8
        elif self.__pos_x <= 7:
            self.__pos_x = 7
| [
"numpy.array",
"numpy.zeros",
"numpy.tile"
] | [((204, 235), 'numpy.zeros', 'np.zeros', (['(3, 15)'], {'dtype': '"""<U20"""'}), "((3, 15), dtype='<U20')\n", (212, 235), True, 'import numpy as np\n'), ((254, 285), 'numpy.zeros', 'np.zeros', (['(4, 15)'], {'dtype': '"""<U20"""'}), "((4, 15), dtype='<U20')\n", (262, 285), True, 'import numpy as np\n'), ((321, 336), 'numpy.array', 'np.array', (["['-']"], {}), "(['-'])\n", (329, 336), True, 'import numpy as np\n'), ((352, 383), 'numpy.zeros', 'np.zeros', (['(4, 15)'], {'dtype': '"""<U20"""'}), "((4, 15), dtype='<U20')\n", (360, 383), True, 'import numpy as np\n'), ((426, 447), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (433, 447), True, 'import numpy as np\n'), ((464, 485), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (471, 485), True, 'import numpy as np\n'), ((503, 524), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (510, 524), True, 'import numpy as np\n'), ((864, 885), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (871, 885), True, 'import numpy as np\n'), ((903, 924), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (910, 924), True, 'import numpy as np\n'), ((943, 964), 'numpy.tile', 'np.tile', (['self.__x', '(15)'], {}), '(self.__x, 15)\n', (950, 964), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import def_function
from tensorflow.python.framework import test_util
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.ipu import utils
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ipu.ops import replication_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IPUStrategyV1ReplicatedTest(test_util.TensorFlowTestCase):
  """Tests for IPUStrategyV1 running replicated across 2 IPUs."""

  @tu.test_uses_ipus(num_ipus=2)
  @test_util.run_v2_only
  def test_all_reduce(self):
    """Cross-replica all-reduce of the replica index.

    With 2 replicas the indices are 0 and 1, so SUM yields 1.0 and
    MEAN yields 0.5.
    """
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.auto_select_ipus = 2
    cfg.device_connection.enable_remote_buffers = True
    cfg.device_connection.type = utils.DeviceConnectionType.ON_DEMAND
    cfg.configure_ipu_system()

    strategy = ipu_strategy.IPUStrategyV1()

    def make_all_reduce_function(reduce_op):
      @def_function.function(experimental_compile=True)
      def all_reduce_function():
        replica_ctx = distribution_strategy_context.get_replica_context()
        # Each replica contributes its own replication index as a float.
        x = math_ops.cast(replication_ops.replication_index(), np.float32)
        return replica_ctx.all_reduce(reduce_op, x)

      return all_reduce_function

    with strategy.scope():
      summed = strategy.run(make_all_reduce_function(reduce_util.ReduceOp.SUM))
      self.assertEqual(1.0, summed.numpy())

      mean = strategy.run(make_all_reduce_function(reduce_util.ReduceOp.MEAN))
      self.assertEqual(0.5, mean.numpy())

  @tu.test_uses_ipus(num_ipus=2)
  @test_util.run_v2_only
  def test_optimizer(self):
    """SGD with replicated gradients: v2 optimizers sum across replicas."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.auto_select_ipus = 2
    cfg.device_connection.enable_remote_buffers = True
    cfg.device_connection.type = utils.DeviceConnectionType.ON_DEMAND
    cfg.configure_ipu_system()

    strategy = ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      initial_variable = 2.0
      variable = variables.Variable(initial_variable)
      learning_rate = 0.5
      num_iterations = 3

      data = [1.0, 2.0]
      dataset = dataset_ops.Dataset.from_tensor_slices((data))
      dataset = dataset.repeat(num_iterations)
      infeed = ipu_infeed_queue.IPUInfeedQueue(dataset)

      optimizer = keras.optimizer_v2.gradient_descent.SGD(learning_rate)

      @def_function.function(experimental_compile=True)
      def apply_gradient():
        # Pull the next value from the infeed and use it as the gradient.
        gradient = infeed._dequeue()  # pylint: disable=protected-access
        optimizer.apply_gradients([(gradient, variable)])

      # The optimizers in v2 will sum the gradients, and not average them.
      expected_gradient = np.sum(data)
      expected_variable = initial_variable

      infeed.initializer  # pylint: disable=pointless-statement

      for _ in range(num_iterations):
        strategy.run(apply_gradient)
        expected_variable -= learning_rate * expected_gradient

      self.assertEqual(expected_variable, variable.numpy())


if __name__ == "__main__":
  test.main()
| [
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.compiler.plugin.poplar.tests.test_utils.test_uses_ipus",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ipu.ops.replication_ops.replication_index",
"tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQue... | [((1531, 1560), 'tensorflow.compiler.plugin.poplar.tests.test_utils.test_uses_ipus', 'tu.test_uses_ipus', ([], {'num_ipus': '(2)'}), '(num_ipus=2)\n', (1548, 1560), True, 'from tensorflow.compiler.plugin.poplar.tests import test_utils as tu\n'), ((2558, 2587), 'tensorflow.compiler.plugin.poplar.tests.test_utils.test_uses_ipus', 'tu.test_uses_ipus', ([], {'num_ipus': '(2)'}), '(num_ipus=2)\n', (2575, 2587), True, 'from tensorflow.compiler.plugin.poplar.tests import test_utils as tu\n'), ((4034, 4045), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (4043, 4045), False, 'from tensorflow.python.platform import test\n'), ((1625, 1636), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (1634, 1636), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((1881, 1909), 'tensorflow.python.ipu.ipu_strategy.IPUStrategyV1', 'ipu_strategy.IPUStrategyV1', ([], {}), '()\n', (1907, 1909), False, 'from tensorflow.python.ipu import ipu_strategy\n'), ((2651, 2662), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (2660, 2662), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((2907, 2935), 'tensorflow.python.ipu.ipu_strategy.IPUStrategyV1', 'ipu_strategy.IPUStrategyV1', ([], {}), '()\n', (2933, 2935), False, 'from tensorflow.python.ipu import ipu_strategy\n'), ((1963, 2011), 'tensorflow.python.eager.def_function.function', 'def_function.function', ([], {'experimental_compile': '(True)'}), '(experimental_compile=True)\n', (1984, 2011), False, 'from tensorflow.python.eager import def_function\n'), ((3010, 3046), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['initial_variable'], {}), '(initial_variable)\n', (3028, 3046), False, 'from tensorflow.python.ops import variables\n'), ((3139, 3183), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['data'], 
{}), '(data)\n', (3177, 3183), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((3248, 3288), 'tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue', 'ipu_infeed_queue.IPUInfeedQueue', (['dataset'], {}), '(dataset)\n', (3279, 3288), False, 'from tensorflow.python.ipu import ipu_infeed_queue\n'), ((3308, 3362), 'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD', 'keras.optimizer_v2.gradient_descent.SGD', (['learning_rate'], {}), '(learning_rate)\n', (3347, 3362), False, 'from tensorflow.python import keras\n'), ((3371, 3419), 'tensorflow.python.eager.def_function.function', 'def_function.function', ([], {'experimental_compile': '(True)'}), '(experimental_compile=True)\n', (3392, 3419), False, 'from tensorflow.python.eager import def_function\n'), ((3681, 3693), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (3687, 3693), True, 'import numpy as np\n'), ((2067, 2118), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'distribution_strategy_context.get_replica_context', ([], {}), '()\n', (2116, 2118), False, 'from tensorflow.python.distribute import distribution_strategy_context\n'), ((2145, 2180), 'tensorflow.python.ipu.ops.replication_ops.replication_index', 'replication_ops.replication_index', ([], {}), '()\n', (2178, 2180), False, 'from tensorflow.python.ipu.ops import replication_ops\n')] |
import cv2
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import sampler
class CSVDataset(Dataset):
    """Dataset backed by a dataframe with 'ImageID' (path) and 'class' columns.

    Images are read with ``cv2.imread`` and optionally passed through
    ``transform`` before being returned alongside their label.
    """

    def __init__(self, df, transform):
        self.df = df
        self.transform = transform

    def __getitem__(self, index):
        record = self.df.iloc[index]
        label = record['class']
        image = cv2.imread(record['ImageID'])
        if self.transform is not None:
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.df)
class TestDataset(Dataset):
    """Inference-time dataset over a plain sequence of image paths (no labels)."""

    def __init__(self, img_paths, transform):
        self.img_paths = img_paths
        self.transform = transform

    def __getitem__(self, index):
        path = str(self.img_paths[index])
        image = cv2.imread(path)
        if self.transform is None:
            return image
        return self.transform(image)

    def __len__(self):
        return len(self.img_paths)
class InfiniteSampler(sampler.Sampler):
    """Endless sampler: yields a fresh random permutation of
    ``range(num_samples)`` on every pass, forever.
    """

    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __iter__(self):
        while True:
            yield from np.random.permutation(self.num_samples)

    def __len__(self):
        # NOTE: deliberately returns None (the sampler is unbounded);
        # calling len() on it therefore raises TypeError.
        return None
| [
"cv2.imread",
"numpy.random.permutation"
] | [((312, 338), 'cv2.imread', 'cv2.imread', (["row['ImageID']"], {}), "(row['ImageID'])\n", (322, 338), False, 'import cv2\n'), ((1099, 1138), 'numpy.random.permutation', 'np.random.permutation', (['self.num_samples'], {}), '(self.num_samples)\n', (1120, 1138), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.engine import base_layer_utils
from keras.layers.rnn import gru
from keras.layers.rnn import gru_v1
from keras.layers.rnn import lstm
from keras.layers.rnn import lstm_v1
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import generic_utils
# isort: off
from tensorflow.python.training.tracking import (
util as trackable_util,
)
# Lightweight tuple types used by the nested input/output/state RNN tests.
NestedInput = collections.namedtuple("NestedInput", "t1 t2")
NestedState = collections.namedtuple("NestedState", "s1 s2")
@test_combinations.run_all_keras_modes
class RNNTest(test_combinations.TestCase):
    def test_minimal_rnn_cell_non_layer(self):
        """RNN wrapper accepts a minimal cell object that is not a Layer.

        The cell only needs a `state_size` attribute and a
        `call(inputs, states)` method; this is exercised both for a single
        cell and for a stacked list of cells.
        """

        class MinimalRNNCell:
            def __init__(self, units, input_dim):
                self.units = units
                self.state_size = units
                self.kernel = keras.backend.variable(
                    np.random.random((input_dim, units))
                )

            def call(self, inputs, states):
                prev_output = states[0]
                output = keras.backend.dot(inputs, self.kernel) + prev_output
                return output, [output]

        # Basic test case.
        cell = MinimalRNNCell(32, 5)
        x = keras.Input((None, 5))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

        # Test stacking: input dim 5 feeds cells of 8 -> 32 -> 32 units.
        cells = [
            MinimalRNNCell(8, 5),
            MinimalRNNCell(32, 8),
            MinimalRNNCell(32, 32),
        ]
        layer = keras.layers.RNN(cells)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
    def test_minimal_rnn_cell_non_layer_multiple_states(self):
        """RNN wrapper supports non-Layer cells with tuple state.

        Here `state_size` is a pair, so the wrapper must carry two state
        tensors per cell; exercised for a single cell and a stacked list,
        including the stacked cell's reported state/output sizes.
        """

        class MinimalRNNCell:
            def __init__(self, units, input_dim):
                self.units = units
                self.state_size = (units, units)
                self.kernel = keras.backend.variable(
                    np.random.random((input_dim, units))
                )

            def call(self, inputs, states):
                prev_output_1 = states[0]
                prev_output_2 = states[1]
                output = keras.backend.dot(inputs, self.kernel)
                output += prev_output_1
                output -= prev_output_2
                return output, [output * 2, output * 3]

        # Basic test case.
        cell = MinimalRNNCell(32, 5)
        x = keras.Input((None, 5))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

        # Test stacking; the stacked cell exposes per-cell (state, state) sizes.
        cells = [
            MinimalRNNCell(8, 5),
            MinimalRNNCell(16, 8),
            MinimalRNNCell(32, 16),
        ]
        layer = keras.layers.RNN(cells)
        self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
        self.assertEqual(layer.cell.output_size, 32)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
    def test_minimal_rnn_cell_layer(self):
        """A minimal Layer-based cell trains, serializes, and stacks.

        Covers: building a model with a custom cell, round-tripping the
        RNN layer through ``get_config``/``from_config`` (with the custom
        cell registered via ``CustomObjectScope``), and the same for a
        stacked list of cells.
        """
        class MinimalRNNCell(keras.layers.Layer):
            def __init__(self, units, **kwargs):
                self.units = units
                self.state_size = units
                super().__init__(**kwargs)
            def build(self, input_shape):
                self.kernel = self.add_weight(
                    shape=(input_shape[-1], self.units),
                    initializer="uniform",
                    name="kernel",
                )
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer="uniform",
                    name="recurrent_kernel",
                )
                self.built = True
            def call(self, inputs, states):
                prev_output = states[0]
                h = keras.backend.dot(inputs, self.kernel)
                output = h + keras.backend.dot(
                    prev_output, self.recurrent_kernel
                )
                return output, [output]
            def get_config(self):
                # Serialize `units` so from_config can rebuild the cell.
                config = {"units": self.units}
                base_config = super().get_config()
                return dict(list(base_config.items()) + list(config.items()))
        # Test basic case.
        x = keras.Input((None, 5))
        cell = MinimalRNNCell(32)
        layer = keras.layers.RNN(cell)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
        # Test basic case serialization.
        x_np = np.random.random((6, 5, 5))
        y_np = model.predict(x_np)
        weights = model.get_weights()
        config = layer.get_config()
        with generic_utils.CustomObjectScope(
            {"MinimalRNNCell": MinimalRNNCell}
        ):
            layer = keras.layers.RNN.from_config(config)
        # Rebuilt layer with restored weights must reproduce predictions.
        y = layer(x)
        model = keras.models.Model(x, y)
        model.set_weights(weights)
        y_np_2 = model.predict(x_np)
        self.assertAllClose(y_np, y_np_2, atol=1e-4)
        # Test stacking.
        cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
        layer = keras.layers.RNN(cells)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
        # Test stacked RNN serialization.
        x_np = np.random.random((6, 5, 5))
        y_np = model.predict(x_np)
        weights = model.get_weights()
        config = layer.get_config()
        with generic_utils.CustomObjectScope(
            {"MinimalRNNCell": MinimalRNNCell}
        ):
            layer = keras.layers.RNN.from_config(config)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.set_weights(weights)
        y_np_2 = model.predict(x_np)
        self.assertAllClose(y_np, y_np_2, atol=1e-4)
    def test_minimal_rnn_cell_abstract_rnn_cell(self):
        """A cell subclassing ``AbstractRNNCell`` works alone and stacked.

        Unlike the plain-Layer variant, ``state_size``/``output_size`` are
        properties and ``call`` returns the new state bare (not wrapped in
        a list) — the wrapper must handle both conventions.
        """
        class MinimalRNNCell(keras.layers.AbstractRNNCell):
            def __init__(self, units, **kwargs):
                self.units = units
                super().__init__(**kwargs)
            @property
            def state_size(self):
                return self.units
            def build(self, input_shape):
                self.kernel = self.add_weight(
                    shape=(input_shape[-1], self.units),
                    initializer="uniform",
                    name="kernel",
                )
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer="uniform",
                    name="recurrent_kernel",
                )
                self.built = True
            def call(self, inputs, states):
                prev_output = states[0]
                h = keras.backend.dot(inputs, self.kernel)
                output = h + keras.backend.dot(
                    prev_output, self.recurrent_kernel
                )
                # Note: state returned unwrapped (no list) on purpose.
                return output, output
            @property
            def output_size(self):
                return self.units
        cell = MinimalRNNCell(32)
        x = keras.Input((None, 5))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
        # Test stacking.
        cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)]
        layer = keras.layers.RNN(cells)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
    def test_rnn_with_time_major(self):
        """``time_major=True`` RNNs accept/emit (time, batch, feature) data.

        Inputs are transposed to time-major with a Lambda before the RNN
        and back to batch-major after it, for: a single SimpleRNN, a
        stacked RNN, a masked input, and finally a numerical comparison of
        time-major vs. batch-major layers sharing the same weights.
        """
        batch = 10
        time_step = 5
        embedding_dim = 4
        units = 3
        # Test basic case.
        x = keras.Input((time_step, embedding_dim))
        # Swap batch and time axes: (batch, time, dim) -> (time, batch, dim).
        time_major_x = keras.layers.Lambda(
            lambda t: tf.transpose(t, [1, 0, 2])
        )(x)
        layer = keras.layers.SimpleRNN(
            units, time_major=True, return_sequences=True
        )
        self.assertEqual(
            layer.compute_output_shape(
                (time_step, None, embedding_dim)
            ).as_list(),
            [time_step, None, units],
        )
        y = layer(time_major_x)
        self.assertEqual(layer.output_shape, (time_step, None, units))
        # Transpose back to batch-major so targets match model output.
        y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, embedding_dim)),
            np.zeros((batch, time_step, units)),
        )
        # Test stacking.
        x = keras.Input((time_step, embedding_dim))
        time_major_x = keras.layers.Lambda(
            lambda t: tf.transpose(t, [1, 0, 2])
        )(x)
        cell_units = [10, 8, 6]
        cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
        layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
        y = layer(time_major_x)
        # Output width comes from the last stacked cell.
        self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
        y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, embedding_dim)),
            np.zeros((batch, time_step, cell_units[-1])),
        )
        # Test masking.
        x = keras.Input((time_step, embedding_dim))
        time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
            x
        )
        mask = keras.layers.Masking()(time_major)
        rnn = keras.layers.SimpleRNN(
            units, time_major=True, return_sequences=True
        )(mask)
        y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(rnn)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, embedding_dim)),
            np.zeros((batch, time_step, units)),
        )
        # Test layer output
        x = keras.Input((time_step, embedding_dim))
        rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
        y = rnn_1(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, embedding_dim)),
            np.zeros((batch, time_step, units)),
        )
        x_np = np.random.random((batch, time_step, embedding_dim))
        y_np_1 = model.predict(x_np)
        time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
            x
        )
        rnn_2 = keras.layers.SimpleRNN(
            units, time_major=True, return_sequences=True
        )
        y_2 = rnn_2(time_major)
        y_2 = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y_2)
        model_2 = keras.models.Model(x, y_2)
        # Same weights in both layouts must give the same predictions.
        rnn_2.set_weights(rnn_1.get_weights())
        y_np_2 = model_2.predict(x_np)
        self.assertAllClose(y_np_1, y_np_2, atol=1e-4)
    def test_rnn_cell_with_constants_layer(self):
        """RNN cells that consume ``constants`` train and serialize.

        Exercises: the ``constants=`` kwarg vs. flat-list inputs, config
        round-trips with weight restoration, a mixed stack of GRU and
        constant-consuming cells, and GRUCell with ``reset_after=True``.
        """
        # Test basic case.
        x = keras.Input((None, 5))
        c = keras.Input((3,))
        cell = RNNCellWithConstants(32, constant_size=3)
        layer = keras.layers.RNN(cell)
        y = layer(x, constants=c)
        model = keras.models.Model([x, c], y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
        )
        # Test basic case serialization.
        x_np = np.random.random((6, 5, 5))
        c_np = np.random.random((6, 3))
        y_np = model.predict([x_np, c_np])
        weights = model.get_weights()
        config = layer.get_config()
        custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
        with generic_utils.CustomObjectScope(custom_objects):
            layer = keras.layers.RNN.from_config(config.copy())
        y = layer(x, constants=c)
        model = keras.models.Model([x, c], y)
        model.set_weights(weights)
        y_np_2 = model.predict([x_np, c_np])
        self.assertAllClose(y_np, y_np_2, atol=1e-4)
        # test flat list inputs.
        with generic_utils.CustomObjectScope(custom_objects):
            layer = keras.layers.RNN.from_config(config.copy())
        # Constants passed positionally in a flat list instead of kwarg.
        y = layer([x, c])
        model = keras.models.Model([x, c], y)
        model.set_weights(weights)
        y_np_3 = model.predict([x_np, c_np])
        self.assertAllClose(y_np, y_np_3, atol=1e-4)
        # Test stacking.
        cells = [
            gru.GRUCell(8),
            RNNCellWithConstants(12, constant_size=3),
            RNNCellWithConstants(32, constant_size=3),
        ]
        layer = keras.layers.RNN(cells)
        y = layer(x, constants=c)
        model = keras.models.Model([x, c], y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
        )
        # Test GRUCell reset_after property.
        x = keras.Input((None, 5))
        c = keras.Input((3,))
        cells = [gru.GRUCell(32, reset_after=True)]
        layer = keras.layers.RNN(cells)
        y = layer(x, constants=c)
        model = keras.models.Model([x, c], y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
        )
        # Test stacked RNN serialization
        x_np = np.random.random((6, 5, 5))
        c_np = np.random.random((6, 3))
        y_np = model.predict([x_np, c_np])
        weights = model.get_weights()
        config = layer.get_config()
        with generic_utils.CustomObjectScope(custom_objects):
            layer = keras.layers.RNN.from_config(config.copy())
        y = layer(x, constants=c)
        model = keras.models.Model([x, c], y)
        model.set_weights(weights)
        y_np_2 = model.predict([x_np, c_np])
        self.assertAllClose(y_np, y_np_2, atol=1e-4)
    def test_rnn_cell_with_non_keras_constants(self):
        """Constants may be raw tf tensors rather than ``keras.Input``s."""
        # Test basic case.
        x = keras.Input((None, 5))
        # Plain eager/graph tensor, not a symbolic Keras input.
        c = tf.zeros([6, 3], dtype=tf.float32)
        cell = RNNCellWithConstants(32, constant_size=3)
        layer = keras.layers.RNN(cell)
        y = layer(x, constants=c)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
        # Test stacking.
        cells = [
            gru.GRUCell(8),
            RNNCellWithConstants(12, constant_size=3),
            RNNCellWithConstants(32, constant_size=3),
        ]
        layer = keras.layers.RNN(cells)
        y = layer(x, constants=c)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
    def test_rnn_cell_with_constants_layer_passing_initial_state(self):
        """Constants and an explicit ``initial_state`` combine correctly.

        Also verifies serialization round-trips, that the initial state
        actually influences the output, and the flat-list calling form
        ``layer([x, s, c])``.
        """
        # Test basic case.
        x = keras.Input((None, 5))
        c = keras.Input((3,))
        s = keras.Input((32,))
        cell = RNNCellWithConstants(32, constant_size=3)
        layer = keras.layers.RNN(cell)
        y = layer(x, initial_state=s, constants=c)
        model = keras.models.Model([x, s, c], y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
            np.zeros((6, 32)),
        )
        # Test basic case serialization.
        x_np = np.random.random((6, 5, 5))
        s_np = np.random.random((6, 32))
        c_np = np.random.random((6, 3))
        y_np = model.predict([x_np, s_np, c_np])
        weights = model.get_weights()
        config = layer.get_config()
        custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
        with generic_utils.CustomObjectScope(custom_objects):
            layer = keras.layers.RNN.from_config(config.copy())
        y = layer(x, initial_state=s, constants=c)
        model = keras.models.Model([x, s, c], y)
        model.set_weights(weights)
        y_np_2 = model.predict([x_np, s_np, c_np])
        self.assertAllClose(y_np, y_np_2, atol=1e-4)
        # verify that state is used
        # Perturbing the initial state must change the prediction.
        y_np_2_different_s = model.predict([x_np, s_np + 10.0, c_np])
        with self.assertRaises(AssertionError):
            self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
        # test flat list inputs
        with generic_utils.CustomObjectScope(custom_objects):
            layer = keras.layers.RNN.from_config(config.copy())
        y = layer([x, s, c])
        model = keras.models.Model([x, s, c], y)
        model.set_weights(weights)
        y_np_3 = model.predict([x_np, s_np, c_np])
        self.assertAllClose(y_np, y_np_3, atol=1e-4)
    def test_rnn_cell_with_non_keras_constants_and_initial_state(self):
        """Raw tf tensors are accepted for both constants and state."""
        # Test basic case.
        x = keras.Input((None, 5))
        c = tf.zeros([6, 3], dtype=tf.float32)
        s = tf.zeros([6, 32], dtype=tf.float32)
        cell = RNNCellWithConstants(32, constant_size=3)
        layer = keras.layers.RNN(cell)
        y = layer(x, initial_state=s, constants=c)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
        # Test stacking.
        cells = [
            gru.GRUCell(8),
            RNNCellWithConstants(12, constant_size=3),
            RNNCellWithConstants(32, constant_size=3),
        ]
        layer = keras.layers.RNN(cells)
        # One initial-state tensor per cell, matching each cell's width.
        s = [
            tf.zeros([6, 8], dtype=tf.float32),
            tf.zeros([6, 12], dtype=tf.float32),
            tf.zeros([6, 32], dtype=tf.float32),
        ]
        y = layer(x, initial_state=s, constants=c)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
    def test_stacked_rnn_attributes(self):
        """Stacked-RNN weight/loss/update bookkeeping (graph mode only).

        Checks trainable vs. non-trainable weight counts when one cell is
        frozen, propagation of per-cell ``add_loss`` into ``layer.losses``
        / ``get_losses_for``, and per-cell ``add_update`` tracking via the
        legacy v1 APIs.
        """
        if tf.executing_eagerly():
            self.skipTest("reduce_sum is not available in eager mode.")
        cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
        layer = keras.layers.RNN(cells)
        layer.build((None, None, 1))
        # Test weights
        # Each LSTMCell contributes 3 weights (6 total for two cells).
        self.assertEqual(len(layer.trainable_weights), 6)
        cells[0].trainable = False
        self.assertEqual(len(layer.trainable_weights), 3)
        self.assertEqual(len(layer.non_trainable_weights), 3)
        # Test `get_losses_for` and `losses`
        x = keras.Input((None, 1))
        loss_1 = tf.reduce_sum(x)
        loss_2 = tf.reduce_sum(cells[0].kernel)
        # loss_1 is conditional on the input x; loss_2 is unconditional.
        cells[0].add_loss(loss_1, inputs=x)
        cells[0].add_loss(loss_2)
        self.assertEqual(len(layer.losses), 2)
        self.assertEqual(layer.get_losses_for(None), [loss_2])
        self.assertEqual(layer.get_losses_for(x), [loss_1])
        # Test `updates`
        cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
        layer = keras.layers.RNN(cells)
        x = keras.Input((None, 1))
        _ = layer(x)
        update_1 = tf.compat.v1.assign_add(
            cells[0].kernel, x[0, 0, 0] * cells[0].kernel
        )
        update_2 = tf.compat.v1.assign_add(
            cells[0].kernel, tf.ones_like(cells[0].kernel)
        )
        # TODO(b/128682878): Remove when RNNCells are __call__'d.
        with base_layer_utils.call_context().enter(layer, x, True, None):
            cells[0].add_update(update_1)
            cells[0].add_update(update_2)
        self.assertEqual(len(layer.updates), 2)
def test_rnn_dynamic_trainability(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
layer = layer_class(units)
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
    @parameterized.parameters(
        [keras.layers.SimpleRNN, keras.layers.GRU, keras.layers.LSTM]
    )
    def test_rnn_cell_trainability(self, layer_cls):
        """Layer-level ``trainable`` flag must propagate to the wrapped cell."""
        # https://github.com/tensorflow/tensorflow/issues/32369.
        layer = layer_cls(3, trainable=False)
        self.assertFalse(layer.cell.trainable)
        layer.trainable = True
        self.assertTrue(layer.cell.trainable)
    def test_state_reuse_with_dropout(self):
        """States from a dropout-enabled RNN can seed another RNN's state."""
        layer_class = keras.layers.SimpleRNN
        embedding_dim = 4
        units = 3
        timesteps = 2
        num_samples = 2
        input1 = keras.Input(
            batch_shape=(num_samples, timesteps, embedding_dim)
        )
        layer = layer_class(
            units, return_state=True, return_sequences=True, dropout=0.2
        )
        # Slice off the output; keep only the returned state tensors.
        state = layer(input1)[1:]
        input2 = keras.Input(
            batch_shape=(num_samples, timesteps, embedding_dim)
        )
        output = layer_class(units)(input2, initial_state=state)
        model = keras.Model([input1, input2], output)
        inputs = [
            np.random.random((num_samples, timesteps, embedding_dim)),
            np.random.random((num_samples, timesteps, embedding_dim)),
        ]
        # Only checks that predict runs without error.
        model.predict(inputs)
    def test_builtin_and_custom_rnn_cell_serialization(self):
        """Built-in and registered custom cells serialize identically.

        The custom cell is registered via
        ``register_keras_serializable`` so no ``CustomObjectScope`` is
        needed around ``from_config``; each cell class is then exercised
        alone and stacked, with a predict-equality check after the
        config/weights round-trip.
        """
        @keras.utils.generic_utils.register_keras_serializable(
            package="TestOnly"
        )
        class CustomRNNCell(keras.layers.Layer):
            def __init__(self, units, **kwargs):
                self.units = units
                self.state_size = units
                super().__init__(**kwargs)
            def build(self, input_shape):
                self.kernel = self.add_weight(
                    shape=(input_shape[-1], self.units),
                    initializer="uniform",
                    name="kernel",
                )
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer="uniform",
                    name="recurrent_kernel",
                )
                self.built = True
            def call(self, inputs, states):
                prev_output = states[0]
                h = keras.backend.dot(inputs, self.kernel)
                output = h + keras.backend.dot(
                    prev_output, self.recurrent_kernel
                )
                return output, [output]
            def get_config(self):
                config = {"units": self.units}
                base_config = super().get_config()
                return dict(list(base_config.items()) + list(config.items()))
        for cell_class in [
            keras.layers.SimpleRNNCell,
            keras.layers.GRUCell,
            keras.layers.LSTMCell,
            CustomRNNCell,
        ]:
            # Test basic case.
            x = keras.Input((None, 5))
            cell = cell_class(32)
            layer = keras.layers.RNN(cell)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(
                optimizer="rmsprop",
                loss="mse",
                run_eagerly=test_utils.should_run_eagerly(),
            )
            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)
            # Test stacking.
            cells = [cell_class(8), cell_class(12), cell_class(32)]
            layer = keras.layers.RNN(cells)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(
                optimizer="rmsprop",
                loss="mse",
                run_eagerly=test_utils.should_run_eagerly(),
            )
            # Test stacked RNN serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)
    @parameterized.named_parameters(
        *test_utils.generate_combinations_with_testcase_name(
            layer=[
                keras.layers.SimpleRNN,
                gru_v1.GRU,
                lstm_v1.LSTM,
                gru.GRU,
                lstm.LSTM,
            ],
            unroll=[True, False],
        )
    )
    def test_rnn_dropout(self, layer, unroll):
        """Every RNN flavor trains with dropout, rolled or unrolled.

        Smoke test only — asserts nothing beyond train_on_batch running.
        """
        rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
        if not unroll:
            x = keras.Input((None, 5))
        else:
            # Unrolling requires a statically known time dimension.
            x = keras.Input((5, 5))
        y = rnn_layer(x)
        model = keras.models.Model(x, y)
        model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
        x_np = np.random.random((6, 5, 5))
        y_np = np.random.random((6, 3))
        model.train_on_batch(x_np, y_np)
    @parameterized.named_parameters(
        *test_utils.generate_combinations_with_testcase_name(
            cell=[
                keras.layers.SimpleRNNCell,
                keras.layers.GRUCell,
                keras.layers.LSTMCell,
            ],
            unroll=[True, False],
        )
    )
    def test_stacked_rnn_dropout(self, cell, unroll):
        """Stacked cells with dropout train, rolled or unrolled.

        Smoke test only — asserts nothing beyond train_on_batch running.
        """
        cells = [
            cell(3, dropout=0.1, recurrent_dropout=0.1),
            cell(3, dropout=0.1, recurrent_dropout=0.1),
        ]
        layer = keras.layers.RNN(cells, unroll=unroll)
        if not unroll:
            x = keras.Input((None, 5))
        else:
            # Unrolling requires a statically known time dimension.
            x = keras.Input((5, 5))
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
        x_np = np.random.random((6, 5, 5))
        y_np = np.random.random((6, 3))
        model.train_on_batch(x_np, y_np)
    def test_dropout_mask_reuse(self):
        """One dropout mask per batch, reused across all timesteps.

        With recurrent_initializer="zeros" the recurrent term is nulled
        out, so equal outputs at t0 and t1 imply the same input dropout
        mask was applied at every step; different masks must be drawn for
        different batches.
        """
        # The layer is created with recurrent_initializer = zero, so that the
        # the recurrent state won't affect the output. By doing this, we can
        # verify the output and see if the same mask is applied to for each
        # timestep.
        layer_1 = keras.layers.SimpleRNN(
            3,
            dropout=0.5,
            kernel_initializer="ones",
            recurrent_initializer="zeros",
            return_sequences=True,
            unroll=True,
        )
        layer_2 = keras.layers.RNN(
            keras.layers.SimpleRNNCell(
                3,
                dropout=0.5,
                kernel_initializer="ones",
                recurrent_initializer="zeros",
            ),
            return_sequences=True,
            unroll=True,
        )
        layer_3 = keras.layers.RNN(
            [
                keras.layers.SimpleRNNCell(
                    3,
                    dropout=0.5,
                    kernel_initializer="ones",
                    recurrent_initializer="zeros",
                ),
                keras.layers.SimpleRNNCell(
                    3,
                    dropout=0.5,
                    kernel_initializer="ones",
                    recurrent_initializer="zeros",
                ),
            ],
            return_sequences=True,
            unroll=True,
        )
        def verify(rnn_layer):
            inputs = tf.constant(1.0, shape=(6, 2, 5))
            out = rnn_layer(inputs, training=True)
            if not tf.executing_eagerly():
                self.evaluate(tf.compat.v1.global_variables_initializer())
            batch_1 = self.evaluate(out)
            # Same mask across timesteps => t0 and t1 outputs match.
            batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
            self.assertAllClose(batch_1_t0, batch_1_t1)
            # This simulate the layer called with multiple batches in eager mode
            if tf.executing_eagerly():
                out2 = rnn_layer(inputs, training=True)
            else:
                out2 = out
            batch_2 = self.evaluate(out2)
            batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
            self.assertAllClose(batch_2_t0, batch_2_t1)
            # Also validate that different dropout is used by between batches.
            self.assertNotAllClose(batch_1_t0, batch_2_t0)
            self.assertNotAllClose(batch_1_t1, batch_2_t1)
        for l in [layer_1, layer_2, layer_3]:
            verify(l)
def test_stacked_rnn_compute_output_shape(self):
cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)]
embedding_dim = 4
timesteps = 2
layer = keras.layers.RNN(
cells, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 3),
(None, 3),
(None, 6),
(None, 6),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
# Test reverse_state_order = True for stacked cell.
stacked_cell = keras.layers.StackedRNNCells(
cells, reverse_state_order=True
)
layer = keras.layers.RNN(
stacked_cell, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 6),
(None, 6),
(None, 3),
(None, 3),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
    def test_stacked_rnn_with_training_param(self):
        """``training`` kwarg must reach cells wrapped in StackedRNNCells.

        The wrapper cell asserts ``training is not None`` inside ``call``,
        so the test fails if the flag is dropped on the way down.
        """
        # See https://github.com/tensorflow/tensorflow/issues/32586
        class CellWrapper(keras.layers.AbstractRNNCell):
            def __init__(self, cell):
                super().__init__()
                self.cell = cell
            @property
            def state_size(self):
                return self.cell.state_size
            @property
            def output_size(self):
                return self.cell.output_size
            def build(self, input_shape):
                self.cell.build(input_shape)
                self.built = True
            def get_initial_state(
                self, inputs=None, batch_size=None, dtype=None
            ):
                return self.cell.get_initial_state(
                    inputs=inputs, batch_size=batch_size, dtype=dtype
                )
            def call(self, inputs, states, training=None, **kwargs):
                # The regression under test: training must be forwarded.
                assert training is not None
                return self.cell(inputs, states=states, training=training)
        cell = keras.layers.LSTMCell(32)
        cell = CellWrapper(cell)
        cell = keras.layers.StackedRNNCells([cell])
        rnn = keras.layers.RNN(cell)
        inputs = np.ones((8, 4, 16), dtype=np.float32)
        rnn(inputs, training=True)
    def test_stacked_rnn_with_nested_cell(self):
        """Stacked cells with nested (multi-tensor) state/IO structures.

        Runs the same shape and training checks twice: once feeding plain
        tuples (``use_tuple=False`` on the first cell) and once feeding a
        ``NestedInput`` namedtuple-style structure.
        """
        batch = 10
        t = 5
        i1, i2, i3 = 3, 4, 5
        o11, o12, o13 = 2, 3, 4
        o21, o22, o23 = 4, 5, 6
        # test 1: use_tuple=False
        cells = [NestedCell(o11, o12, o13), NestedCell(o21, o22, o23)]
        rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        output1, output2, state1, state2 = rnn((input_1, input_2))
        s11, s12 = state1
        s21, s22 = state2
        # Outputs take the second (last) cell's sizes; states are per-cell.
        self.assertEqual(output1.shape.as_list(), [None, t, o21])
        self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
        self.assertEqual(s11.shape.as_list(), [None, o11])
        self.assertEqual(s12.shape.as_list(), [None, o12, o13])
        self.assertEqual(s21.shape.as_list(), [None, o21])
        self.assertEqual(s22.shape.as_list(), [None, o22, o23])
        model = keras.models.Model([input_1, input_2], [output1, output2])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
        )
        self.assertEqual(
            model.output_shape, [(None, t, o21), (None, t, o22, o23)]
        )
        # test 2: use_tuple=True
        cells = [
            NestedCell(o11, o12, o13, use_tuple=True),
            NestedCell(o21, o22, o23),
        ]
        rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        output1, output2, state1, state2 = rnn(
            NestedInput(t1=input_1, t2=input_2)
        )
        s11, s12 = state1
        s21, s22 = state2
        self.assertEqual(output1.shape.as_list(), [None, t, o21])
        self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
        self.assertEqual(s11.shape.as_list(), [None, o11])
        self.assertEqual(s12.shape.as_list(), [None, o12, o13])
        self.assertEqual(s21.shape.as_list(), [None, o21])
        self.assertEqual(s22.shape.as_list(), [None, o22, o23])
        model = keras.models.Model([input_1, input_2], [output1, output2])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
        )
        self.assertEqual(
            model.output_shape, [(None, t, o21), (None, t, o22, o23)]
        )
    def test_trackable_dependencies(self):
        """All RNN model variables appear in the trackable object graph,
        i.e. they would be captured by checkpointing."""
        rnn = keras.layers.SimpleRNN
        x = np.random.random((2, 2, 2))
        y = np.random.random((2, 2))
        model = keras.models.Sequential()
        model.add(rnn(2))
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.fit(x, y, epochs=1, batch_size=1)
        # check whether the model variables are present in the
        # trackable list of objects
        checkpointed_objects = {
            id(o) for o in trackable_util.list_objects(model)
        }
        for v in model.variables:
            self.assertIn(id(v), checkpointed_objects)
    def test_high_dimension_RNN(self):
        """Cells whose state is a 2-D (unit_a, unit_b) tensor per sample.

        Checks state-size reporting, graph-mode initial-state shape
        inference, training, output shape, and a three-deep stack.
        """
        # Basic test case.
        unit_a = 10
        unit_b = 20
        input_a = 5
        input_b = 10
        batch = 32
        time_step = 4
        cell = Minimal2DRNNCell(unit_a, unit_b)
        x = keras.Input((None, input_a, input_b))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])
        if not tf.executing_eagerly():
            # get_initial_state requires a symbolic input (graph mode only).
            init_state = layer.get_initial_state(x)
            self.assertEqual(len(init_state), 1)
            self.assertEqual(
                init_state[0].shape.as_list(), [None, unit_a, unit_b]
            )
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, input_a, input_b)),
            np.zeros((batch, unit_a, unit_b)),
        )
        self.assertEqual(model.output_shape, (None, unit_a, unit_b))
        # Test stacking.
        cells = [
            Minimal2DRNNCell(unit_a, unit_b),
            Minimal2DRNNCell(unit_a * 2, unit_b * 2),
            Minimal2DRNNCell(unit_a * 4, unit_b * 4),
        ]
        layer = keras.layers.RNN(cells)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, input_a, input_b)),
            np.zeros((batch, unit_a * 4, unit_b * 4)),
        )
        # Stacked output takes the last cell's (4x) dimensions.
        self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
    def test_high_dimension_RNN_with_init_state(self):
        """2-D-state cells accept a matching initial state and reject a
        mismatched one with a ``cell.state_size`` ValueError."""
        unit_a = 10
        unit_b = 20
        input_a = 5
        input_b = 10
        batch = 32
        time_step = 4
        # Basic test case.
        cell = Minimal2DRNNCell(unit_a, unit_b)
        x = keras.Input((None, input_a, input_b))
        s = keras.Input((unit_a, unit_b))
        layer = keras.layers.RNN(cell)
        y = layer(x, initial_state=s)
        model = keras.models.Model([x, s], y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [
                np.zeros((batch, time_step, input_a, input_b)),
                np.zeros((batch, unit_a, unit_b)),
            ],
            np.zeros((batch, unit_a, unit_b)),
        )
        self.assertEqual(model.output_shape, (None, unit_a, unit_b))
        # Bad init state shape.
        bad_shape_a = unit_a * 2
        bad_shape_b = unit_b * 2
        cell = Minimal2DRNNCell(unit_a, unit_b)
        x = keras.Input((None, input_a, input_b))
        s = keras.Input((bad_shape_a, bad_shape_b))
        layer = keras.layers.RNN(cell)
        # Doubled state dims must be rejected at call time.
        with self.assertRaisesWithPredicateMatch(
            ValueError, "however `cell.state_size` is"
        ):
            layer(x, initial_state=s)
    def test_inconsistent_output_state_size(self):
        """Cell output size may differ from its state size.

        ``PlusOneRNNCell`` (fixture defined elsewhere) keeps a
        ``state_size`` of 5 while the model output has ``input_size``
        width, so output shape must follow the output, not the state.
        """
        batch = 32
        time_step = 4
        state_size = 5
        input_size = 6
        cell = PlusOneRNNCell(state_size)
        x = keras.Input((None, input_size))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        self.assertEqual(cell.state_size, state_size)
        if not tf.executing_eagerly():
            # Initial-state shape inference is graph-mode only here.
            init_state = layer.get_initial_state(x)
            self.assertEqual(len(init_state), 1)
            self.assertEqual(init_state[0].shape.as_list(), [None, state_size])
        model = keras.models.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, time_step, input_size)),
            np.zeros((batch, input_size)),
        )
        self.assertEqual(model.output_shape, (None, input_size))
    def test_get_initial_state(self):
        """``cell.get_initial_state`` accepts inputs OR (batch, dtype).

        All-``None`` arguments must raise; otherwise shape and dtype of
        the state are derived either from the inputs tensor or from the
        explicit batch/dtype pair, in both graph and eager modes.
        """
        cell = keras.layers.SimpleRNNCell(5)
        with self.assertRaisesRegex(
            ValueError, "batch_size and dtype cannot be None"
        ):
            cell.get_initial_state(None, None, None)
        if not tf.executing_eagerly():
            # Graph mode: batch dim stays symbolic (None).
            inputs = keras.Input((None, 10))
            initial_state = cell.get_initial_state(inputs, None, None)
            self.assertEqual(initial_state.shape.as_list(), [None, 5])
            self.assertEqual(initial_state.dtype, inputs.dtype)
            batch = tf.shape(inputs)[0]
            dtype = inputs.dtype
            initial_state = cell.get_initial_state(None, batch, dtype)
            self.assertEqual(initial_state.shape.as_list(), [None, 5])
            self.assertEqual(initial_state.dtype, inputs.dtype)
        else:
            # Eager mode: concrete batch size appears in the shape.
            batch = 8
            inputs = np.random.random((batch, 10))
            initial_state = cell.get_initial_state(inputs, None, None)
            self.assertEqual(initial_state.shape.as_list(), [8, 5])
            self.assertEqual(initial_state.dtype, inputs.dtype)
            dtype = inputs.dtype
            initial_state = cell.get_initial_state(None, batch, dtype)
            self.assertEqual(initial_state.shape.as_list(), [batch, 5])
            self.assertEqual(initial_state.dtype, inputs.dtype)
    @parameterized.parameters([True, False])
    def test_nested_input_output(self, stateful):
        """A single nested-structure cell trains, stateful or not.

        Parameterized over ``stateful``; when stateful, a fixed batch size
        must be baked into the inputs, so shapes are asserted with
        ``batch_size`` (``None`` otherwise). Both the tuple and the
        ``NestedInput`` calling conventions are covered.
        """
        batch = 10
        t = 5
        i1, i2, i3 = 3, 4, 5
        o1, o2, o3 = 2, 3, 4
        cell = NestedCell(o1, o2, o3)
        rnn = keras.layers.RNN(cell, stateful=stateful)
        # Stateful RNNs require a known batch dimension.
        batch_size = batch if stateful else None
        input_1 = keras.Input((t, i1), batch_size=batch_size)
        input_2 = keras.Input((t, i2, i3), batch_size=batch_size)
        outputs = rnn((input_1, input_2))
        self.assertEqual(len(outputs), 2)
        self.assertEqual(outputs[0].shape.as_list(), [batch_size, o1])
        self.assertEqual(outputs[1].shape.as_list(), [batch_size, o2, o3])
        model = keras.models.Model((input_1, input_2), outputs)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, o1)), np.zeros((batch, o2, o3))],
        )
        self.assertEqual(
            model.output_shape, [(batch_size, o1), (batch_size, o2, o3)]
        )
        cell = NestedCell(o1, o2, o3, use_tuple=True)
        rnn = keras.layers.RNN(cell, stateful=stateful)
        input_1 = keras.Input((t, i1), batch_size=batch_size)
        input_2 = keras.Input((t, i2, i3), batch_size=batch_size)
        outputs = rnn(NestedInput(t1=input_1, t2=input_2))
        self.assertEqual(len(outputs), 2)
        self.assertEqual(outputs[0].shape.as_list(), [batch_size, o1])
        self.assertEqual(outputs[1].shape.as_list(), [batch_size, o2, o3])
        model = keras.models.Model([input_1, input_2], outputs)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, o1)), np.zeros((batch, o2, o3))],
        )
        self.assertEqual(
            model.output_shape, [(batch_size, o1), (batch_size, o2, o3)]
        )
    def test_nested_input_output_with_state(self):
        """Nested cell with ``return_sequences`` + ``return_state``.

        The call unpacks two sequence outputs plus two state tensors;
        shapes are asserted for all four, then the model trains. Repeated
        for both the tuple and ``NestedInput`` calling conventions.
        """
        batch = 10
        t = 5
        i1, i2, i3 = 3, 4, 5
        o1, o2, o3 = 2, 3, 4
        cell = NestedCell(o1, o2, o3)
        rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        output1, output2, s1, s2 = rnn((input_1, input_2))
        self.assertEqual(output1.shape.as_list(), [None, t, o1])
        self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
        self.assertEqual(s1.shape.as_list(), [None, o1])
        self.assertEqual(s2.shape.as_list(), [None, o2, o3])
        # Only sequence outputs feed the model; states are dropped here.
        model = keras.models.Model([input_1, input_2], [output1, output2])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))],
        )
        self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
        cell = NestedCell(o1, o2, o3, use_tuple=True)
        rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2))
        self.assertEqual(output1.shape.as_list(), [None, t, o1])
        self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
        self.assertEqual(s1.shape.as_list(), [None, o1])
        self.assertEqual(s2.shape.as_list(), [None, o2, o3])
        model = keras.models.Model([input_1, input_2], [output1, output2])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))],
        )
        self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
    def test_nest_input_output_with_init_state(self):
        """Nested-structure RNN fed explicit nested initial states.

        Like test_nested_input_output_with_state, but the nested initial
        state is supplied through `initial_state` — first as a plain tuple,
        then (with use_tuple=True) as the NestedState namedtuple.
        """
        batch = 10
        t = 5
        i1, i2, i3 = 3, 4, 5
        o1, o2, o3 = 2, 3, 4
        cell = NestedCell(o1, o2, o3)
        rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        # Initial-state inputs matching the cell's two state components.
        init_s1 = keras.Input((o1,))
        init_s2 = keras.Input((o2, o3))
        output1, output2, s1, s2 = rnn(
            (input_1, input_2), initial_state=(init_s1, init_s2)
        )
        self.assertEqual(output1.shape.as_list(), [None, t, o1])
        self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
        self.assertEqual(s1.shape.as_list(), [None, o1])
        self.assertEqual(s2.shape.as_list(), [None, o2, o3])
        model = keras.models.Model(
            [input_1, input_2, init_s1, init_s2], [output1, output2]
        )
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [
                np.zeros((batch, t, i1)),
                np.zeros((batch, t, i2, i3)),
                np.zeros((batch, o1)),
                np.zeros((batch, o2, o3)),
            ],
            [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))],
        )
        self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
        # Second pass: identical wiring, but inputs/states are namedtuples.
        cell = NestedCell(o1, o2, o3, use_tuple=True)
        rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
        input_1 = keras.Input((t, i1))
        input_2 = keras.Input((t, i2, i3))
        init_s1 = keras.Input((o1,))
        init_s2 = keras.Input((o2, o3))
        init_state = NestedState(s1=init_s1, s2=init_s2)
        output1, output2, s1, s2 = rnn(
            NestedInput(t1=input_1, t2=input_2), initial_state=init_state
        )
        self.assertEqual(output1.shape.as_list(), [None, t, o1])
        self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
        self.assertEqual(s1.shape.as_list(), [None, o1])
        self.assertEqual(s2.shape.as_list(), [None, o2, o3])
        model = keras.models.Model(
            [input_1, input_2, init_s1, init_s2], [output1, output2]
        )
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            [
                np.zeros((batch, t, i1)),
                np.zeros((batch, t, i2, i3)),
                np.zeros((batch, o1)),
                np.zeros((batch, o2, o3)),
            ],
            [np.zeros((batch, t, o1)), np.zeros((batch, t, o2, o3))],
        )
        self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
    def test_masking_rnn_with_output_and_states(self):
        """Masked steps must not advance the state or overwrite the output."""
        class Cell(keras.layers.Layer):
            # Pass-through cell: echoes its input and adds one to each
            # state tensor on every step that is not masked out.
            def __init__(self):
                self.state_size = None
                self.output_size = None
                super().__init__()
            def build(self, input_shape):
                # Sizes are only known once the input feature dim is known.
                self.state_size = input_shape[-1]
                self.output_size = input_shape[-1]
            def call(self, inputs, states):
                return inputs, [s + 1 for s in states]
        x = keras.Input((3, 1), name="x")
        x_masked = keras.layers.Masking()(x)
        s_0 = keras.Input((1,), name="s_0")
        y, s = keras.layers.RNN(Cell(), return_state=True)(
            x_masked, initial_state=s_0
        )
        model = keras.models.Model([x, s_0], [y, s])
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        # last time step (the 0.0 entry) is masked by the Masking layer
        x_np = np.array([[[1.0], [2.0], [0.0]]])
        s_0_np = np.array([[10.0]])
        y_np, s_np = model.predict([x_np, s_0_np])
        # 1 is added to the initial state exactly twice: the masked third
        # step must not increment it a third time.
        self.assertAllClose(s_np, s_0_np + 2)
        # Expect last output to be the same as the last unmasked output.
        self.assertAllClose(y_np, x_np[:, 1, :])
def test_zero_output_for_masking(self):
for unroll in [True, False]:
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((5, 5))
mask = keras.layers.Masking()
layer = keras.layers.RNN(
cell,
return_sequences=True,
zero_output_for_mask=True,
unroll=unroll,
)
masked_input = mask(x)
y = layer(masked_input)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
np_x = np.ones((6, 5, 5))
result_1 = model.predict(np_x)
# set the time 4 and 5 for last record to be zero (masked).
np_x[5, 3:] = 0
result_2 = model.predict(np_x)
# expect the result_2 has same output, except the time 4,5 for last
# record.
result_1[5, 3:] = 0
self.assertAllClose(result_1, result_2)
def test_unroll_single_step(self):
"""Even if the time dimension is only one, we should be able to
unroll."""
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((1, 5))
layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
np_x = np.ones((6, 1, 5))
result = model.predict(np_x)
self.assertEqual((6, 1, 5), result.shape)
def test_unroll_zero_step(self):
"""If the time dimension is None, we should fail to unroll."""
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
with self.assertRaisesRegex(ValueError, "Cannot unroll a RNN.*"):
layer(x)
def test_full_input_spec(self):
# See https://github.com/tensorflow/tensorflow/issues/25985
inputs = keras.layers.Input(batch_shape=(1, 1, 1))
state_h = keras.layers.Input(batch_shape=(1, 1))
state_c = keras.layers.Input(batch_shape=(1, 1))
states = [state_h, state_c]
decoder_out = keras.layers.LSTM(1, stateful=True)(
inputs, initial_state=states
)
model = keras.Model([inputs, state_h, state_c], decoder_out)
output1 = model.predict(
[np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))]
)
output2 = model.predict(
[np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))]
)
model.reset_states()
output3 = model.predict(
[np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))]
)
self.assertAllClose(output1, output3)
self.assertNotAllClose(output1, output2)
def test_reset_states(self):
# See https://github.com/tensorflow/tensorflow/issues/25852
with self.assertRaisesRegex(
ValueError, "it needs to know its batch size"
):
simple_rnn = keras.layers.SimpleRNN(1, stateful=True)
simple_rnn.reset_states()
with self.assertRaisesRegex(
ValueError, "it needs to know its batch size"
):
cell = Minimal2DRNNCell(1, 2)
custom_rnn = keras.layers.RNN(cell, stateful=True)
custom_rnn.reset_states()
    @parameterized.parameters(
        [
            keras.layers.SimpleRNNCell,
            keras.layers.GRUCell,
            keras.layers.LSTMCell,
        ]
    )
    def test_stateful_rnn_with_stacking(self, cell):
        """Stacked stateful cells train, predict, and accept custom states.

        Regression test for
        https://github.com/tensorflow/tensorflow/issues/28614.
        """
        batch = 12
        timesteps = 10
        input_dim = 8
        output_dim = 64
        # Two stacked cells of different widths.
        cells = [cell(32), cell(64)]
        x = keras.Input(batch_shape=(batch, None, input_dim))
        layer = keras.layers.RNN(cells, stateful=True)
        y = layer(x)
        model = keras.Model(x, y)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, timesteps, input_dim)),
            np.zeros((batch, output_dim)),
        )
        model.predict(np.ones((batch, timesteps, input_dim)))
        model.reset_states()
        model.predict(np.ones((batch, timesteps, input_dim)))
        # Explicit state arrays matching the stacked cells' nested
        # state_size structure must also be accepted by reset_states.
        new_states = tf.nest.map_structure(
            lambda s: np.ones((batch, s)), layer.cell.state_size
        )
        layer.reset_states(new_states)
        model.predict(np.ones((batch, timesteps, input_dim)))
    def test_stateful_rnn_with_initial_state(self):
        """Stateful GRU with a constant initial state behaves consistently.

        Regression test for
        https://github.com/tensorflow/tensorflow/issues/32299.
        """
        batch = 12
        timesteps = 1
        input_dim = 8
        output_dim = 16
        test_inputs = np.full((batch, timesteps, input_dim), 0.5)
        def make_model(stateful=False, with_initial_state=False):
            # Builds a single-GRU model; optionally stateful and optionally
            # seeded from a constant all-ones initial state.
            input_layer = keras.Input(shape=(None, input_dim), batch_size=batch)
            if with_initial_state:
                initial_states = keras.backend.constant(
                    np.ones((batch, output_dim))
                )
            else:
                initial_states = None
            rnn_output = keras.layers.GRU(
                units=output_dim, return_sequences=True, stateful=stateful
            )(input_layer, initial_state=initial_states)
            model = keras.Model(input_layer, rnn_output)
            model.compile(
                optimizer="rmsprop",
                loss="mse",
                run_eagerly=test_utils.should_run_eagerly(),
            )
            return model
        # Define a model with a constant state initialization
        model = make_model(stateful=True, with_initial_state=True)
        layer_weights = model.layers[1].get_weights()
        model.reset_states()
        predict_1 = model.predict(test_inputs)
        predict_2 = model.predict(test_inputs)
        model.reset_states()
        predict_3 = model.predict(test_inputs)
        # predict 1 and 2 should be different since the batch 2 should use the
        # state from batch 1 as the initial state.
        self.assertNotAllClose(predict_1, predict_2)
        self.assertAllClose(predict_1, predict_3)
        # Create a new model with same weights but without initial states. Make
        # sure the predict value is different from the model with non-zero
        # initial state.
        model_2 = make_model(stateful=True, with_initial_state=False)
        model_2.layers[1].set_weights(layer_weights)
        model_2.reset_states()
        predict_4 = model_2.predict(test_inputs)
        predict_5 = model_2.predict(test_inputs)
        self.assertNotAllClose(predict_1, predict_4)
        self.assertNotAllClose(predict_4, predict_5)
        # Create models with stateful=False, and make sure they handle init
        # state correctly.
        model_3 = make_model(stateful=False, with_initial_state=True)
        model_3.layers[1].set_weights(layer_weights)
        model_3.reset_states()
        predict_6 = model_3.predict(test_inputs)
        predict_7 = model_3.predict(test_inputs)
        self.assertAllClose(predict_1, predict_6)
        self.assertAllClose(predict_6, predict_7)
    def test_stateful_rnn_with_customized_get_initial_state(self):
        """A cell's custom get_initial_state is honored by a stateful RNN."""
        class TestCell(keras.layers.AbstractRNNCell):
            # Fixed sizes; the initial state is all-ones rather than zeros.
            state_size = 1
            output_size = 2
            def get_initial_state(
                self, inputs=None, batch_size=None, dtype=None
            ):
                return np.ones((batch_size, 1), dtype=dtype)
            def call(self, inputs, states):
                # Echo the input; never modify the state.
                return inputs, states
        layer = keras.layers.RNN(TestCell(), stateful=True, return_state=True)
        inputs = keras.Input(shape=(10, 2), batch_size=4)
        model = keras.Model(inputs, layer(inputs))
        x = np.ones((4, 10, 2), dtype=np.float32)
        output, state = model.predict(x)
        # The cell passes inputs through unchanged and keeps its state, so
        # the output is the all-ones input and the state stays all-ones.
        self.assertAllClose(output, np.ones((4, 2)))
        self.assertAllClose(state, np.ones((4, 1)))
def test_input_dim_length(self):
simple_rnn = keras.layers.SimpleRNN(5, input_length=10, input_dim=8)
self.assertEqual(simple_rnn._batch_input_shape, (None, 10, 8))
simple_rnn = keras.layers.SimpleRNN(5, input_dim=8)
self.assertEqual(simple_rnn._batch_input_shape, (None, None, 8))
simple_rnn = keras.layers.SimpleRNN(5, input_length=10)
self.assertEqual(simple_rnn._batch_input_shape, (None, 10, None))
    @parameterized.parameters(
        [
            keras.layers.SimpleRNNCell,
            keras.layers.GRUCell,
            keras.layers.LSTMCell,
        ]
    )
    def test_state_spec_with_stack_cell(self, cell):
        """Encoder states from stacked cells can seed a decoder RNN.

        See https://github.com/tensorflow/tensorflow/issues/27817 for more
        detail.
        """
        batch = 12
        timesteps = 10
        input_dim = 8
        output_dim = 8
        def create_cell():
            # Three stacked cells of equal width.
            return [cell(output_dim), cell(output_dim), cell(output_dim)]
        inputs = keras.Input((timesteps, input_dim))
        encoder_output = keras.layers.RNN(create_cell(), return_state=True)(
            inputs
        )
        # encoder_output[0] is the output tensor; the rest are the states.
        states = encoder_output[1:]
        decoder_output = keras.layers.RNN(create_cell())(
            inputs, initial_state=states
        )
        model = keras.models.Model(inputs, decoder_output)
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(
            np.zeros((batch, timesteps, input_dim)),
            np.zeros((batch, output_dim)),
        )
        model.predict(np.ones((batch, timesteps, input_dim)))
    @parameterized.named_parameters(
        *test_utils.generate_combinations_with_testcase_name(
            layer=[
                keras.layers.SimpleRNN,
                gru_v1.GRU,
                lstm_v1.LSTM,
                gru.GRU,
                lstm.LSTM,
            ]
        )
    )
    def test_rnn_with_ragged_input(self, layer):
        """Ragged inputs must match their masked-dense equivalents.

        Covers forward and go_backwards runs, densification, optional layer
        params, stateful layers, the unroll error case, and
        return_sequences output structure, for each built-in RNN layer.
        """
        ragged_data = tf.ragged.constant(
            [
                [[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 1.0, 1.0]],
                [[2.0, 4.0, 1.0, 3.0, 1.0]],
                [
                    [2.0, 3.0, 4.0, 1.0, 5.0],
                    [2.0, 3.0, 1.0, 1.0, 1.0],
                    [1.0, 2.0, 3.0, 4.0, 5.0],
                ],
            ],
            ragged_rank=1,
        )
        label_data = np.array([[1, 0, 1], [1, 1, 0], [0, 0, 1]])
        # Test results in feed forward
        np.random.seed(100)
        rnn_layer = layer(4, activation="sigmoid")
        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        # The same (shared-weight) layer applied to the densified data
        # behind a Masking layer must produce identical outputs.
        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        self.assertAllClose(output_dense, output_ragged)
        # Test results with go backwards
        np.random.seed(200)
        back_rnn_layer = layer(8, go_backwards=True, activation="sigmoid")
        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = back_rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = back_rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        self.assertAllClose(output_dense, output_ragged)
        # Test densification of the ragged input
        dense_tensor, row_lengths = keras.backend.convert_inputs_if_ragged(
            ragged_data
        )
        self.assertAllClose(dense_data, dense_tensor)
        # Test optional params, all should work except unrolling
        inputs = keras.Input(shape=(None, 5), dtype=tf.float32, ragged=True)
        custom_rnn_layer = layer(
            3, zero_output_for_mask=True, dropout=0.1, use_bias=True
        )
        outputs = custom_rnn_layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(
            optimizer="sgd",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(ragged_data, label_data)
        # Test stateful and full shape specification
        inputs = keras.Input(
            shape=(None, 5), batch_size=3, dtype=tf.float32, ragged=True
        )
        stateful_rnn_layer = layer(3, stateful=True)
        outputs = stateful_rnn_layer(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(
            optimizer="sgd",
            loss="mse",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.train_on_batch(ragged_data, label_data)
        # Must raise error when unroll is set to True
        unroll_rnn_layer = layer(3, unroll=True)
        with self.assertRaisesRegex(
            ValueError, "The input received contains RaggedTensors *"
        ):
            unroll_rnn_layer(inputs)
        # Check if return sequences outputs are correct
        np.random.seed(100)
        returning_rnn_layer = layer(4, return_sequences=True)
        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = returning_rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        # Output ragged structure must mirror the input's row layout.
        self.assertAllClose(output_ragged.ragged_rank, ragged_data.ragged_rank)
        self.assertAllClose(output_ragged.row_splits, ragged_data.row_splits)
        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = returning_rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        # Convert the output here to ragged for value comparison
        output_dense = tf.RaggedTensor.from_tensor(
            output_dense, lengths=row_lengths
        )
        self.assertAllClose(output_ragged, output_dense)
        # Check if return sequences and go_backwards outputs are correct
        np.random.seed(100)
        returning_rnn_layer = layer(4, go_backwards=True, return_sequences=True)
        x_ragged = keras.Input(shape=(None, 5), ragged=True)
        y_ragged = returning_rnn_layer(x_ragged)
        model = keras.models.Model(x_ragged, y_ragged)
        output_ragged = model.predict(ragged_data, steps=1)
        self.assertAllClose(output_ragged.ragged_rank, ragged_data.ragged_rank)
        self.assertAllClose(output_ragged.row_splits, ragged_data.row_splits)
        x_dense = keras.Input(shape=(3, 5))
        masking = keras.layers.Masking()(x_dense)
        y_dense = returning_rnn_layer(masking)
        model_2 = keras.models.Model(x_dense, y_dense)
        dense_data = ragged_data.to_tensor()
        output_dense = model_2.predict(dense_data, steps=1)
        # Note that the raw output for dense and ragged input when
        # go_backward=True will be different. Consider following input
        # [[a, b, 0], [c, 0, 0], [d, e, f]] where 0s are masked value.
        # The dense output will be [[0, b, a], [0, 0, c], [f, e, d]] since it
        # will process the whole sequence from the end.
        # While ragged output will be [[b, a], [c], [f, e, d]] since it just
        # ignore the 0s. And if we densify the ragged output, it will by default
        # inserting 0s to the end (rather than from the beginning), which make
        # the output to be [[b, a, 0], [c, 0, 0], [f, e, d]]. With this, we need
        # to verify that reverse(ragged_output.to_tensor()) ==
        # reverse(dense_output)
        output_dense = keras.backend.reverse(output_dense, [1])
        output_dense = tf.RaggedTensor.from_tensor(
            output_dense, lengths=row_lengths
        )
        self.assertAllClose(
            keras.backend.reverse(output_ragged, [1]), output_dense
        )
def test_stateless_rnn_cell(self):
class StatelessCell(keras.layers.Layer):
def __init__(self):
self.state_size = ((), [], ())
self.output_size = None
super().__init__()
def build(self, input_shape):
self.output_size = input_shape[-1]
def call(self, inputs, states):
return inputs, states
x = keras.Input((None, 5))
cell = StatelessCell()
initial_state = tf.nest.map_structure(lambda t: None, cell.state_size)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=initial_state)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 5)))
@parameterized.parameters(
[keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM]
)
def test_for_enable_caching_device_for_layer(self, layer_cls):
expected_caching_device = (
tf.compat.v1.executing_eagerly_outside_functions()
)
layer = layer_cls(1)
self.assertEqual(
layer.cell._enable_caching_device, expected_caching_device
)
# Make sure the config only appears when the none default value is used.
config = layer.get_config()
self.assertNotIn("enable_caching_device", config)
non_default_value = not expected_caching_device
layer = layer_cls(1, enable_caching_device=non_default_value)
self.assertEqual(layer.cell._enable_caching_device, non_default_value)
config = layer.get_config()
self.assertEqual(config["enable_caching_device"], non_default_value)
@parameterized.parameters(
[
keras.layers.SimpleRNNCell,
gru_v1.GRUCell,
lstm_v1.LSTMCell,
gru.GRUCell,
lstm.LSTMCell,
]
)
def test_for_enable_caching_device_for_cell(self, cell_cls):
expected_caching_device = (
tf.compat.v1.executing_eagerly_outside_functions()
)
cell = cell_cls(1)
self.assertEqual(cell._enable_caching_device, expected_caching_device)
# Make sure the config only appears when the none default value is used.
config = cell.get_config()
self.assertNotIn("enable_caching_device", config)
non_default_value = not expected_caching_device
cell = cell_cls(1, enable_caching_device=non_default_value)
self.assertEqual(cell._enable_caching_device, non_default_value)
config = cell.get_config()
self.assertEqual(config["enable_caching_device"], non_default_value)
class RNNCellWithConstants(keras.layers.Layer):
    """Minimal RNN cell that also consumes a per-sequence constant tensor.

    Each step computes input·W_in + state·W_rec + constant·W_const; the new
    state is the output itself.
    """

    def __init__(self, units, constant_size, **kwargs):
        # units: width of the output and of the single state tensor.
        # constant_size: feature width of the constant tensor.
        self.units = units
        self.state_size = units
        self.constant_size = constant_size
        super().__init__(**kwargs)

    def build(self, input_shape):
        # One projection matrix per input source: step input, previous
        # state, and the sequence-level constant.
        self.input_kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="uniform",
            name="kernel",
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer="uniform",
            name="recurrent_kernel",
        )
        self.constant_kernel = self.add_weight(
            shape=(self.constant_size, self.units),
            initializer="uniform",
            name="constant_kernel",
        )
        self.built = True

    def call(self, inputs, states, constants):
        (prev_output,) = states
        (constant,) = constants
        # Sum of the three projected contributions; the output doubles as
        # the next state.
        output = (
            keras.backend.dot(inputs, self.input_kernel)
            + keras.backend.dot(prev_output, self.recurrent_kernel)
            + keras.backend.dot(constant, self.constant_kernel)
        )
        return output, [output]

    def get_config(self):
        # Merge this cell's config onto the base layer config.
        merged = dict(super().get_config())
        merged.update(
            {"units": self.units, "constant_size": self.constant_size}
        )
        return merged
class Minimal2DRNNCell(keras.layers.Layer):
    """The minimal 2D RNN cell is a simple combination of 2 1-D RNN cell.
    Both internal state and output have 2 dimensions and are orthogonal
    between each other.
    """
    def __init__(self, unit_a, unit_b, **kwargs):
        self.unit_a = unit_a
        self.unit_b = unit_b
        # Both state and output are rank-2 (unit_a x unit_b) per sample.
        self.state_size = tf.TensorShape([unit_a, unit_b])
        self.output_size = tf.TensorShape([unit_a, unit_b])
        super().__init__(**kwargs)
    def build(self, input_shape):
        # Per-step inputs are rank-3: (batch, input_a, input_b).
        input_a = input_shape[-2]
        input_b = input_shape[-1]
        self.kernel = self.add_weight(
            shape=(input_a, input_b, self.unit_a, self.unit_b),
            initializer="uniform",
            name="kernel",
        )
        self.recurring_kernel = self.add_weight(
            shape=(self.unit_a, self.unit_b, self.unit_a, self.unit_b),
            initializer="uniform",
            name="recurring_kernel",
        )
        self.bias = self.add_weight(
            shape=(self.unit_a, self.unit_b), initializer="uniform", name="bias"
        )
        self.built = True
    def call(self, inputs, states):
        prev_output = states[0]
        # Contract both trailing input axes against the rank-4 kernel.
        h = tf.einsum("bij,ijkl->bkl", inputs, self.kernel)
        # Bias is broadcast across the batch dimension.
        h += tf.expand_dims(self.bias, axis=0)
        # Recurrent contribution from the previous rank-2 output/state.
        output = h + tf.einsum(
            "bij,ijkl->bkl", prev_output, self.recurring_kernel
        )
        return output, [output]
class PlusOneRNNCell(keras.layers.Layer):
    """RNN cell that adds one to both its input and its state.

    This cell is used for testing state_size and output_size.
    """

    def __init__(self, num_unit, **kwargs):
        # State is a single tensor of `num_unit` units.
        self.state_size = num_unit
        super().__init__(**kwargs)

    def build(self, input_shape):
        # The output width mirrors the innermost input dimension.
        self.output_size = input_shape[-1]

    def call(self, inputs, states):
        incremented_state = states[0] + 1
        return inputs + 1, [incremented_state]
class NestedCell(keras.layers.Layer):
    """RNN cell whose input, output, and state are nested structures.

    Inputs are a pair [(batch, i1), (batch, i2, i3)]; output and state are
    a (rank-1, rank-2) tensor pair, optionally expressed via the
    NestedInput/NestedState namedtuples when use_tuple=True.
    """
    def __init__(self, unit_1, unit_2, unit_3, use_tuple=False, **kwargs):
        self.unit_1 = unit_1
        self.unit_2 = unit_2
        self.unit_3 = unit_3
        self.use_tuple = use_tuple
        super().__init__(**kwargs)
        # A nested state.
        if use_tuple:
            self.state_size = NestedState(
                s1=unit_1, s2=tf.TensorShape([unit_2, unit_3])
            )
        else:
            self.state_size = (unit_1, tf.TensorShape([unit_2, unit_3]))
        self.output_size = (unit_1, tf.TensorShape([unit_2, unit_3]))
    def build(self, inputs_shape):
        # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
        if self.use_tuple:
            input_1 = inputs_shape.t1[1]
            input_2, input_3 = inputs_shape.t2[1:]
        else:
            input_1 = inputs_shape[0][1]
            input_2, input_3 = inputs_shape[1][1:]
        self.kernel_1 = self.add_weight(
            shape=(input_1, self.unit_1), initializer="uniform", name="kernel_1"
        )
        self.kernel_2_3 = self.add_weight(
            shape=(input_2, input_3, self.unit_2, self.unit_3),
            initializer="uniform",
            name="kernel_2_3",
        )
    def call(self, inputs, states):
        # inputs should be in [(batch, input_1), (batch, input_2, input_3)]
        # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
        flatten_inputs = tf.nest.flatten(inputs)
        s1, s2 = states
        output_1 = tf.matmul(flatten_inputs[0], self.kernel_1)
        # Contract the two trailing axes of the rank-3 input with kernel_2_3.
        output_2_3 = tf.einsum(
            "bij,ijkl->bkl", flatten_inputs[1], self.kernel_2_3
        )
        # New states are residual updates of the previous states.
        state_1 = s1 + output_1
        state_2_3 = s2 + output_2_3
        output = [output_1, output_2_3]
        new_states = NestedState(s1=state_1, s2=state_2_3)
        return output, new_states
if __name__ == "__main__":
    # Run the module's test suite when executed as a script.
    tf.test.main()
| [
"keras.testing_infra.test_utils.should_run_eagerly",
"tensorflow.compat.v2.nest.map_structure",
"keras.engine.base_layer_utils.call_context",
"tensorflow.compat.v2.einsum",
"numpy.array",
"keras.backend.dot",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.executing_eagerly",
"keras.Model",
... | [((1424, 1475), 'collections.namedtuple', 'collections.namedtuple', (['"""NestedInput"""', "['t1', 't2']"], {}), "('NestedInput', ['t1', 't2'])\n", (1446, 1475), False, 'import collections\n'), ((1490, 1541), 'collections.namedtuple', 'collections.namedtuple', (['"""NestedState"""', "['s1', 's2']"], {}), "('NestedState', ['s1', 's2'])\n", (1512, 1541), False, 'import collections\n'), ((24021, 24113), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[keras.layers.SimpleRNN, keras.layers.GRU, keras.layers.LSTM]'], {}), '([keras.layers.SimpleRNN, keras.layers.GRU, keras.\n layers.LSTM])\n', (24045, 24113), False, 'from absl.testing import parameterized\n'), ((44443, 44482), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[True, False]'], {}), '([True, False])\n', (44467, 44482), False, 'from absl.testing import parameterized\n'), ((56507, 56610), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell]'], {}), '([keras.layers.SimpleRNNCell, keras.layers.GRUCell,\n keras.layers.LSTMCell])\n', (56531, 56610), False, 'from absl.testing import parameterized\n'), ((61711, 61814), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell]'], {}), '([keras.layers.SimpleRNNCell, keras.layers.GRUCell,\n keras.layers.LSTMCell])\n', (61735, 61814), False, 'from absl.testing import parameterized\n'), ((70565, 70665), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM]'], {}), '([keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM,\n gru.GRU, lstm.LSTM])\n', (70589, 70665), False, 'from absl.testing import parameterized\n'), ((71489, 71609), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[keras.layers.SimpleRNNCell, gru_v1.GRUCell, 
lstm_v1.LSTMCell, gru.GRUCell,\n lstm.LSTMCell]'], {}), '([keras.layers.SimpleRNNCell, gru_v1.GRUCell,\n lstm_v1.LSTMCell, gru.GRUCell, lstm.LSTMCell])\n', (71513, 71609), False, 'from absl.testing import parameterized\n'), ((77676, 77690), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (77688, 77690), True, 'import tensorflow.compat.v2 as tf\n'), ((2237, 2259), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (2248, 2259), False, 'import keras\n'), ((2276, 2298), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (2292, 2298), False, 'import keras\n'), ((2336, 2360), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (2354, 2360), False, 'import keras\n'), ((2752, 2775), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (2768, 2775), False, 'import keras\n'), ((2813, 2837), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (2831, 2837), False, 'import keras\n'), ((3817, 3839), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (3828, 3839), False, 'import keras\n'), ((3856, 3878), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (3872, 3878), False, 'import keras\n'), ((3916, 3940), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (3934, 3940), False, 'import keras\n'), ((4332, 4355), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (4348, 4355), False, 'import keras\n'), ((4524, 4548), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (4542, 4548), False, 'import keras\n'), ((6074, 6096), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (6085, 6096), False, 'import keras\n'), ((6147, 6169), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (6163, 6169), False, 'import keras\n'), ((6207, 6231), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (6225, 6231), False, 'import 
keras\n'), ((6505, 6532), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (6521, 6532), True, 'import numpy as np\n'), ((6840, 6864), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (6858, 6864), False, 'import keras\n'), ((7108, 7131), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (7124, 7131), False, 'import keras\n'), ((7169, 7193), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (7187, 7193), False, 'import keras\n'), ((7468, 7495), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (7484, 7495), True, 'import numpy as np\n'), ((7803, 7827), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (7821, 7827), False, 'import keras\n'), ((9221, 9243), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (9232, 9243), False, 'import keras\n'), ((9260, 9282), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (9276, 9282), False, 'import keras\n'), ((9320, 9344), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (9338, 9344), False, 'import keras\n'), ((9679, 9702), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (9695, 9702), False, 'import keras\n'), ((9740, 9764), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (9758, 9764), False, 'import keras\n'), ((10147, 10186), 'keras.Input', 'keras.Input', (['(time_step, embedding_dim)'], {}), '((time_step, embedding_dim))\n', (10158, 10186), False, 'import keras\n'), ((10309, 10378), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['units'], {'time_major': '(True)', 'return_sequences': '(True)'}), '(units, time_major=True, return_sequences=True)\n', (10331, 10378), False, 'import keras\n'), ((10783, 10807), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (10801, 10807), False, 'import keras\n'), ((11139, 11178), 'keras.Input', 
'keras.Input', (['(time_step, embedding_dim)'], {}), '((time_step, embedding_dim))\n', (11150, 11178), False, 'import keras\n'), ((11411, 11474), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'time_major': '(True)', 'return_sequences': '(True)'}), '(cells, time_major=True, return_sequences=True)\n', (11427, 11474), False, 'import keras\n'), ((11677, 11701), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (11695, 11701), False, 'import keras\n'), ((12041, 12080), 'keras.Input', 'keras.Input', (['(time_step, embedding_dim)'], {}), '((time_step, embedding_dim))\n', (12052, 12080), False, 'import keras\n'), ((12438, 12462), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (12456, 12462), False, 'import keras\n'), ((12797, 12836), 'keras.Input', 'keras.Input', (['(time_step, embedding_dim)'], {}), '((time_step, embedding_dim))\n', (12808, 12836), False, 'import keras\n'), ((12853, 12905), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['units'], {'return_sequences': '(True)'}), '(units, return_sequences=True)\n', (12875, 12905), False, 'import keras\n'), ((12944, 12968), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (12962, 12968), False, 'import keras\n'), ((13278, 13329), 'numpy.random.random', 'np.random.random', (['(batch, time_step, embedding_dim)'], {}), '((batch, time_step, embedding_dim))\n', (13294, 13329), True, 'import numpy as np\n'), ((13488, 13557), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['units'], {'time_major': '(True)', 'return_sequences': '(True)'}), '(units, time_major=True, return_sequences=True)\n', (13510, 13557), False, 'import keras\n'), ((13708, 13734), 'keras.models.Model', 'keras.models.Model', (['x', 'y_2'], {}), '(x, y_2)\n', (13726, 13734), False, 'import keras\n'), ((13967, 13989), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (13978, 13989), False, 'import keras\n'), ((14002, 14019), 'keras.Input', 'keras.Input', 
(['(3,)'], {}), '((3,))\n', (14013, 14019), False, 'import keras\n'), ((14093, 14115), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (14109, 14115), False, 'import keras\n'), ((14167, 14196), 'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (14185, 14196), False, 'import keras\n'), ((14512, 14539), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (14528, 14539), True, 'import numpy as np\n'), ((14555, 14579), 'numpy.random.random', 'np.random.random', (['(6, 3)'], {}), '((6, 3))\n', (14571, 14579), True, 'import numpy as np\n'), ((14945, 14974), 'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (14963, 14974), False, 'import keras\n'), ((15310, 15339), 'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (15328, 15339), False, 'import keras\n'), ((15681, 15704), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (15697, 15704), False, 'import keras\n'), ((15755, 15784), 'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (15773, 15784), False, 'import keras\n'), ((16101, 16123), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (16112, 16123), False, 'import keras\n'), ((16136, 16153), 'keras.Input', 'keras.Input', (['(3,)'], {}), '((3,))\n', (16147, 16153), False, 'import keras\n'), ((16222, 16245), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (16238, 16245), False, 'import keras\n'), ((16296, 16325), 'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (16314, 16325), False, 'import keras\n'), ((16641, 16668), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (16657, 16668), True, 'import numpy as np\n'), ((16684, 16708), 'numpy.random.random', 'np.random.random', (['(6, 3)'], {}), '((6, 3))\n', (16700, 16708), True, 'import numpy as np\n'), ((17002, 17031), 
'keras.models.Model', 'keras.models.Model', (['[x, c]', 'y'], {}), '([x, c], y)\n', (17020, 17031), False, 'import keras\n'), ((17259, 17281), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (17270, 17281), False, 'import keras\n'), ((17294, 17328), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 3]'], {'dtype': 'tf.float32'}), '([6, 3], dtype=tf.float32)\n', (17302, 17328), True, 'import tensorflow.compat.v2 as tf\n'), ((17402, 17424), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (17418, 17424), False, 'import keras\n'), ((17476, 17500), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (17494, 17500), False, 'import keras\n'), ((17925, 17948), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (17941, 17948), False, 'import keras\n'), ((17999, 18023), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (18017, 18023), False, 'import keras\n'), ((18352, 18374), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (18363, 18374), False, 'import keras\n'), ((18387, 18404), 'keras.Input', 'keras.Input', (['(3,)'], {}), '((3,))\n', (18398, 18404), False, 'import keras\n'), ((18417, 18435), 'keras.Input', 'keras.Input', (['(32,)'], {}), '((32,))\n', (18428, 18435), False, 'import keras\n'), ((18509, 18531), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (18525, 18531), False, 'import keras\n'), ((18599, 18631), 'keras.models.Model', 'keras.models.Model', (['[x, s, c]', 'y'], {}), '([x, s, c], y)\n', (18617, 18631), False, 'import keras\n'), ((18979, 19006), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (18995, 19006), True, 'import numpy as np\n'), ((19022, 19047), 'numpy.random.random', 'np.random.random', (['(6, 32)'], {}), '((6, 32))\n', (19038, 19047), True, 'import numpy as np\n'), ((19063, 19087), 'numpy.random.random', 'np.random.random', (['(6, 3)'], {}), '((6, 3))\n', (19079, 19087), 
True, 'import numpy as np\n'), ((19476, 19508), 'keras.models.Model', 'keras.models.Model', (['[x, s, c]', 'y'], {}), '([x, s, c], y)\n', (19494, 19508), False, 'import keras\n'), ((20076, 20108), 'keras.models.Model', 'keras.models.Model', (['[x, s, c]', 'y'], {}), '([x, s, c], y)\n', (20094, 20108), False, 'import keras\n'), ((20360, 20382), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (20371, 20382), False, 'import keras\n'), ((20395, 20429), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 3]'], {'dtype': 'tf.float32'}), '([6, 3], dtype=tf.float32)\n', (20403, 20429), True, 'import tensorflow.compat.v2 as tf\n'), ((20442, 20477), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 32]'], {'dtype': 'tf.float32'}), '([6, 32], dtype=tf.float32)\n', (20450, 20477), True, 'import tensorflow.compat.v2 as tf\n'), ((20551, 20573), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (20567, 20573), False, 'import keras\n'), ((20642, 20666), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (20660, 20666), False, 'import keras\n'), ((21091, 21114), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (21107, 21114), False, 'import keras\n'), ((21352, 21376), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (21370, 21376), False, 'import keras\n'), ((21648, 21670), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (21668, 21670), True, 'import tensorflow.compat.v2 as tf\n'), ((21830, 21853), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (21846, 21853), False, 'import keras\n'), ((22186, 22208), 'keras.Input', 'keras.Input', (['(None, 1)'], {}), '((None, 1))\n', (22197, 22208), False, 'import keras\n'), ((22226, 22242), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (22239, 22242), True, 'import tensorflow.compat.v2 as tf\n'), ((22260, 22290), 'tensorflow.compat.v2.reduce_sum', 
'tf.reduce_sum', (['cells[0].kernel'], {}), '(cells[0].kernel)\n', (22273, 22290), True, 'import tensorflow.compat.v2 as tf\n'), ((22650, 22673), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (22666, 22673), False, 'import keras\n'), ((22686, 22708), 'keras.Input', 'keras.Input', (['(None, 1)'], {}), '((None, 1))\n', (22697, 22708), False, 'import keras\n'), ((22750, 22820), 'tensorflow.compat.v2.compat.v1.assign_add', 'tf.compat.v1.assign_add', (['cells[0].kernel', '(x[0, 0, 0] * cells[0].kernel)'], {}), '(cells[0].kernel, x[0, 0, 0] * cells[0].kernel)\n', (22773, 22820), True, 'import tensorflow.compat.v2 as tf\n'), ((24611, 24675), 'keras.Input', 'keras.Input', ([], {'batch_shape': '(num_samples, timesteps, embedding_dim)'}), '(batch_shape=(num_samples, timesteps, embedding_dim))\n', (24622, 24675), False, 'import keras\n'), ((24862, 24926), 'keras.Input', 'keras.Input', ([], {'batch_shape': '(num_samples, timesteps, embedding_dim)'}), '(batch_shape=(num_samples, timesteps, embedding_dim))\n', (24873, 24926), False, 'import keras\n'), ((25030, 25067), 'keras.Model', 'keras.Model', (['[input1, input2]', 'output'], {}), '([input1, input2], output)\n', (25041, 25067), False, 'import keras\n'), ((25342, 25415), 'keras.utils.generic_utils.register_keras_serializable', 'keras.utils.generic_utils.register_keras_serializable', ([], {'package': '"""TestOnly"""'}), "(package='TestOnly')\n", (25395, 25415), False, 'import keras\n'), ((29173, 29197), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (29191, 29197), False, 'import keras\n'), ((29294, 29321), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (29310, 29321), True, 'import numpy as np\n'), ((29337, 29361), 'numpy.random.random', 'np.random.random', (['(6, 3)'], {}), '((6, 3))\n', (29353, 29361), True, 'import numpy as np\n'), ((29920, 29958), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'unroll': 'unroll'}), '(cells, 
unroll=unroll)\n', (29936, 29958), False, 'import keras\n'), ((30109, 30133), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (30127, 30133), False, 'import keras\n'), ((30230, 30257), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (30246, 30257), True, 'import numpy as np\n'), ((30273, 30297), 'numpy.random.random', 'np.random.random', (['(6, 3)'], {}), '((6, 3))\n', (30289, 30297), True, 'import numpy as np\n'), ((30648, 30784), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['(3)'], {'dropout': '(0.5)', 'kernel_initializer': '"""ones"""', 'recurrent_initializer': '"""zeros"""', 'return_sequences': '(True)', 'unroll': '(True)'}), "(3, dropout=0.5, kernel_initializer='ones',\n recurrent_initializer='zeros', return_sequences=True, unroll=True)\n", (30670, 30784), False, 'import keras\n'), ((33003, 33068), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'return_state': '(True)', 'return_sequences': '(True)'}), '(cells, return_state=True, return_sequences=True)\n', (33019, 33068), False, 'import keras\n'), ((33565, 33626), 'keras.layers.StackedRNNCells', 'keras.layers.StackedRNNCells', (['cells'], {'reverse_state_order': '(True)'}), '(cells, reverse_state_order=True)\n', (33593, 33626), False, 'import keras\n'), ((33665, 33737), 'keras.layers.RNN', 'keras.layers.RNN', (['stacked_cell'], {'return_state': '(True)', 'return_sequences': '(True)'}), '(stacked_cell, return_state=True, return_sequences=True)\n', (33681, 33737), False, 'import keras\n'), ((35220, 35245), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(32)'], {}), '(32)\n', (35241, 35245), False, 'import keras\n'), ((35294, 35330), 'keras.layers.StackedRNNCells', 'keras.layers.StackedRNNCells', (['[cell]'], {}), '([cell])\n', (35322, 35330), False, 'import keras\n'), ((35346, 35368), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (35362, 35368), False, 'import keras\n'), ((35386, 35423), 'numpy.ones', 'np.ones', 
(['(8, 4, 16)'], {'dtype': 'np.float32'}), '((8, 4, 16), dtype=np.float32)\n', (35393, 35423), True, 'import numpy as np\n'), ((35755, 35820), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(cells, return_sequences=True, return_state=True)\n', (35771, 35820), False, 'import keras\n'), ((35840, 35860), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (35851, 35860), False, 'import keras\n'), ((35879, 35903), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (35890, 35903), False, 'import keras\n'), ((36425, 36483), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2]', '[output1, output2]'], {}), '([input_1, input_2], [output1, output2])\n', (36443, 36483), False, 'import keras\n'), ((37091, 37156), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(cells, return_sequences=True, return_state=True)\n', (37107, 37156), False, 'import keras\n'), ((37176, 37196), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (37187, 37196), False, 'import keras\n'), ((37215, 37239), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (37226, 37239), False, 'import keras\n'), ((37800, 37858), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2]', '[output1, output2]'], {}), '([input_1, input_2], [output1, output2])\n', (37818, 37858), False, 'import keras\n'), ((38388, 38415), 'numpy.random.random', 'np.random.random', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (38404, 38415), True, 'import numpy as np\n'), ((38428, 38452), 'numpy.random.random', 'np.random.random', (['(2, 2)'], {}), '((2, 2))\n', (38444, 38452), True, 'import numpy as np\n'), ((38469, 38494), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (38492, 38494), False, 'import keras\n'), ((39260, 39297), 'keras.Input', 'keras.Input', (['(None, input_a, input_b)'], {}), '((None, 
input_a, input_b))\n', (39271, 39297), False, 'import keras\n'), ((39314, 39336), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (39330, 39336), False, 'import keras\n'), ((39701, 39725), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (39719, 39725), False, 'import keras\n'), ((40313, 40336), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (40329, 40336), False, 'import keras\n'), ((40374, 40398), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (40392, 40398), False, 'import keras\n'), ((41044, 41081), 'keras.Input', 'keras.Input', (['(None, input_a, input_b)'], {}), '((None, input_a, input_b))\n', (41055, 41081), False, 'import keras\n'), ((41094, 41123), 'keras.Input', 'keras.Input', (['(unit_a, unit_b)'], {}), '((unit_a, unit_b))\n', (41105, 41123), False, 'import keras\n'), ((41140, 41162), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (41156, 41162), False, 'import keras\n'), ((41218, 41247), 'keras.models.Model', 'keras.models.Model', (['[x, s]', 'y'], {}), '([x, s], y)\n', (41236, 41247), False, 'import keras\n'), ((41854, 41891), 'keras.Input', 'keras.Input', (['(None, input_a, input_b)'], {}), '((None, input_a, input_b))\n', (41865, 41891), False, 'import keras\n'), ((41904, 41943), 'keras.Input', 'keras.Input', (['(bad_shape_a, bad_shape_b)'], {}), '((bad_shape_a, bad_shape_b))\n', (41915, 41943), False, 'import keras\n'), ((41960, 41982), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (41976, 41982), False, 'import keras\n'), ((42330, 42361), 'keras.Input', 'keras.Input', (['(None, input_size)'], {}), '((None, input_size))\n', (42341, 42361), False, 'import keras\n'), ((42378, 42400), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (42394, 42400), False, 'import keras\n'), ((42714, 42738), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (42732, 42738), False, 'import 
keras\n'), ((43142, 43171), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(5)'], {}), '(5)\n', (43168, 43171), False, 'import keras\n'), ((44677, 44718), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'stateful': 'stateful'}), '(cell, stateful=stateful)\n', (44693, 44718), False, 'import keras\n'), ((44787, 44830), 'keras.Input', 'keras.Input', (['(t, i1)'], {'batch_size': 'batch_size'}), '((t, i1), batch_size=batch_size)\n', (44798, 44830), False, 'import keras\n'), ((44849, 44896), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {'batch_size': 'batch_size'}), '((t, i2, i3), batch_size=batch_size)\n', (44860, 44896), False, 'import keras\n'), ((45146, 45193), 'keras.models.Model', 'keras.models.Model', (['(input_1, input_2)', 'outputs'], {}), '((input_1, input_2), outputs)\n', (45164, 45193), False, 'import keras\n'), ((45694, 45735), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'stateful': 'stateful'}), '(cell, stateful=stateful)\n', (45710, 45735), False, 'import keras\n'), ((45755, 45798), 'keras.Input', 'keras.Input', (['(t, i1)'], {'batch_size': 'batch_size'}), '((t, i1), batch_size=batch_size)\n', (45766, 45798), False, 'import keras\n'), ((45817, 45864), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {'batch_size': 'batch_size'}), '((t, i2, i3), batch_size=batch_size)\n', (45828, 45864), False, 'import keras\n'), ((46131, 46178), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2]', 'outputs'], {}), '([input_1, input_2], outputs)\n', (46149, 46178), False, 'import keras\n'), ((46805, 46869), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(cell, return_sequences=True, return_state=True)\n', (46821, 46869), False, 'import keras\n'), ((46889, 46909), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (46900, 46909), False, 'import keras\n'), ((46928, 46952), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (46939, 
46952), False, 'import keras\n'), ((47283, 47341), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2]', '[output1, output2]'], {}), '([input_1, input_2], [output1, output2])\n', (47301, 47341), False, 'import keras\n'), ((47820, 47884), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(cell, return_sequences=True, return_state=True)\n', (47836, 47884), False, 'import keras\n'), ((47904, 47924), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (47915, 47924), False, 'import keras\n'), ((47943, 47967), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (47954, 47967), False, 'import keras\n'), ((48315, 48373), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2]', '[output1, output2]'], {}), '([input_1, input_2], [output1, output2])\n', (48333, 48373), False, 'import keras\n'), ((48981, 49045), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(cell, return_sequences=True, return_state=True)\n', (48997, 49045), False, 'import keras\n'), ((49065, 49085), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (49076, 49085), False, 'import keras\n'), ((49104, 49128), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (49115, 49128), False, 'import keras\n'), ((49147, 49165), 'keras.Input', 'keras.Input', (['(o1,)'], {}), '((o1,))\n', (49158, 49165), False, 'import keras\n'), ((49184, 49205), 'keras.Input', 'keras.Input', (['(o2, o3)'], {}), '((o2, o3))\n', (49195, 49205), False, 'import keras\n'), ((49592, 49668), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2, init_s1, init_s2]', '[output1, output2]'], {}), '([input_1, input_2, init_s1, init_s2], [output1, output2])\n', (49610, 49668), False, 'import keras\n'), ((50298, 50362), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'return_state': '(True)'}), 
'(cell, return_sequences=True, return_state=True)\n', (50314, 50362), False, 'import keras\n'), ((50382, 50402), 'keras.Input', 'keras.Input', (['(t, i1)'], {}), '((t, i1))\n', (50393, 50402), False, 'import keras\n'), ((50421, 50445), 'keras.Input', 'keras.Input', (['(t, i2, i3)'], {}), '((t, i2, i3))\n', (50432, 50445), False, 'import keras\n'), ((50464, 50482), 'keras.Input', 'keras.Input', (['(o1,)'], {}), '((o1,))\n', (50475, 50482), False, 'import keras\n'), ((50501, 50522), 'keras.Input', 'keras.Input', (['(o2, o3)'], {}), '((o2, o3))\n', (50512, 50522), False, 'import keras\n'), ((50975, 51051), 'keras.models.Model', 'keras.models.Model', (['[input_1, input_2, init_s1, init_s2]', '[output1, output2]'], {}), '([input_1, input_2, init_s1, init_s2], [output1, output2])\n', (50993, 51051), False, 'import keras\n'), ((52110, 52139), 'keras.Input', 'keras.Input', (['(3, 1)'], {'name': '"""x"""'}), "((3, 1), name='x')\n", (52121, 52139), False, 'import keras\n'), ((52199, 52228), 'keras.Input', 'keras.Input', (['(1,)'], {'name': '"""s_0"""'}), "((1,), name='s_0')\n", (52210, 52228), False, 'import keras\n'), ((52355, 52391), 'keras.models.Model', 'keras.models.Model', (['[x, s_0]', '[y, s]'], {}), '([x, s_0], [y, s])\n', (52373, 52391), False, 'import keras\n'), ((52587, 52620), 'numpy.array', 'np.array', (['[[[1.0], [2.0], [0.0]]]'], {}), '([[[1.0], [2.0], [0.0]]])\n', (52595, 52620), True, 'import numpy as np\n'), ((52638, 52656), 'numpy.array', 'np.array', (['[[10.0]]'], {}), '([[10.0]])\n', (52646, 52656), True, 'import numpy as np\n'), ((54165, 54194), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(5)'], {}), '(5)\n', (54191, 54194), False, 'import keras\n'), ((54207, 54226), 'keras.Input', 'keras.Input', (['(1, 5)'], {}), '((1, 5))\n', (54218, 54226), False, 'import keras\n'), ((54243, 54301), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'unroll': '(True)'}), '(cell, return_sequences=True, 
unroll=True)\n', (54259, 54301), False, 'import keras\n'), ((54339, 54363), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (54357, 54363), False, 'import keras\n'), ((54527, 54545), 'numpy.ones', 'np.ones', (['(6, 1, 5)'], {}), '((6, 1, 5))\n', (54534, 54545), True, 'import numpy as np\n'), ((54757, 54786), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(5)'], {}), '(5)\n', (54783, 54786), False, 'import keras\n'), ((54799, 54821), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (54810, 54821), False, 'import keras\n'), ((54838, 54896), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'unroll': '(True)'}), '(cell, return_sequences=True, unroll=True)\n', (54854, 54896), False, 'import keras\n'), ((55114, 55155), 'keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': '(1, 1, 1)'}), '(batch_shape=(1, 1, 1))\n', (55132, 55155), False, 'import keras\n'), ((55174, 55212), 'keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': '(1, 1)'}), '(batch_shape=(1, 1))\n', (55192, 55212), False, 'import keras\n'), ((55231, 55269), 'keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': '(1, 1)'}), '(batch_shape=(1, 1))\n', (55249, 55269), False, 'import keras\n'), ((55432, 55484), 'keras.Model', 'keras.Model', (['[inputs, state_h, state_c]', 'decoder_out'], {}), '([inputs, state_h, state_c], decoder_out)\n', (55443, 55484), False, 'import keras\n'), ((56927, 56976), 'keras.Input', 'keras.Input', ([], {'batch_shape': '(batch, None, input_dim)'}), '(batch_shape=(batch, None, input_dim))\n', (56938, 56976), False, 'import keras\n'), ((56993, 57031), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {'stateful': '(True)'}), '(cells, stateful=True)\n', (57009, 57031), False, 'import keras\n'), ((57070, 57087), 'keras.Model', 'keras.Model', (['x', 'y'], {}), '(x, y)\n', (57081, 57087), False, 'import keras\n'), ((57978, 58021), 'numpy.full', 'np.full', 
(['(batch, timesteps, input_dim)', '(0.5)'], {}), '((batch, timesteps, input_dim), 0.5)\n', (57985, 58021), True, 'import numpy as np\n'), ((60958, 60998), 'keras.Input', 'keras.Input', ([], {'shape': '(10, 2)', 'batch_size': '(4)'}), '(shape=(10, 2), batch_size=4)\n', (60969, 60998), False, 'import keras\n'), ((61062, 61099), 'numpy.ones', 'np.ones', (['(4, 10, 2)'], {'dtype': 'np.float32'}), '((4, 10, 2), dtype=np.float32)\n', (61069, 61099), True, 'import numpy as np\n'), ((61305, 61360), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['(5)'], {'input_length': '(10)', 'input_dim': '(8)'}), '(5, input_length=10, input_dim=8)\n', (61327, 61360), False, 'import keras\n'), ((61454, 61492), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['(5)'], {'input_dim': '(8)'}), '(5, input_dim=8)\n', (61476, 61492), False, 'import keras\n'), ((61588, 61630), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['(5)'], {'input_length': '(10)'}), '(5, input_length=10)\n', (61610, 61630), False, 'import keras\n'), ((62227, 62262), 'keras.Input', 'keras.Input', (['(timesteps, input_dim)'], {}), '((timesteps, input_dim))\n', (62238, 62262), False, 'import keras\n'), ((62533, 62575), 'keras.models.Model', 'keras.models.Model', (['inputs', 'decoder_output'], {}), '(inputs, decoder_output)\n', (62551, 62575), False, 'import keras\n'), ((63292, 63503), 'tensorflow.compat.v2.ragged.constant', 'tf.ragged.constant', (['[[[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 1.0, 1.0]], [[2.0, 4.0, 1.0, \n 3.0, 1.0]], [[2.0, 3.0, 4.0, 1.0, 5.0], [2.0, 3.0, 1.0, 1.0, 1.0], [1.0,\n 2.0, 3.0, 4.0, 5.0]]]'], {'ragged_rank': '(1)'}), '([[[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 1.0, 1.0]],\n [[2.0, 4.0, 1.0, 3.0, 1.0]], [[2.0, 3.0, 4.0, 1.0, 5.0], [2.0, 3.0, 1.0,\n 1.0, 1.0], [1.0, 2.0, 3.0, 4.0, 5.0]]], ragged_rank=1)\n', (63310, 63503), True, 'import tensorflow.compat.v2 as tf\n'), ((63694, 63737), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 1], [1, 1, 
0], [0, 0, 1]])\n', (63702, 63737), True, 'import numpy as np\n'), ((63786, 63805), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (63800, 63805), True, 'import numpy as np\n'), ((63877, 63918), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'ragged': '(True)'}), '(shape=(None, 5), ragged=True)\n', (63888, 63918), False, 'import keras\n'), ((63974, 64012), 'keras.models.Model', 'keras.models.Model', (['x_ragged', 'y_ragged'], {}), '(x_ragged, y_ragged)\n', (63992, 64012), False, 'import keras\n'), ((64092, 64117), 'keras.Input', 'keras.Input', ([], {'shape': '(3, 5)'}), '(shape=(3, 5))\n', (64103, 64117), False, 'import keras\n'), ((64223, 64259), 'keras.models.Model', 'keras.models.Model', (['x_dense', 'y_dense'], {}), '(x_dense, y_dense)\n', (64241, 64259), False, 'import keras\n'), ((64473, 64492), 'numpy.random.seed', 'np.random.seed', (['(200)'], {}), '(200)\n', (64487, 64492), True, 'import numpy as np\n'), ((64588, 64629), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'ragged': '(True)'}), '(shape=(None, 5), ragged=True)\n', (64599, 64629), False, 'import keras\n'), ((64690, 64728), 'keras.models.Model', 'keras.models.Model', (['x_ragged', 'y_ragged'], {}), '(x_ragged, y_ragged)\n', (64708, 64728), False, 'import keras\n'), ((64808, 64833), 'keras.Input', 'keras.Input', ([], {'shape': '(3, 5)'}), '(shape=(3, 5))\n', (64819, 64833), False, 'import keras\n'), ((64944, 64980), 'keras.models.Model', 'keras.models.Model', (['x_dense', 'y_dense'], {}), '(x_dense, y_dense)\n', (64962, 64980), False, 'import keras\n'), ((65230, 65281), 'keras.backend.convert_inputs_if_ragged', 'keras.backend.convert_inputs_if_ragged', (['ragged_data'], {}), '(ragged_data)\n', (65268, 65281), False, 'import keras\n'), ((65441, 65500), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'dtype': 'tf.float32', 'ragged': '(True)'}), '(shape=(None, 5), dtype=tf.float32, ragged=True)\n', (65452, 65500), False, 'import keras\n'), ((65673, 
65708), 'keras.models.Model', 'keras.models.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (65691, 65708), False, 'import keras\n'), ((65977, 66050), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'batch_size': '(3)', 'dtype': 'tf.float32', 'ragged': '(True)'}), '(shape=(None, 5), batch_size=3, dtype=tf.float32, ragged=True)\n', (65988, 66050), False, 'import keras\n'), ((66187, 66222), 'keras.models.Model', 'keras.models.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (66205, 66222), False, 'import keras\n'), ((66744, 66763), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (66758, 66763), True, 'import numpy as np\n'), ((66846, 66887), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'ragged': '(True)'}), '(shape=(None, 5), ragged=True)\n', (66857, 66887), False, 'import keras\n'), ((66953, 66991), 'keras.models.Model', 'keras.models.Model', (['x_ragged', 'y_ragged'], {}), '(x_ragged, y_ragged)\n', (66971, 66991), False, 'import keras\n'), ((67229, 67254), 'keras.Input', 'keras.Input', ([], {'shape': '(3, 5)'}), '(shape=(3, 5))\n', (67240, 67254), False, 'import keras\n'), ((67370, 67406), 'keras.models.Model', 'keras.models.Model', (['x_dense', 'y_dense'], {}), '(x_dense, y_dense)\n', (67388, 67406), False, 'import keras\n'), ((67600, 67662), 'tensorflow.compat.v2.RaggedTensor.from_tensor', 'tf.RaggedTensor.from_tensor', (['output_dense'], {'lengths': 'row_lengths'}), '(output_dense, lengths=row_lengths)\n', (67627, 67662), True, 'import tensorflow.compat.v2 as tf\n'), ((67824, 67843), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (67838, 67843), True, 'import numpy as np\n'), ((67945, 67986), 'keras.Input', 'keras.Input', ([], {'shape': '(None, 5)', 'ragged': '(True)'}), '(shape=(None, 5), ragged=True)\n', (67956, 67986), False, 'import keras\n'), ((68052, 68090), 'keras.models.Model', 'keras.models.Model', (['x_ragged', 'y_ragged'], {}), '(x_ragged, y_ragged)\n', (68070, 
68090), False, 'import keras\n'), ((68328, 68353), 'keras.Input', 'keras.Input', ([], {'shape': '(3, 5)'}), '(shape=(3, 5))\n', (68339, 68353), False, 'import keras\n'), ((68469, 68505), 'keras.models.Model', 'keras.models.Model', (['x_dense', 'y_dense'], {}), '(x_dense, y_dense)\n', (68487, 68505), False, 'import keras\n'), ((69391, 69431), 'keras.backend.reverse', 'keras.backend.reverse', (['output_dense', '[1]'], {}), '(output_dense, [1])\n', (69412, 69431), False, 'import keras\n'), ((69455, 69517), 'tensorflow.compat.v2.RaggedTensor.from_tensor', 'tf.RaggedTensor.from_tensor', (['output_dense'], {'lengths': 'row_lengths'}), '(output_dense, lengths=row_lengths)\n', (69482, 69517), True, 'import tensorflow.compat.v2 as tf\n'), ((70081, 70103), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (70092, 70103), False, 'import keras\n'), ((70159, 70213), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: None)', 'cell.state_size'], {}), '(lambda t: None, cell.state_size)\n', (70180, 70213), True, 'import tensorflow.compat.v2 as tf\n'), ((70230, 70252), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (70246, 70252), False, 'import keras\n'), ((70319, 70343), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (70337, 70343), False, 'import keras\n'), ((70791, 70841), 'tensorflow.compat.v2.compat.v1.executing_eagerly_outside_functions', 'tf.compat.v1.executing_eagerly_outside_functions', ([], {}), '()\n', (70839, 70841), True, 'import tensorflow.compat.v2 as tf\n'), ((71804, 71854), 'tensorflow.compat.v2.compat.v1.executing_eagerly_outside_functions', 'tf.compat.v1.executing_eagerly_outside_functions', ([], {}), '()\n', (71852, 71854), True, 'import tensorflow.compat.v2 as tf\n'), ((73410, 73454), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.input_kernel'], {}), '(inputs, self.input_kernel)\n', (73427, 73454), False, 'import keras\n'), ((73473, 73526), 
'keras.backend.dot', 'keras.backend.dot', (['prev_output', 'self.recurrent_kernel'], {}), '(prev_output, self.recurrent_kernel)\n', (73490, 73526), False, 'import keras\n'), ((73545, 73594), 'keras.backend.dot', 'keras.backend.dot', (['constant', 'self.constant_kernel'], {}), '(constant, self.constant_kernel)\n', (73562, 73594), False, 'import keras\n'), ((74248, 74280), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[unit_a, unit_b]'], {}), '([unit_a, unit_b])\n', (74262, 74280), True, 'import tensorflow.compat.v2 as tf\n'), ((74308, 74340), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[unit_a, unit_b]'], {}), '([unit_a, unit_b])\n', (74322, 74340), True, 'import tensorflow.compat.v2 as tf\n'), ((75092, 75139), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""bij,ijkl->bkl"""', 'inputs', 'self.kernel'], {}), "('bij,ijkl->bkl', inputs, self.kernel)\n", (75101, 75139), True, 'import tensorflow.compat.v2 as tf\n'), ((75153, 75186), 'tensorflow.compat.v2.expand_dims', 'tf.expand_dims', (['self.bias'], {'axis': '(0)'}), '(self.bias, axis=0)\n', (75167, 75186), True, 'import tensorflow.compat.v2 as tf\n'), ((77222, 77245), 'tensorflow.compat.v2.nest.flatten', 'tf.nest.flatten', (['inputs'], {}), '(inputs)\n', (77237, 77245), True, 'import tensorflow.compat.v2 as tf\n'), ((77290, 77333), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['flatten_inputs[0]', 'self.kernel_1'], {}), '(flatten_inputs[0], self.kernel_1)\n', (77299, 77333), True, 'import tensorflow.compat.v2 as tf\n'), ((77355, 77417), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""bij,ijkl->bkl"""', 'flatten_inputs[1]', 'self.kernel_2_3'], {}), "('bij,ijkl->bkl', flatten_inputs[1], self.kernel_2_3)\n", (77364, 77417), True, 'import tensorflow.compat.v2 as tf\n'), ((2537, 2556), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (2545, 2556), True, 'import numpy as np\n'), ((2558, 2575), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (2566, 2575), True, 'import 
numpy as np\n'), ((3014, 3033), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (3022, 3033), True, 'import numpy as np\n'), ((3035, 3052), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (3043, 3052), True, 'import numpy as np\n'), ((4117, 4136), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (4125, 4136), True, 'import numpy as np\n'), ((4138, 4155), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (4146, 4155), True, 'import numpy as np\n'), ((4725, 4744), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (4733, 4744), True, 'import numpy as np\n'), ((4746, 4763), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (4754, 4763), True, 'import numpy as np\n'), ((6408, 6427), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (6416, 6427), True, 'import numpy as np\n'), ((6429, 6446), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (6437, 6446), True, 'import numpy as np\n'), ((6655, 6722), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (["{'MinimalRNNCell': MinimalRNNCell}"], {}), "({'MinimalRNNCell': MinimalRNNCell})\n", (6686, 6722), False, 'from keras.utils import generic_utils\n'), ((6766, 6802), 'keras.layers.RNN.from_config', 'keras.layers.RNN.from_config', (['config'], {}), '(config)\n', (6794, 6802), False, 'import keras\n'), ((7370, 7389), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (7378, 7389), True, 'import numpy as np\n'), ((7391, 7408), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (7399, 7408), True, 'import numpy as np\n'), ((7618, 7685), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (["{'MinimalRNNCell': MinimalRNNCell}"], {}), "({'MinimalRNNCell': MinimalRNNCell})\n", (7649, 7685), False, 'from keras.utils import generic_utils\n'), ((7729, 7765), 'keras.layers.RNN.from_config', 'keras.layers.RNN.from_config', (['config'], {}), 
'(config)\n', (7757, 7765), False, 'import keras\n'), ((9521, 9540), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (9529, 9540), True, 'import numpy as np\n'), ((9542, 9559), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (9550, 9559), True, 'import numpy as np\n'), ((9941, 9960), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (9949, 9960), True, 'import numpy as np\n'), ((9962, 9979), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (9970, 9979), True, 'import numpy as np\n'), ((10997, 11040), 'numpy.zeros', 'np.zeros', (['(batch, time_step, embedding_dim)'], {}), '((batch, time_step, embedding_dim))\n', (11005, 11040), True, 'import numpy as np\n'), ((11054, 11089), 'numpy.zeros', 'np.zeros', (['(batch, time_step, units)'], {}), '((batch, time_step, units))\n', (11062, 11089), True, 'import numpy as np\n'), ((11334, 11375), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['cell_units[i]'], {}), '(cell_units[i])\n', (11360, 11375), False, 'import keras\n'), ((11891, 11934), 'numpy.zeros', 'np.zeros', (['(batch, time_step, embedding_dim)'], {}), '((batch, time_step, embedding_dim))\n', (11899, 11934), True, 'import numpy as np\n'), ((11948, 11992), 'numpy.zeros', 'np.zeros', (['(batch, time_step, cell_units[-1])'], {}), '((batch, time_step, cell_units[-1]))\n', (11956, 11992), True, 'import numpy as np\n'), ((12200, 12222), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (12220, 12222), False, 'import keras\n'), ((12249, 12318), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['units'], {'time_major': '(True)', 'return_sequences': '(True)'}), '(units, time_major=True, return_sequences=True)\n', (12271, 12318), False, 'import keras\n'), ((12652, 12695), 'numpy.zeros', 'np.zeros', (['(batch, time_step, embedding_dim)'], {}), '((batch, time_step, embedding_dim))\n', (12660, 12695), True, 'import numpy as np\n'), ((12709, 12744), 'numpy.zeros', 'np.zeros', (['(batch, 
time_step, units)'], {}), '((batch, time_step, units))\n', (12717, 12744), True, 'import numpy as np\n'), ((13158, 13201), 'numpy.zeros', 'np.zeros', (['(batch, time_step, embedding_dim)'], {}), '((batch, time_step, embedding_dim))\n', (13166, 13201), True, 'import numpy as np\n'), ((13215, 13250), 'numpy.zeros', 'np.zeros', (['(batch, time_step, units)'], {}), '((batch, time_step, units))\n', (13223, 13250), True, 'import numpy as np\n'), ((14427, 14444), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (14435, 14444), True, 'import numpy as np\n'), ((14782, 14829), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (['custom_objects'], {}), '(custom_objects)\n', (14813, 14829), False, 'from keras.utils import generic_utils\n'), ((15155, 15202), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (['custom_objects'], {}), '(custom_objects)\n', (15186, 15202), False, 'from keras.utils import generic_utils\n'), ((15529, 15543), 'keras.layers.rnn.gru.GRUCell', 'gru.GRUCell', (['(8)'], {}), '(8)\n', (15540, 15543), False, 'from keras.layers.rnn import gru\n'), ((16015, 16032), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (16023, 16032), True, 'import numpy as np\n'), ((16171, 16204), 'keras.layers.rnn.gru.GRUCell', 'gru.GRUCell', (['(32)'], {'reset_after': '(True)'}), '(32, reset_after=True)\n', (16182, 16204), False, 'from keras.layers.rnn import gru\n'), ((16556, 16573), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (16564, 16573), True, 'import numpy as np\n'), ((16839, 16886), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (['custom_objects'], {}), '(custom_objects)\n', (16870, 16886), False, 'from keras.utils import generic_utils\n'), ((17677, 17696), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (17685, 17696), True, 'import numpy as np\n'), ((17698, 17715), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), 
'((6, 32))\n', (17706, 17715), True, 'import numpy as np\n'), ((17773, 17787), 'keras.layers.rnn.gru.GRUCell', 'gru.GRUCell', (['(8)'], {}), '(8)\n', (17784, 17787), False, 'from keras.layers.rnn import gru\n'), ((18200, 18219), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (18208, 18219), True, 'import numpy as np\n'), ((18221, 18238), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (18229, 18238), True, 'import numpy as np\n'), ((18893, 18910), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (18901, 18910), True, 'import numpy as np\n'), ((19296, 19343), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (['custom_objects'], {}), '(custom_objects)\n', (19327, 19343), False, 'from keras.utils import generic_utils\n'), ((19918, 19965), 'keras.utils.generic_utils.CustomObjectScope', 'generic_utils.CustomObjectScope', (['custom_objects'], {}), '(custom_objects)\n', (19949, 19965), False, 'from keras.utils import generic_utils\n'), ((20843, 20862), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (20851, 20862), True, 'import numpy as np\n'), ((20864, 20881), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (20872, 20881), True, 'import numpy as np\n'), ((20939, 20953), 'keras.layers.rnn.gru.GRUCell', 'gru.GRUCell', (['(8)'], {}), '(8)\n', (20950, 20953), False, 'from keras.layers.rnn import gru\n'), ((21141, 21175), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 8]'], {'dtype': 'tf.float32'}), '([6, 8], dtype=tf.float32)\n', (21149, 21175), True, 'import tensorflow.compat.v2 as tf\n'), ((21189, 21224), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 12]'], {'dtype': 'tf.float32'}), '([6, 12], dtype=tf.float32)\n', (21197, 21224), True, 'import tensorflow.compat.v2 as tf\n'), ((21238, 21273), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[6, 32]'], {'dtype': 'tf.float32'}), '([6, 32], dtype=tf.float32)\n', (21246, 21273), True, 'import tensorflow.compat.v2 as 
tf\n'), ((21553, 21572), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (21561, 21572), True, 'import numpy as np\n'), ((21574, 21591), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (21582, 21591), True, 'import numpy as np\n'), ((21762, 21786), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(1)'], {}), '(1)\n', (21783, 21786), False, 'import keras\n'), ((21788, 21812), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(1)'], {}), '(1)\n', (21809, 21812), False, 'import keras\n'), ((22582, 22606), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(1)'], {}), '(1)\n', (22603, 22606), False, 'import keras\n'), ((22608, 22632), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(1)'], {}), '(1)\n', (22629, 22632), False, 'import keras\n'), ((22916, 22945), 'tensorflow.compat.v2.ones_like', 'tf.ones_like', (['cells[0].kernel'], {}), '(cells[0].kernel)\n', (22928, 22945), True, 'import tensorflow.compat.v2 as tf\n'), ((25100, 25157), 'numpy.random.random', 'np.random.random', (['(num_samples, timesteps, embedding_dim)'], {}), '((num_samples, timesteps, embedding_dim))\n', (25116, 25157), True, 'import numpy as np\n'), ((25171, 25228), 'numpy.random.random', 'np.random.random', (['(num_samples, timesteps, embedding_dim)'], {}), '((num_samples, timesteps, embedding_dim))\n', (25187, 25228), True, 'import numpy as np\n'), ((26885, 26907), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (26896, 26907), False, 'import keras\n'), ((26962, 26984), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {}), '(cell)\n', (26978, 26984), False, 'import keras\n'), ((27030, 27054), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (27048, 27054), False, 'import keras\n'), ((27287, 27314), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (27303, 27314), True, 'import numpy as np\n'), ((27456, 27492), 'keras.layers.RNN.from_config', 'keras.layers.RNN.from_config', 
(['config'], {}), '(config)\n', (27484, 27492), False, 'import keras\n'), ((27538, 27562), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (27556, 27562), False, 'import keras\n'), ((27818, 27841), 'keras.layers.RNN', 'keras.layers.RNN', (['cells'], {}), '(cells)\n', (27834, 27841), False, 'import keras\n'), ((27887, 27911), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (27905, 27911), False, 'import keras\n'), ((28145, 28172), 'numpy.random.random', 'np.random.random', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (28161, 28172), True, 'import numpy as np\n'), ((28314, 28350), 'keras.layers.RNN.from_config', 'keras.layers.RNN.from_config', (['config'], {}), '(config)\n', (28342, 28350), False, 'import keras\n'), ((28396, 28420), 'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (28414, 28420), False, 'import keras\n'), ((29059, 29081), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (29070, 29081), False, 'import keras\n'), ((29112, 29131), 'keras.Input', 'keras.Input', (['(5, 5)'], {}), '((5, 5))\n', (29123, 29131), False, 'import keras\n'), ((28605, 28765), 'keras.testing_infra.test_utils.generate_combinations_with_testcase_name', 'test_utils.generate_combinations_with_testcase_name', ([], {'layer': '[keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM]', 'unroll': '[True, False]'}), '(layer=[keras.layers.\n SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM], unroll=[True,\n False])\n', (28656, 28765), False, 'from keras.testing_infra import test_utils\n'), ((29999, 30021), 'keras.Input', 'keras.Input', (['(None, 5)'], {}), '((None, 5))\n', (30010, 30021), False, 'import keras\n'), ((30052, 30071), 'keras.Input', 'keras.Input', (['(5, 5)'], {}), '((5, 5))\n', (30063, 30071), False, 'import keras\n'), ((29450, 29613), 'keras.testing_infra.test_utils.generate_combinations_with_testcase_name', 'test_utils.generate_combinations_with_testcase_name', ([], 
{'cell': '[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell]', 'unroll': '[True, False]'}), '(cell=[keras.layers.\n SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell], unroll=[\n True, False])\n', (29501, 29613), False, 'from keras.testing_infra import test_utils\n'), ((30912, 31016), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(3)'], {'dropout': '(0.5)', 'kernel_initializer': '"""ones"""', 'recurrent_initializer': '"""zeros"""'}), "(3, dropout=0.5, kernel_initializer='ones',\n recurrent_initializer='zeros')\n", (30938, 31016), False, 'import keras\n'), ((31785, 31818), 'tensorflow.compat.v2.constant', 'tf.constant', (['(1.0)'], {'shape': '(6, 2, 5)'}), '(1.0, shape=(6, 2, 5))\n', (31796, 31818), True, 'import tensorflow.compat.v2 as tf\n'), ((32254, 32276), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (32274, 32276), True, 'import tensorflow.compat.v2 as tf\n'), ((32887, 32911), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(3)'], {}), '(3)\n', (32908, 32911), False, 'import keras\n'), ((32913, 32937), 'keras.layers.LSTMCell', 'keras.layers.LSTMCell', (['(6)'], {}), '(6)\n', (32934, 32937), False, 'import keras\n'), ((39445, 39467), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (39465, 39467), True, 'import tensorflow.compat.v2 as tf\n'), ((39915, 39961), 'numpy.zeros', 'np.zeros', (['(batch, time_step, input_a, input_b)'], {}), '((batch, time_step, input_a, input_b))\n', (39923, 39961), True, 'import numpy as np\n'), ((39975, 40008), 'numpy.zeros', 'np.zeros', (['(batch, unit_a, unit_b)'], {}), '((batch, unit_a, unit_b))\n', (39983, 40008), True, 'import numpy as np\n'), ((40588, 40634), 'numpy.zeros', 'np.zeros', (['(batch, time_step, input_a, input_b)'], {}), '((batch, time_step, input_a, input_b))\n', (40596, 40634), True, 'import numpy as np\n'), ((40648, 40689), 'numpy.zeros', 'np.zeros', (['(batch, unit_a * 4, unit_b * 4)'], 
{}), '((batch, unit_a * 4, unit_b * 4))\n', (40656, 40689), True, 'import numpy as np\n'), ((41581, 41614), 'numpy.zeros', 'np.zeros', (['(batch, unit_a, unit_b)'], {}), '((batch, unit_a, unit_b))\n', (41589, 41614), True, 'import numpy as np\n'), ((42492, 42514), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (42512, 42514), True, 'import tensorflow.compat.v2 as tf\n'), ((42928, 42968), 'numpy.zeros', 'np.zeros', (['(batch, time_step, input_size)'], {}), '((batch, time_step, input_size))\n', (42936, 42968), True, 'import numpy as np\n'), ((42982, 43011), 'numpy.zeros', 'np.zeros', (['(batch, input_size)'], {}), '((batch, input_size))\n', (42990, 43011), True, 'import numpy as np\n'), ((43351, 43373), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (43371, 43373), True, 'import tensorflow.compat.v2 as tf\n'), ((43396, 43419), 'keras.Input', 'keras.Input', (['(None, 10)'], {}), '((None, 10))\n', (43407, 43419), False, 'import keras\n'), ((43963, 43992), 'numpy.random.random', 'np.random.random', (['(batch, 10)'], {}), '((batch, 10))\n', (43979, 43992), True, 'import numpy as np\n'), ((52159, 52181), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (52179, 52181), False, 'import keras\n'), ((53028, 53057), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(5)'], {}), '(5)\n', (53054, 53057), False, 'import keras\n'), ((53074, 53093), 'keras.Input', 'keras.Input', (['(5, 5)'], {}), '((5, 5))\n', (53085, 53093), False, 'import keras\n'), ((53113, 53135), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (53133, 53135), False, 'import keras\n'), ((53156, 53247), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'return_sequences': '(True)', 'zero_output_for_mask': '(True)', 'unroll': 'unroll'}), '(cell, return_sequences=True, zero_output_for_mask=True,\n unroll=unroll)\n', (53172, 53247), False, 'import keras\n'), ((53414, 53438), 
'keras.models.Model', 'keras.models.Model', (['x', 'y'], {}), '(x, y)\n', (53432, 53438), False, 'import keras\n'), ((53626, 53644), 'numpy.ones', 'np.ones', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (53633, 53644), True, 'import numpy as np\n'), ((55328, 55363), 'keras.layers.LSTM', 'keras.layers.LSTM', (['(1)'], {'stateful': '(True)'}), '(1, stateful=True)\n', (55345, 55363), False, 'import keras\n'), ((56172, 56212), 'keras.layers.SimpleRNN', 'keras.layers.SimpleRNN', (['(1)'], {'stateful': '(True)'}), '(1, stateful=True)\n', (56194, 56212), False, 'import keras\n'), ((56425, 56462), 'keras.layers.RNN', 'keras.layers.RNN', (['cell'], {'stateful': '(True)'}), '(cell, stateful=True)\n', (56441, 56462), False, 'import keras\n'), ((57277, 57316), 'numpy.zeros', 'np.zeros', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (57285, 57316), True, 'import numpy as np\n'), ((57330, 57359), 'numpy.zeros', 'np.zeros', (['(batch, output_dim)'], {}), '((batch, output_dim))\n', (57338, 57359), True, 'import numpy as np\n'), ((57393, 57431), 'numpy.ones', 'np.ones', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (57400, 57431), True, 'import numpy as np\n'), ((57485, 57523), 'numpy.ones', 'np.ones', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (57492, 57523), True, 'import numpy as np\n'), ((57706, 57744), 'numpy.ones', 'np.ones', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (57713, 57744), True, 'import numpy as np\n'), ((58115, 58169), 'keras.Input', 'keras.Input', ([], {'shape': '(None, input_dim)', 'batch_size': 'batch'}), '(shape=(None, input_dim), batch_size=batch)\n', (58126, 58169), False, 'import keras\n'), ((58580, 58616), 'keras.Model', 'keras.Model', (['input_layer', 'rnn_output'], {}), '(input_layer, rnn_output)\n', (58591, 58616), False, 'import keras\n'), ((61177, 61192), 'numpy.ones', 'np.ones', (['(4, 2)'], {}), '((4, 2))\n', (61184, 61192), 
True, 'import numpy as np\n'), ((61229, 61244), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (61236, 61244), True, 'import numpy as np\n'), ((62765, 62804), 'numpy.zeros', 'np.zeros', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (62773, 62804), True, 'import numpy as np\n'), ((62818, 62847), 'numpy.zeros', 'np.zeros', (['(batch, output_dim)'], {}), '((batch, output_dim))\n', (62826, 62847), True, 'import numpy as np\n'), ((62881, 62919), 'numpy.ones', 'np.ones', (['(batch, timesteps, input_dim)'], {}), '((batch, timesteps, input_dim))\n', (62888, 62919), True, 'import numpy as np\n'), ((64136, 64158), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (64156, 64158), False, 'import keras\n'), ((64852, 64874), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (64872, 64874), False, 'import keras\n'), ((67273, 67295), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (67293, 67295), False, 'import keras\n'), ((68372, 68394), 'keras.layers.Masking', 'keras.layers.Masking', ([], {}), '()\n', (68392, 68394), False, 'import keras\n'), ((69582, 69623), 'keras.backend.reverse', 'keras.backend.reverse', (['output_ragged', '[1]'], {}), '(output_ragged, [1])\n', (69603, 69623), False, 'import keras\n'), ((62968, 63102), 'keras.testing_infra.test_utils.generate_combinations_with_testcase_name', 'test_utils.generate_combinations_with_testcase_name', ([], {'layer': '[keras.layers.SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM]'}), '(layer=[keras.layers.\n SimpleRNN, gru_v1.GRU, lstm_v1.LSTM, gru.GRU, lstm.LSTM])\n', (63019, 63102), False, 'from keras.testing_infra import test_utils\n'), ((70520, 70539), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (70528, 70539), True, 'import numpy as np\n'), ((70541, 70557), 'numpy.zeros', 'np.zeros', (['(6, 5)'], {}), '((6, 5))\n', (70549, 70557), True, 'import numpy as np\n'), ((75208, 75270), 
'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""bij,ijkl->bkl"""', 'prev_output', 'self.recurring_kernel'], {}), "('bij,ijkl->bkl', prev_output, self.recurring_kernel)\n", (75217, 75270), True, 'import tensorflow.compat.v2 as tf\n'), ((76315, 76347), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[unit_2, unit_3]'], {}), '([unit_2, unit_3])\n', (76329, 76347), True, 'import tensorflow.compat.v2 as tf\n'), ((2465, 2496), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (2494, 2496), False, 'from keras.testing_infra import test_utils\n'), ((2942, 2973), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (2971, 2973), False, 'from keras.testing_infra import test_utils\n'), ((3565, 3603), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (3582, 3603), False, 'import keras\n'), ((4045, 4076), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (4074, 4076), False, 'from keras.testing_infra import test_utils\n'), ((4653, 4684), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (4682, 4684), False, 'from keras.testing_infra import test_utils\n'), ((5623, 5661), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (5640, 5661), False, 'import keras\n'), ((6336, 6367), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (6365, 6367), False, 'from keras.testing_infra import test_utils\n'), ((7298, 7329), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (7327, 7329), False, 'from keras.testing_infra import test_utils\n'), ((8884, 8922), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (8901, 8922), 
False, 'import keras\n'), ((9449, 9480), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (9478, 9480), False, 'from keras.testing_infra import test_utils\n'), ((9869, 9900), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (9898, 9900), False, 'from keras.testing_infra import test_utils\n'), ((10912, 10943), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (10941, 10943), False, 'from keras.testing_infra import test_utils\n'), ((11806, 11837), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (11835, 11837), False, 'from keras.testing_infra import test_utils\n'), ((12567, 12598), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (12596, 12598), False, 'from keras.testing_infra import test_utils\n'), ((13073, 13104), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (13102, 13104), False, 'from keras.testing_infra import test_utils\n'), ((14301, 14332), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (14330, 14332), False, 'from keras.testing_infra import test_utils\n'), ((14387, 14406), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (14395, 14406), True, 'import numpy as np\n'), ((14408, 14424), 'numpy.zeros', 'np.zeros', (['(6, 3)'], {}), '((6, 3))\n', (14416, 14424), True, 'import numpy as np\n'), ((15889, 15920), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (15918, 15920), False, 'from keras.testing_infra import test_utils\n'), ((15975, 15994), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (15983, 15994), True, 'import numpy as np\n'), ((15996, 16012), 'numpy.zeros', 'np.zeros', (['(6, 3)'], 
{}), '((6, 3))\n', (16004, 16012), True, 'import numpy as np\n'), ((16430, 16461), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (16459, 16461), False, 'from keras.testing_infra import test_utils\n'), ((16516, 16535), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (16524, 16535), True, 'import numpy as np\n'), ((16537, 16553), 'numpy.zeros', 'np.zeros', (['(6, 3)'], {}), '((6, 3))\n', (16545, 16553), True, 'import numpy as np\n'), ((17605, 17636), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (17634, 17636), False, 'from keras.testing_infra import test_utils\n'), ((18128, 18159), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (18157, 18159), False, 'from keras.testing_infra import test_utils\n'), ((18736, 18767), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (18765, 18767), False, 'from keras.testing_infra import test_utils\n'), ((18822, 18841), 'numpy.zeros', 'np.zeros', (['(6, 5, 5)'], {}), '((6, 5, 5))\n', (18830, 18841), True, 'import numpy as np\n'), ((18843, 18860), 'numpy.zeros', 'np.zeros', (['(6, 32)'], {}), '((6, 32))\n', (18851, 18860), True, 'import numpy as np\n'), ((18862, 18878), 'numpy.zeros', 'np.zeros', (['(6, 3)'], {}), '((6, 3))\n', (18870, 18878), True, 'import numpy as np\n'), ((20771, 20802), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (20800, 20802), False, 'from keras.testing_infra import test_utils\n'), ((21481, 21512), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (21510, 21512), False, 'from keras.testing_infra import test_utils\n'), ((26251, 26289), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (26268, 26289), False, 
'import keras\n'), ((29246, 29277), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (29275, 29277), False, 'from keras.testing_infra import test_utils\n'), ((30182, 30213), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (30211, 30213), False, 'from keras.testing_infra import test_utils\n'), ((31229, 31333), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(3)'], {'dropout': '(0.5)', 'kernel_initializer': '"""ones"""', 'recurrent_initializer': '"""zeros"""'}), "(3, dropout=0.5, kernel_initializer='ones',\n recurrent_initializer='zeros')\n", (31255, 31333), False, 'import keras\n'), ((31446, 31550), 'keras.layers.SimpleRNNCell', 'keras.layers.SimpleRNNCell', (['(3)'], {'dropout': '(0.5)', 'kernel_initializer': '"""ones"""', 'recurrent_initializer': '"""zeros"""'}), "(3, dropout=0.5, kernel_initializer='ones',\n recurrent_initializer='zeros')\n", (31472, 31550), False, 'import keras\n'), ((31889, 31911), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (31909, 31911), True, 'import tensorflow.compat.v2 as tf\n'), ((36588, 36619), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (36617, 36619), False, 'from keras.testing_infra import test_utils\n'), ((36674, 36698), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (36682, 36698), True, 'import numpy as np\n'), ((36700, 36728), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (36708, 36728), True, 'import numpy as np\n'), ((36744, 36769), 'numpy.zeros', 'np.zeros', (['(batch, t, o21)'], {}), '((batch, t, o21))\n', (36752, 36769), True, 'import numpy as np\n'), ((36771, 36801), 'numpy.zeros', 'np.zeros', (['(batch, t, o22, o23)'], {}), '((batch, t, o22, o23))\n', (36779, 36801), True, 'import numpy as np\n'), ((37963, 37994), 
'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (37992, 37994), False, 'from keras.testing_infra import test_utils\n'), ((38049, 38073), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (38057, 38073), True, 'import numpy as np\n'), ((38075, 38103), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (38083, 38103), True, 'import numpy as np\n'), ((38119, 38144), 'numpy.zeros', 'np.zeros', (['(batch, t, o21)'], {}), '((batch, t, o21))\n', (38127, 38144), True, 'import numpy as np\n'), ((38146, 38176), 'numpy.zeros', 'np.zeros', (['(batch, t, o22, o23)'], {}), '((batch, t, o22, o23))\n', (38154, 38176), True, 'import numpy as np\n'), ((38625, 38656), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (38654, 38656), False, 'from keras.testing_infra import test_utils\n'), ((38876, 38910), 'tensorflow.python.training.tracking.util.list_objects', 'trackable_util.list_objects', (['model'], {}), '(model)\n', (38903, 38910), True, 'from tensorflow.python.training.tracking import util as trackable_util\n'), ((39830, 39861), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (39859, 39861), False, 'from keras.testing_infra import test_utils\n'), ((40503, 40534), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (40532, 40534), False, 'from keras.testing_infra import test_utils\n'), ((41352, 41383), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (41381, 41383), False, 'from keras.testing_infra import test_utils\n'), ((41455, 41501), 'numpy.zeros', 'np.zeros', (['(batch, time_step, input_a, input_b)'], {}), '((batch, time_step, input_a, input_b))\n', (41463, 41501), True, 'import numpy as np\n'), ((41519, 41552), 'numpy.zeros', 'np.zeros', (['(batch, 
unit_a, unit_b)'], {}), '((batch, unit_a, unit_b))\n', (41527, 41552), True, 'import numpy as np\n'), ((42843, 42874), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (42872, 42874), False, 'from keras.testing_infra import test_utils\n'), ((43647, 43663), 'tensorflow.compat.v2.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (43655, 43663), True, 'import tensorflow.compat.v2 as tf\n'), ((45298, 45329), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (45327, 45329), False, 'from keras.testing_infra import test_utils\n'), ((45384, 45408), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (45392, 45408), True, 'import numpy as np\n'), ((45410, 45438), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (45418, 45438), True, 'import numpy as np\n'), ((45454, 45475), 'numpy.zeros', 'np.zeros', (['(batch, o1)'], {}), '((batch, o1))\n', (45462, 45475), True, 'import numpy as np\n'), ((45477, 45502), 'numpy.zeros', 'np.zeros', (['(batch, o2, o3)'], {}), '((batch, o2, o3))\n', (45485, 45502), True, 'import numpy as np\n'), ((46283, 46314), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (46312, 46314), False, 'from keras.testing_infra import test_utils\n'), ((46369, 46393), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (46377, 46393), True, 'import numpy as np\n'), ((46395, 46423), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (46403, 46423), True, 'import numpy as np\n'), ((46439, 46460), 'numpy.zeros', 'np.zeros', (['(batch, o1)'], {}), '((batch, o1))\n', (46447, 46460), True, 'import numpy as np\n'), ((46462, 46487), 'numpy.zeros', 'np.zeros', (['(batch, o2, o3)'], {}), '((batch, o2, o3))\n', (46470, 46487), True, 'import numpy as np\n'), ((47446, 47477), 
'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (47475, 47477), False, 'from keras.testing_infra import test_utils\n'), ((47532, 47556), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (47540, 47556), True, 'import numpy as np\n'), ((47558, 47586), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (47566, 47586), True, 'import numpy as np\n'), ((47602, 47626), 'numpy.zeros', 'np.zeros', (['(batch, t, o1)'], {}), '((batch, t, o1))\n', (47610, 47626), True, 'import numpy as np\n'), ((47628, 47656), 'numpy.zeros', 'np.zeros', (['(batch, t, o2, o3)'], {}), '((batch, t, o2, o3))\n', (47636, 47656), True, 'import numpy as np\n'), ((48478, 48509), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (48507, 48509), False, 'from keras.testing_infra import test_utils\n'), ((48564, 48588), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (48572, 48588), True, 'import numpy as np\n'), ((48590, 48618), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (48598, 48618), True, 'import numpy as np\n'), ((48634, 48658), 'numpy.zeros', 'np.zeros', (['(batch, t, o1)'], {}), '((batch, t, o1))\n', (48642, 48658), True, 'import numpy as np\n'), ((48660, 48688), 'numpy.zeros', 'np.zeros', (['(batch, t, o2, o3)'], {}), '((batch, t, o2, o3))\n', (48668, 48688), True, 'import numpy as np\n'), ((49795, 49826), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (49824, 49826), False, 'from keras.testing_infra import test_utils\n'), ((49898, 49922), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (49906, 49922), True, 'import numpy as np\n'), ((49940, 49968), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (49948, 49968), True, 'import numpy as np\n'), 
((49986, 50007), 'numpy.zeros', 'np.zeros', (['(batch, o1)'], {}), '((batch, o1))\n', (49994, 50007), True, 'import numpy as np\n'), ((50025, 50050), 'numpy.zeros', 'np.zeros', (['(batch, o2, o3)'], {}), '((batch, o2, o3))\n', (50033, 50050), True, 'import numpy as np\n'), ((50080, 50104), 'numpy.zeros', 'np.zeros', (['(batch, t, o1)'], {}), '((batch, t, o1))\n', (50088, 50104), True, 'import numpy as np\n'), ((50106, 50134), 'numpy.zeros', 'np.zeros', (['(batch, t, o2, o3)'], {}), '((batch, t, o2, o3))\n', (50114, 50134), True, 'import numpy as np\n'), ((51178, 51209), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (51207, 51209), False, 'from keras.testing_infra import test_utils\n'), ((51281, 51305), 'numpy.zeros', 'np.zeros', (['(batch, t, i1)'], {}), '((batch, t, i1))\n', (51289, 51305), True, 'import numpy as np\n'), ((51323, 51351), 'numpy.zeros', 'np.zeros', (['(batch, t, i2, i3)'], {}), '((batch, t, i2, i3))\n', (51331, 51351), True, 'import numpy as np\n'), ((51369, 51390), 'numpy.zeros', 'np.zeros', (['(batch, o1)'], {}), '((batch, o1))\n', (51377, 51390), True, 'import numpy as np\n'), ((51408, 51433), 'numpy.zeros', 'np.zeros', (['(batch, o2, o3)'], {}), '((batch, o2, o3))\n', (51416, 51433), True, 'import numpy as np\n'), ((51463, 51487), 'numpy.zeros', 'np.zeros', (['(batch, t, o1)'], {}), '((batch, t, o1))\n', (51471, 51487), True, 'import numpy as np\n'), ((51489, 51517), 'numpy.zeros', 'np.zeros', (['(batch, t, o2, o3)'], {}), '((batch, t, o2, o3))\n', (51497, 51517), True, 'import numpy as np\n'), ((52496, 52527), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (52525, 52527), False, 'from keras.testing_infra import test_utils\n'), ((54468, 54499), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (54497, 54499), False, 'from keras.testing_infra import test_utils\n'), ((55531, 
55549), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (55538, 55549), True, 'import numpy as np\n'), ((55551, 55566), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55558, 55566), True, 'import numpy as np\n'), ((55568, 55583), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55575, 55583), True, 'import numpy as np\n'), ((55641, 55659), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (55648, 55659), True, 'import numpy as np\n'), ((55661, 55676), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55668, 55676), True, 'import numpy as np\n'), ((55678, 55693), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55685, 55693), True, 'import numpy as np\n'), ((55780, 55798), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (55787, 55798), True, 'import numpy as np\n'), ((55800, 55815), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55807, 55815), True, 'import numpy as np\n'), ((55817, 55832), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (55824, 55832), True, 'import numpy as np\n'), ((57192, 57223), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (57221, 57223), False, 'from keras.testing_infra import test_utils\n'), ((57592, 57611), 'numpy.ones', 'np.ones', (['(batch, s)'], {}), '((batch, s))\n', (57599, 57611), True, 'import numpy as np\n'), ((58410, 58486), 'keras.layers.GRU', 'keras.layers.GRU', ([], {'units': 'output_dim', 'return_sequences': '(True)', 'stateful': 'stateful'}), '(units=output_dim, return_sequences=True, stateful=stateful)\n', (58426, 58486), False, 'import keras\n'), ((60740, 60777), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {'dtype': 'dtype'}), '((batch_size, 1), dtype=dtype)\n', (60747, 60777), True, 'import numpy as np\n'), ((62680, 62711), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (62709, 62711), False, 'from 
keras.testing_infra import test_utils\n'), ((65809, 65840), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (65838, 65840), False, 'from keras.testing_infra import test_utils\n'), ((66323, 66354), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (66352, 66354), False, 'from keras.testing_infra import test_utils\n'), ((70448, 70479), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (70477, 70479), False, 'from keras.testing_infra import test_utils\n'), ((76245, 76277), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[unit_2, unit_3]'], {}), '([unit_2, unit_3])\n', (76259, 76277), True, 'import tensorflow.compat.v2 as tf\n'), ((1902, 1938), 'numpy.random.random', 'np.random.random', (['(input_dim, units)'], {}), '((input_dim, units))\n', (1918, 1938), True, 'import numpy as np\n'), ((2067, 2105), 'keras.backend.dot', 'keras.backend.dot', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (2084, 2105), False, 'import keras\n'), ((3356, 3392), 'numpy.random.random', 'np.random.random', (['(input_dim, units)'], {}), '((input_dim, units))\n', (3372, 3392), True, 'import numpy as np\n'), ((5691, 5744), 'keras.backend.dot', 'keras.backend.dot', (['prev_output', 'self.recurrent_kernel'], {}), '(prev_output, self.recurrent_kernel)\n', (5708, 5744), False, 'import keras\n'), ((8952, 9005), 'keras.backend.dot', 'keras.backend.dot', (['prev_output', 'self.recurrent_kernel'], {}), '(prev_output, self.recurrent_kernel)\n', (8969, 9005), False, 'import keras\n'), ((10253, 10279), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (10265, 10279), True, 'import tensorflow.compat.v2 as tf\n'), ((10735, 10761), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (10747, 10761), True, 'import tensorflow.compat.v2 as 
tf\n'), ((11245, 11271), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (11257, 11271), True, 'import tensorflow.compat.v2 as tf\n'), ((11630, 11656), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (11642, 11656), True, 'import tensorflow.compat.v2 as tf\n'), ((12132, 12158), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (12144, 12158), True, 'import tensorflow.compat.v2 as tf\n'), ((12389, 12415), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (12401, 12415), True, 'import tensorflow.compat.v2 as tf\n'), ((13419, 13445), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (13431, 13445), True, 'import tensorflow.compat.v2 as tf\n'), ((13656, 13682), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['t', '[1, 0, 2]'], {}), '(t, [1, 0, 2])\n', (13668, 13682), True, 'import tensorflow.compat.v2 as tf\n'), ((23035, 23066), 'keras.engine.base_layer_utils.call_context', 'base_layer_utils.call_context', ([], {}), '()\n', (23064, 23066), False, 'from keras.engine import base_layer_utils\n'), ((26319, 26372), 'keras.backend.dot', 'keras.backend.dot', (['prev_output', 'self.recurrent_kernel'], {}), '(prev_output, self.recurrent_kernel)\n', (26336, 26372), False, 'import keras\n'), ((27175, 27206), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (27204, 27206), False, 'from keras.testing_infra import test_utils\n'), ((28032, 28063), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (28061, 28063), False, 'from keras.testing_infra import test_utils\n'), ((31943, 31986), 'tensorflow.compat.v2.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (31984, 31986), True, 'import 
tensorflow.compat.v2 as tf\n'), ((53559, 53590), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (53588, 53590), False, 'from keras.testing_infra import test_utils\n'), ((58282, 58310), 'numpy.ones', 'np.ones', (['(batch, output_dim)'], {}), '((batch, output_dim))\n', (58289, 58310), True, 'import numpy as np\n'), ((58737, 58768), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (58766, 58768), False, 'from keras.testing_infra import test_utils\n'), ((76145, 76177), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[unit_2, unit_3]'], {}), '([unit_2, unit_3])\n', (76159, 76177), True, 'import tensorflow.compat.v2 as tf\n')] |
# import packages
import numpy as np
from matplotlib.colors import ListedColormap  # imported once (the original had a duplicate import)

# Build white-to-primary linear colormaps.  Each lookup table is a (256, 4)
# RGBA array initialized to all-ones (white, fully opaque); fading two of the
# three RGB channels from 1 to 0 leaves the remaining pure primary color at
# the high end.  Alpha stays at 1 throughout.

# red: fade green and blue
Red_colors = np.ones([256, 4])
Red_colors[:, 1] = np.linspace(1, 0, 256)
Red_colors[:, 2] = np.linspace(1, 0, 256)
myReds = ListedColormap(Red_colors)
myReds_r = ListedColormap(np.flipud(Red_colors))  # reversed variant

# blue: fade red and green
Blue_colors = np.ones([256, 4])
Blue_colors[:, 0] = np.linspace(1, 0, 256)
Blue_colors[:, 1] = np.linspace(1, 0, 256)
myBlues = ListedColormap(Blue_colors)
myBlues_r = ListedColormap(np.flipud(Blue_colors))

# green: fade red and blue
Green_colors = np.ones([256, 4])
Green_colors[:, 0] = np.linspace(1, 0, 256)
Green_colors[:, 2] = np.linspace(1, 0, 256)
myGreens = ListedColormap(Green_colors)
myGreens_r = ListedColormap(np.flipud(Green_colors))

# all generated colormaps, forward and reversed
_myCmaps = [myReds, myReds_r,
            myBlues, myBlues_r,
            myGreens, myGreens_r]
def transparent_cmap(cmap, increasing_alpha=True, N=255, max_alpha=1):
    """Set a linearly changing alpha ramp on *cmap* and return it.

    NOTE(review): despite the wording "copy", ``mycmap = cmap`` is only an
    alias -- the caller's colormap object is modified in place.

    :param cmap: matplotlib colormap whose lookup table is rewritten
    :param increasing_alpha: if True, ramp alpha from 0 up to ``max_alpha``;
        otherwise ramp from ``max_alpha`` down to 0
    :param N: assumes the colormap's private ``_lut`` has N+4 rows (matches
        the default 256-entry colormaps) -- TODO confirm for other sizes
    :param max_alpha: alpha value at the opaque end of the ramp
    :return: the (mutated) colormap
    """
    mycmap = cmap
    mycmap._init()  # force creation of the private _lut array before writing to it
    if increasing_alpha:
        mycmap._lut[:,-1] = np.linspace(0, max_alpha, N+4)
    else:
        mycmap._lut[:,-1] = np.linspace(max_alpha, 0, N+4)
    return mycmap
def black_gradient(color, num_colors=256, max_alpha=1, transparent=False):
    """Build a linear colormap ramping from black up to ``color``.

    :param color: RGB(A) sequence; only the first three components are used
    :param num_colors: number of entries in the lookup table
    :param max_alpha: alpha at the bright end of the ramp
    :param transparent: if True, alpha also ramps from 0 to ``max_alpha``;
        otherwise alpha is constant at ``max_alpha``
    :return: ListedColormap built from the (num_colors, 4) RGBA table
    """
    rgb = np.array(color[:3])
    lut = np.zeros([num_colors, 4])
    # each channel ramps linearly from 0 (black) up to its target value
    for channel, target in enumerate(rgb):
        lut[:, channel] = np.linspace(0, target, num_colors)
    lut[:, -1] = np.linspace(0, max_alpha, num_colors) if transparent else max_alpha
    return ListedColormap(lut)
def transparent_gradient(color, num_colors=256, max_alpha=1):
    """Build a colormap with fixed RGB color and a transparent-to-opaque ramp.

    Every entry shares the same RGB value; only the alpha channel ramps
    linearly from 0 up to ``max_alpha``.

    :param color: RGB(A) sequence; only the first three components are used
    :param num_colors: number of entries in the lookup table
    :param max_alpha: alpha at the opaque end of the ramp
    :return: ListedColormap built from the (num_colors, 4) RGBA table
    """
    rgb = np.array(color[:3])
    lut = np.zeros([num_colors, 4])
    lut[:, :3] = rgb  # broadcast the constant RGB across all rows
    lut[:, -1] = np.linspace(0, max_alpha, num_colors)
    return ListedColormap(lut)
def normlize_color(mat, vmin=None, vmax=None):
    """Linearly normalize *mat* into [0, 1], clipping extremes first.

    Values below ``vmin`` / above ``vmax`` are clipped, then the clipped
    data is rescaled by its own (NaN-ignoring) min/max, so the output
    always spans the full [0, 1] range.  NaNs are passed through.

    :param mat: array-like of values to normalize (not modified)
    :param vmin: lower clip bound; defaults to ``nanmin(mat)``
    :param vmax: upper clip bound; defaults to ``nanmax(mat)``
    :return: float ndarray in [0, 1] (all-NaN/constant input yields NaN,
        as in the original implementation)
    """
    _mat = np.array(mat)  # np.array already copies; the extra .copy() was redundant
    if vmin is None:
        vmin = np.nanmin(_mat)
    if vmax is None:
        vmax = np.nanmax(_mat)
    # clip into [vmin, vmax]; np.clip replaces the two boolean-mask writes
    # (and, unlike in-place assignment into an int array, does not silently
    # truncate float bounds)
    _mat = np.clip(_mat, vmin, vmax)
    lo, hi = np.nanmin(_mat), np.nanmax(_mat)
    return (_mat - lo) / (hi - lo)
| [
"numpy.ones",
"numpy.flipud",
"matplotlib.colors.ListedColormap",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.nanmax",
"numpy.nanmin"
] | [((168, 185), 'numpy.ones', 'np.ones', (['[256, 4]'], {}), '([256, 4])\n', (175, 185), True, 'import numpy as np\n'), ((203, 225), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (214, 225), True, 'import numpy as np\n'), ((242, 264), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (253, 264), True, 'import numpy as np\n'), ((272, 298), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['Red_colors'], {}), '(Red_colors)\n', (286, 298), False, 'from matplotlib.colors import ListedColormap\n'), ((369, 386), 'numpy.ones', 'np.ones', (['[256, 4]'], {}), '([256, 4])\n', (376, 386), True, 'import numpy as np\n'), ((405, 427), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (416, 427), True, 'import numpy as np\n'), ((445, 467), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (456, 467), True, 'import numpy as np\n'), ((476, 503), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['Blue_colors'], {}), '(Blue_colors)\n', (490, 503), False, 'from matplotlib.colors import ListedColormap\n'), ((578, 595), 'numpy.ones', 'np.ones', (['[256, 4]'], {}), '([256, 4])\n', (585, 595), True, 'import numpy as np\n'), ((615, 637), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (626, 637), True, 'import numpy as np\n'), ((656, 678), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (667, 678), True, 'import numpy as np\n'), ((688, 716), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['Green_colors'], {}), '(Green_colors)\n', (702, 716), False, 'from matplotlib.colors import ListedColormap\n'), ((325, 346), 'numpy.flipud', 'np.flipud', (['Red_colors'], {}), '(Red_colors)\n', (334, 346), True, 'import numpy as np\n'), ((531, 553), 'numpy.flipud', 'np.flipud', (['Blue_colors'], {}), '(Blue_colors)\n', (540, 553), True, 'import numpy as np\n'), ((745, 768), 'numpy.flipud', 
'np.flipud', (['Green_colors'], {}), '(Green_colors)\n', (754, 768), True, 'import numpy as np\n'), ((1358, 1377), 'numpy.array', 'np.array', (['color[:3]'], {}), '(color[:3])\n', (1366, 1377), True, 'import numpy as np\n'), ((1416, 1441), 'numpy.zeros', 'np.zeros', (['[num_colors, 4]'], {}), '([num_colors, 4])\n', (1424, 1441), True, 'import numpy as np\n'), ((1671, 1694), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['_colors'], {}), '(_colors)\n', (1685, 1694), False, 'from matplotlib.colors import ListedColormap\n'), ((1822, 1841), 'numpy.array', 'np.array', (['color[:3]'], {}), '(color[:3])\n', (1830, 1841), True, 'import numpy as np\n'), ((1880, 1905), 'numpy.zeros', 'np.zeros', (['[num_colors, 4]'], {}), '([num_colors, 4])\n', (1888, 1905), True, 'import numpy as np\n'), ((1982, 2019), 'numpy.linspace', 'np.linspace', (['(0)', 'max_alpha', 'num_colors'], {}), '(0, max_alpha, num_colors)\n', (1993, 2019), True, 'import numpy as np\n'), ((2032, 2055), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['_colors'], {}), '(_colors)\n', (2046, 2055), False, 'from matplotlib.colors import ListedColormap\n'), ((1100, 1132), 'numpy.linspace', 'np.linspace', (['(0)', 'max_alpha', '(N + 4)'], {}), '(0, max_alpha, N + 4)\n', (1111, 1132), True, 'import numpy as np\n'), ((1169, 1201), 'numpy.linspace', 'np.linspace', (['max_alpha', '(0)', '(N + 4)'], {}), '(max_alpha, 0, N + 4)\n', (1180, 1201), True, 'import numpy as np\n'), ((1501, 1531), 'numpy.linspace', 'np.linspace', (['(0)', '_c', 'num_colors'], {}), '(0, _c, num_colors)\n', (1512, 1531), True, 'import numpy as np\n'), ((1577, 1614), 'numpy.linspace', 'np.linspace', (['(0)', 'max_alpha', 'num_colors'], {}), '(0, max_alpha, num_colors)\n', (1588, 1614), True, 'import numpy as np\n'), ((2216, 2231), 'numpy.nanmin', 'np.nanmin', (['_mat'], {}), '(_mat)\n', (2225, 2231), True, 'import numpy as np\n'), ((2268, 2283), 'numpy.nanmax', 'np.nanmax', (['_mat'], {}), '(_mat)\n', (2277, 2283), True, 'import 
numpy as np\n'), ((2159, 2172), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (2167, 2172), True, 'import numpy as np\n'), ((2403, 2418), 'numpy.nanmin', 'np.nanmin', (['_mat'], {}), '(_mat)\n', (2412, 2418), True, 'import numpy as np\n'), ((2423, 2438), 'numpy.nanmax', 'np.nanmax', (['_mat'], {}), '(_mat)\n', (2432, 2438), True, 'import numpy as np\n'), ((2441, 2456), 'numpy.nanmin', 'np.nanmin', (['_mat'], {}), '(_mat)\n', (2450, 2456), True, 'import numpy as np\n')] |
from threading import Lock
import numpy as np
import sklearn
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import tensorflow.keras.backend as K
class LocalModel(object):
    """
    Local Model
    Each Client has its own model. The Weights will be sent to the Server.
    The Server updates a Global Model and sends this back to the clients.

    A dedicated tf.Graph/tf.Session pair is created per model, and all
    graph operations are serialized through ``update_lock`` so a model can
    be used safely from multiple threads within one process.
    """

    def __init__(self, model_config, data_collected, optimizer):
        """
        Create Local Model.
        Retrieved Configuration from server and local client data are applied here.
        :param model_config: dict with 'model_json', 'batch_size' and
            'epoch_per_round'
        :param data_collected: dict with 'x_train', 'y_train', 'x_test',
            'y_test' arrays
        :param optimizer: dict with 'loss', 'metrics' and 'optimizer'
            (a zero-argument optimizer factory)
        """
        assert optimizer is not None, "please provide an optimizer dict"
        assert optimizer['loss'] is not None, "a loss function must be set in the optimizer dict"
        assert optimizer['metrics'] is not None, "a metric must be defined in the optimizer dict"
        assert optimizer['optimizer'] is not None, "a optimizer must be defined in the optimizer dict"
        assert data_collected["x_train"] is not None, "X matrix for training must be provided"
        assert data_collected["y_train"] is not None, "y vector for training must be provided"
        assert data_collected["x_test"] is not None, "X matrix for testing must be provided"
        assert data_collected["y_test"] is not None, "y vector for testing must be provided"
        self.model_config = model_config
        self.update_lock = Lock()
        # each local model owns its own graph/session so several models can
        # coexist in one process without clobbering each other's state
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.session = tf.Session()
            with self.session.as_default():
                self.model = model_from_json(model_config['model_json'])
                if len(self.model.layers) >= 32:  # hardcoded if model is lfw: freeze all but the last 4 layers
                    for l in self.model.layers[:-4]:
                        l.trainable = False
                self.optimizer = optimizer
                self.model.compile(loss=optimizer['loss'],
                                   optimizer=optimizer['optimizer'](),
                                   metrics=optimizer['metrics'])
                # build the predict function eagerly, while this graph is
                # the default, so later predictions from other threads work
                self.model._make_predict_function()
        self.x_train = np.array(data_collected["x_train"])
        self.y_train = np.array(data_collected["y_train"])
        self.x_test = np.array(data_collected["x_test"])
        self.y_test = np.array(data_collected["y_test"])

    def get_weights(self):
        """
        Get Keras Model Weights
        :return: list of weight arrays
        """
        return self.model.get_weights()

    def set_weights(self, new_weights):
        """
        Sets the Keras model Weights
        :param new_weights: list of weight arrays matching get_weights()
        """
        with self.update_lock:
            with self.graph.as_default():
                with self.session.as_default():
                    self.model.set_weights(new_weights)

    def get_batch(self, x, y):
        """
        Shuffle (x, y) and trim it to a whole number of batches.
        :param x: sample matrix
        :param y: label vector/matrix aligned with x
        :return: shuffled x, y truncated to a multiple of batch_size
        """
        x, y = sklearn.utils.shuffle(x, y)
        residual = len(x) % self.model_config['batch_size']
        # Bug fix: when len(x) is an exact multiple of batch_size the old
        # code computed x[:-0], which is an EMPTY slice -- only trim when a
        # residual actually exists.
        if residual:
            return x[:-residual], y[:-residual]
        return x, y

    def train_one_round(self):
        """
        Train one round
        :return: (weights, train_loss, train_accuracy)
        """
        x_train, y_train = self.get_batch(self.x_train, self.y_train)
        with self.update_lock:
            with self.graph.as_default():
                with self.session.as_default():
                    self.model.fit(x_train, y_train,
                                   epochs=self.model_config['epoch_per_round'],
                                   batch_size=self.model_config['batch_size'],
                                   verbose=1,
                                   validation_data=(x_train, y_train))
                    score = self.model.evaluate(x_train, y_train, batch_size=self.model_config['batch_size'], verbose=0)
                    score[0] = np.mean(score[0])
                    print('Train loss:', score[0])
                    print('Train accuracy:', score[1])
                    return self.model.get_weights(), score[0], score[1]

    def evaluate(self):
        """
        Evaluation of Test set after global model converged
        :return: [test_loss, test_accuracy]
        """
        with self.update_lock:
            with self.graph.as_default():
                with self.session.as_default():
                    x_test, y_test = self.get_batch(self.x_test, self.y_test)
                    score = self.model.evaluate(x_test, y_test, batch_size=self.model_config['batch_size'], verbose=0)
                    score[0] = np.mean(score[0])
                    print('Test loss:', score[0])
                    print('Test accuracy:', score[1])
                    return score

    def save_model(self, path, cid):
        """
        Persist the local Keras model as an h5 file.
        :param path: target directory
        :param cid: client id, used as the filename prefix
        """
        print(f'saving local model to {path}/{cid}_local_model.h5')
        with self.update_lock:
            with self.graph.as_default():
                with self.session.as_default():
                    self.model.save(f'{path}/{cid}_local_model.h5')

    def save(self, path, cid, train_indices, test_indices):
        """
        Save this client's data-split indices and its local model.
        :param path: target directory
        :param cid: client id, used as the filename prefix
        :param train_indices: indices of this client's training samples
        :param test_indices: indices of this client's test samples
        """
        save_indices = {"train": train_indices, "test": test_indices}
        print(f'saving indices to {path}/{cid}_indices.npy')
        np.save(f'{path}/{cid}_indices.npy', save_indices)
        self.save_model(path, cid)
| [
"tensorflow.Graph",
"numpy.mean",
"tensorflow.keras.models.model_from_json",
"threading.Lock",
"sklearn.utils.shuffle",
"tensorflow.Session",
"numpy.array",
"numpy.save"
] | [((1485, 1491), 'threading.Lock', 'Lock', ([], {}), '()\n', (1489, 1491), False, 'from threading import Lock\n'), ((1513, 1523), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1521, 1523), True, 'import tensorflow as tf\n'), ((2207, 2242), 'numpy.array', 'np.array', (["data_collected['x_train']"], {}), "(data_collected['x_train'])\n", (2215, 2242), True, 'import numpy as np\n'), ((2266, 2301), 'numpy.array', 'np.array', (["data_collected['y_train']"], {}), "(data_collected['y_train'])\n", (2274, 2301), True, 'import numpy as np\n'), ((2324, 2358), 'numpy.array', 'np.array', (["data_collected['x_test']"], {}), "(data_collected['x_test'])\n", (2332, 2358), True, 'import numpy as np\n'), ((2381, 2415), 'numpy.array', 'np.array', (["data_collected['y_test']"], {}), "(data_collected['y_test'])\n", (2389, 2415), True, 'import numpy as np\n'), ((3032, 3059), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['x', 'y'], {}), '(x, y)\n', (3053, 3059), False, 'import sklearn\n'), ((5504, 5554), 'numpy.save', 'np.save', (['f"""{path}/{cid}_indices.npy"""', 'save_indices'], {}), "(f'{path}/{cid}_indices.npy', save_indices)\n", (5511, 5554), True, 'import numpy as np\n'), ((1589, 1601), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1599, 1601), True, 'import tensorflow as tf\n'), ((1675, 1718), 'tensorflow.keras.models.model_from_json', 'model_from_json', (["model_config['model_json']"], {}), "(model_config['model_json'])\n", (1690, 1718), False, 'from tensorflow.keras.models import model_from_json\n'), ((3954, 3971), 'numpy.mean', 'np.mean', (['score[0]'], {}), '(score[0])\n', (3961, 3971), True, 'import numpy as np\n'), ((4625, 4642), 'numpy.mean', 'np.mean', (['score[0]'], {}), '(score[0])\n', (4632, 4642), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Sum the risk levels of all local minima in a digit heightmap
(Advent of Code 2021, day 9, part 1): read the grid from stdin and
print sum(height + 1) over all cells strictly lower than their four
orthogonal neighbours."""
import sys

import numpy as np


def parse_heightmap(lines):
    """Parse digit-grid lines into a 2-D uint8 array, skipping blank lines."""
    return np.array(
        [[int(c) for c in line.strip()] for line in lines if line.strip()],
        dtype='u1')


def risk_level_sum(grid):
    """Return the sum of (height + 1) over all local minima of *grid*.

    The grid is embedded in a frame of 9s (the maximum height) so edge
    cells only have to beat their in-bounds neighbours.
    """
    h, w = grid.shape
    padded = np.full((h + 2, w + 2), 9, dtype=grid.dtype)
    padded[1:-1, 1:-1] = grid
    inner = padded[1:-1, 1:-1]
    is_low = (
        (inner < padded[:-2, 1:-1])    # cell above
        & (inner < padded[2:, 1:-1])   # cell below
        & (inner < padded[1:-1, :-2])  # cell to the left
        & (inner < padded[1:-1, 2:])   # cell to the right
    )
    return int(np.sum(inner[is_low] + 1))


def main():
    print(risk_level_sum(parse_heightmap(sys.stdin)))


if __name__ == "__main__":
    main()
| [
"numpy.sum",
"numpy.zeros"
] | [((240, 276), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)'], {'dtype': '"""u1"""'}), "((h + 2, w + 2), dtype='u1')\n", (248, 276), True, 'import numpy as np\n'), ((513, 541), 'numpy.sum', 'np.sum', (['(main_map[is_low] + 1)'], {}), '(main_map[is_low] + 1)\n', (519, 541), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This is a process class whose evolve_state method is called
at each timestep.
An instance of MRNAExport is initialized once per Simulation
with the State as input. Only the state 'mRNAs' is modified in
this process.
At each timestep, for an mRNA strand x, for all x, evolve_state reads
the amount of x in the nucleus and cytoplasm and writes back new values
of x in the nucleus and cytoplasm depending on how much of the particular
strand is set to be exported from the nucleus.
All types of:
-multi-spliced mRNAs (17)
-8-Rev-bound single-spliced mRNAs (7)
-8-Rev-bound full-length mRNAs (1)
have a chance to be exported.
Summary of the biology:
This process takes into account the export of mRNA strands from the nucleus to
the cytoplasm at each timestep.
The HIV mRNA transcript has an RRE (Rev Response Element) located between splice
donor 4 (D4) and splice acceptor 7 (A7). This RRE must bind a threshold number of
Rev molecules to enable mRNA export to the cytoplasm. Multi-spliced mRNA transcripts
have excised this RRE element (D4-A7 splice), and therefore may be exported
immediatly to the cytoplasm. Multi-spliced mRNAs are therefore called
"Rev-independent" mRNAS. The full length and single-spliced mRNAs still have
their RRE elements, and therefore are "Rev-dependent". It is unclear from the
literature, how many Rev molecules must be bound for export. We know that the
value is >1 and <= the max number of bound Rev. The requirement for Rev
binding essentially poses a delay on certain transcripts getting to the
cytoplasm. The Rev-independent transcripts start translation before the
Rev-dependent transcripts.
#References:
#1. Pond, <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2009). HIV-1 Rev protein assembles on viral RNA one molecule at a time. Proceedings of the National Academy of Sciences of the United States of America, 106(5), 1404–8. doi:10.1073/pnas.0807388106
#2. <NAME>., <NAME>. (2005) In silico mutagenesis of RNA Splicing in HIV-1. Biotechnology and bioengineering 91: 877-893.
"""
import numpy as np
from mainaux.State import State
from mainaux.Process import Process
from process.RevBinding import RevBinding
from mainaux.InitParamValues import *
#This is a Process Class
class MRNAExport(Process):
    """Per-timestep stochastic export of HIV mRNA from the nucleus to the cytoplasm.

    Only the 'mRNAs' state is modified.  Multi-spliced transcripts lack the
    RRE and are exported Rev-independently; full-length and single-spliced
    transcripts are only exported from the slots corresponding to a
    threshold number of bound Rev molecules (Rev-dependent export).
    """
    def __init__(self, state, param_dict=None):
        """Store the simulation State and unpack export-rate parameters.

        state      -- simulation State; only the 'mRNAs' sub-state is used.
        param_dict -- parameter dictionary; defaults to generate_param_dict().
        """
        self.state = state
        if param_dict==None:
            param_dict = generate_param_dict();
        #Constant parameters
        self.NUM_OF_REV_REQ_FOR_EXPORT = param_dict['NUM_OF_REV_REQ_FOR_EXPORT'] #fittable value. Pond et al. state it is >1.
        self.PROB_REV_INDEP_EXPORT = param_dict['PROB_REV_INDEP_EXPORT'] #1/min #Kim, H., <NAME>. (2005)
        self.PROB_REV_DEP_EXPORT = param_dict['PROB_REV_DEP_EXPORT'] #1/min #<NAME>., <NAME>. (2005)
        self.MAX_REV_PER_TRANSCRIPT = param_dict['MAX_REV_PER_TRANSCRIPT']
    def nuclear_export(self, what_may_be_exported, abundances_nuc, abundances_cyt, export_rate):
        """Move a random number of each eligible construct from nucleus to cytoplasm.

        what_may_be_exported = array/list of indexes in abundances_nuc/cyt of constructs to export
        abundances_nuc = starting abundances of things to be exported (elements are updated in place)
        abundances_cyt = starting abundances of things in the destination location (updated in place)
        export_rate = per-molecule export probability for this timestep
        Returns [abundances_nuc, abundances_cyt] after the transfer.  The
        number exported per construct is Binomial(count, export_rate):
        one uniform draw per molecule, counting those below export_rate.
        """
        for i in what_may_be_exported:
            #One uniform [0,1) draw per molecule currently in the nucleus.
            temp_rand = np.random.rand(abundances_nuc[i])
            #count how many are less than the export probability, and do accounting
            decrement_amount = (temp_rand<export_rate).sum()
            abundances_cyt[i]=abundances_cyt[i]+decrement_amount
            abundances_nuc[i]=abundances_nuc[i]-decrement_amount
        return [abundances_nuc, abundances_cyt]
    def evolve_state(self, timestep):
        """Export eligible transcripts for one timestep and write counts back.

        `timestep` is unused here; presumably part of the Process interface
        (TODO confirm against mainaux.Process).
        """
        #get variables
        mRNA_state = self.state.get_state('mRNAs')
        full_len_transcripts_nuc = mRNA_state.full_len_transcripts_nuc
        full_len_transcripts_cyt = mRNA_state.full_len_transcripts_cyt
        single_splice_transcript_nuc = mRNA_state.single_splice_transcript_nuc
        single_splice_transcript_cyt = mRNA_state.single_splice_transcript_cyt
        multi_splice_transcript_nuc = mRNA_state.multi_splice_transcript_nuc
        multi_splice_transcript_cyt = mRNA_state.multi_splice_transcript_cyt
        #Part A. Rev Independent Export
        #The fully spliced transcripts stored in multi_splice_transcript_nuc do not have a RRE (Rev binding) element
        #and therefore can be exported without rev
        what_may_be_exported = range(np.size(multi_splice_transcript_nuc))
        [multi_splice_transcript_nuc, multi_splice_transcript_cyt] = self.nuclear_export(what_may_be_exported, multi_splice_transcript_nuc, multi_splice_transcript_cyt, self.PROB_REV_INDEP_EXPORT)
        #Part B. Rev Dependent Export
        #The unspliced and singly spliced transcripts stored in full/single_splice_transcript_nuc have a RRE (Rev binding) element
        #and therefore cannot be exported without rev
        #start with full transcripts
        #NOTE(review): the index into full_len_transcripts_nuc appears to encode
        #the number of bound Rev molecules -- confirm against RevBinding.
        what_may_be_exported = np.arange(self.NUM_OF_REV_REQ_FOR_EXPORT, self.MAX_REV_PER_TRANSCRIPT+1) # Contains only one value right now
        [full_len_transcripts_nuc, full_len_transcripts_cyt] = self.nuclear_export(what_may_be_exported, full_len_transcripts_nuc, full_len_transcripts_cyt, self.PROB_REV_DEP_EXPORT)
        #single splice transcripts
        #Seven single-spliced species per Rev count -> stride of 7 in the index.
        what_may_be_exported = np.arange((self.NUM_OF_REV_REQ_FOR_EXPORT*7),(7*(self.MAX_REV_PER_TRANSCRIPT+1))) # Contains indices of 8-Rev elements
        #Only bother exporting slots that are actually populated.
        what_may_be_exported = what_may_be_exported[np.where(single_splice_transcript_nuc[what_may_be_exported]>0)[0]]
        [single_splice_transcript_nuc, single_splice_transcript_cyt] = self.nuclear_export(what_may_be_exported, single_splice_transcript_nuc, single_splice_transcript_cyt, self.PROB_REV_DEP_EXPORT)
        #write back parameters to state object
        mRNA_state.full_len_transcripts_nuc = full_len_transcripts_nuc
        mRNA_state.single_splice_transcript_nuc = single_splice_transcript_nuc
        mRNA_state.full_len_transcripts_cyt = full_len_transcripts_cyt
        mRNA_state.single_splice_transcript_cyt = single_splice_transcript_cyt
        mRNA_state.multi_splice_transcript_nuc = multi_splice_transcript_nuc
        mRNA_state.multi_splice_transcript_cyt = multi_splice_transcript_cyt
        #update state to new values
        #self.state.set_state('mRNAs', mRNA_state)
| [
"numpy.where",
"numpy.size",
"numpy.random.rand",
"numpy.arange"
] | [((5093, 5167), 'numpy.arange', 'np.arange', (['self.NUM_OF_REV_REQ_FOR_EXPORT', '(self.MAX_REV_PER_TRANSCRIPT + 1)'], {}), '(self.NUM_OF_REV_REQ_FOR_EXPORT, self.MAX_REV_PER_TRANSCRIPT + 1)\n', (5102, 5167), True, 'import numpy as np\n'), ((5451, 5540), 'numpy.arange', 'np.arange', (['(self.NUM_OF_REV_REQ_FOR_EXPORT * 7)', '(7 * (self.MAX_REV_PER_TRANSCRIPT + 1))'], {}), '(self.NUM_OF_REV_REQ_FOR_EXPORT * 7, 7 * (self.\n MAX_REV_PER_TRANSCRIPT + 1))\n', (5460, 5540), True, 'import numpy as np\n'), ((3360, 3393), 'numpy.random.rand', 'np.random.rand', (['abundances_nuc[i]'], {}), '(abundances_nuc[i])\n', (3374, 3393), True, 'import numpy as np\n'), ((4557, 4593), 'numpy.size', 'np.size', (['multi_splice_transcript_nuc'], {}), '(multi_splice_transcript_nuc)\n', (4564, 4593), True, 'import numpy as np\n'), ((5622, 5686), 'numpy.where', 'np.where', (['(single_splice_transcript_nuc[what_may_be_exported] > 0)'], {}), '(single_splice_transcript_nuc[what_may_be_exported] > 0)\n', (5630, 5686), True, 'import numpy as np\n')] |
def find_max_difference(datafile1, datafile2):
    '''Find, for each nuclide, the largest absolute mass-fraction difference
    between two ts files and the time at which it occurs.

    Inputs: datafile1 = ts file to be compared
            datafile2 = second ts file to be compared
    Output: prints one line per nuclide (rank, name, largest absolute
            difference, time of that difference), sorted from largest
            difference to smallest.  Returns None.
    '''
    import numpy as np
    import read_ts_file as rtf
    import heapq
    #Read data file, change variable names.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile1)
    zz1, aa1, xmf1, time1, temperature1, density1, timestep1, edot1, flx_end1, flx1 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Set certain constraints based on the size and shape of the mass fraction array.
    num_species_total = np.shape(xmf1)[1]
    num_timesteps = np.shape(xmf1)[0]
    n = num_species_total
    #Read the second data file.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile2)
    #Make lists of elements and nuclear names for later use.
    element = rtf.build_element_symbol()
    nuc_name = rtf.build_isotope_symbol(zz, aa)
    #Change variable names.
    zz2, aa2, xmf2, time2, temperature2, density2, timestep2, edot2, flx_end2, flx2 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #BUGFIX: the original appended one entry per (species, timestep) pair to
    #max_diff, then indexed nuc_name and time with positions in that flat
    #list, producing wrong names/times (or IndexError).  Instead record, per
    #species, the maximum difference over time and the time of that maximum.
    max_diff = []
    max_diff_time = []
    for counter in np.arange(0, num_species_total):  #Count through number of species.
        diffs = [abs(float(xmf1[t][counter]) - float(xmf2[t][counter]))
                 for t in np.arange(0, num_timesteps)]
        best_t = int(np.argmax(diffs))
        max_diff.append(diffs[best_t])
        max_diff_time.append(time[best_t])
    largest = heapq.nlargest(n, max_diff)  #All per-species maxima, sorted descending.
    names = []  #Make empty list for names to fill into
    times = []  #Make empty list for times to fill into
    for item in largest:  #Assign relevant name and time to each difference.
        foo = max_diff.index(item)
        names.append(nuc_name[foo])
        times.append(max_diff_time[foo])
    #Print results.
    for counter in np.arange(0, n):
        print("%s %s %s %s" % (counter + 1, names[counter], largest[counter], times[counter]))
def find_final_difference(datafile1, datafile2, print_values = True):
    '''A function to find final absolute difference between mass fraction in two datafiles.
    Inputs: datafile1 = ts file to be compared
            datafile2 = second ts file to be compared
            print_values = default to True, if True will print differences, if not will return lists of differences and corresponding nuclide names
    Output: largest = list of n largest differences
            names = list of nuclide names corresponding to items in largest
    '''
    import numpy as np
    import read_ts_file as rtf
    import heapq
    #Read ts file, rename variables.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile1)
    zz1, aa1, xmf1, time1, temperature1, density1, timestep1, edot1, flx_end1, flx1 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Set certain parameters based on the data.
    num_species_total = np.shape(xmf1)[1]
    num_timesteps = np.shape(xmf1)[0]
    #BUGFIX: n was never defined in this function (NameError at nlargest);
    #report every species, matching the other functions in this module.
    n = num_species_total
    #Read the second ts file, rename variables.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile2)
    element = rtf.build_element_symbol()
    nuc_name = rtf.build_isotope_symbol(zz,aa)
    zz2, aa2, xmf2, time2, temperature2, density2, timestep2, edot2, flx_end2, flx2 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Final-timestep absolute difference per species (-1 indexes the last timestep).
    max_diff = []
    for counter in np.arange(0, num_species_total):  #Count through number of species.
        max_diff.append(abs(float(xmf1[-1][counter]) - float(xmf2[-1][counter])))
    largest = heapq.nlargest(n, max_diff)  #Final absolute differences, largest to smallest.
    names = []  #Make empty list for names to fill in to.
    for item in largest:  #Assign relevant name to each difference.
        foo = max_diff.index(item)
        names.append(nuc_name[foo])
    #Either print or return largest and names.
    if print_values == True:
        for counter in np.arange(0, n):
            print("%s %s %s" % (counter + 1, names[counter], largest[counter]))
    else:
        return largest, names
def find_point_difference(datafile1, datafile2, t):
    '''A function to find differences between mass fraction in two datafiles at a specific timestep.
    Inputs: datafile1 = ts file to be compared
            datafile2 = second ts file to be compared
            t = timestep desired
    Output: largest = list of n largest differences
            names = list of nuclide names corresponding to items in largest
    '''
    import numpy as np
    import read_ts_file as rtf
    import heapq
    #Read datafile, rename variables.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile1)
    zz1, aa1, xmf1, time1, temperature1, density1, timestep1, edot1, flx_end1, flx1 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Set certain parameters based on the data.
    num_species_total = np.shape(xmf1)[1]
    num_timesteps = np.shape(xmf1)[0]
    #BUGFIX: n was never defined in this function (NameError at nlargest);
    #report every species, matching the other functions in this module.
    n = num_species_total
    #Read second ts file, rename and use variables.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile2)
    element = rtf.build_element_symbol()
    nuc_name = rtf.build_isotope_symbol(zz,aa)
    zz2, aa2, xmf2, time2, temperature2, density2, timestep2, edot2, flx_end2, flx2 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Absolute difference per species at the requested timestep t.
    diff_list = []
    for counter in np.arange(0, num_species_total):  #Count through number of species.
        diff_list.append(abs(float(xmf1[t][counter]) - float(xmf2[t][counter])))
    largest = heapq.nlargest(n, diff_list)  #Rearrange diff_list into largest to smallest order.
    names = []  #Make empty list for names to fill in to.
    for item in largest:  #Assign relevant name to each difference.
        foo = diff_list.index(item)
        names.append(nuc_name[foo])
    #NOTE: an unreachable print loop that followed this return was removed.
    return largest, names  #Return lists of differences and names.
def per_diff(datafile1, datafile2):
    '''A function to find final percent difference between two data sets of mass fraction.
    Inputs: datafile1 = ts file to be compared
            datafile2 = second ts file to be compared
    Output: prints one line per nuclide (rank, name, fractional difference),
            sorted from smallest difference to largest.  Returns None.
    '''
    import numpy as np
    import read_ts_file as rtf
    import heapq
    #Take in data from the first file, give it variable names.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile1)
    zz1, aa1, xmf1, time1, temperature1, density1, timestep1, edot1, flx_end1, flx1 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Set certain constraints based on the input data. (How many species, how many timesteps, etc)
    num_species_total = np.shape(xmf1)[1]
    num_timesteps = np.shape(xmf1)[0]
    n = num_species_total
    #Take in data from the second file.
    zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx = rtf.read_ts_file(datafile2)
    #Make lists of elements and nuclear names for later use.
    element = rtf.build_element_symbol()
    nuc_name = rtf.build_isotope_symbol(zz,aa)
    #Change variable names.
    zz2, aa2, xmf2, time2, temperature2, density2, timestep2, edot2, flx_end2, flx2 = zz, aa, xmf, time, temperature, density, timestep, edot, flx_end, flx
    #Set certain constraints based on the new set of input data.
    num_species_total2 = np.shape(xmf2)[1]
    num_timesteps2 = np.shape(xmf2)[0]
    #Percent (fractional) difference of the final abundances, per species:
    #|x1 - x2| / avg(x1, x2), with a tiny floor on the average to avoid
    #division by zero.  (The previous version built throwaway one-element
    #lists per species and shadowed the function name with a local variable.)
    max_per_diff = []
    for counter in np.arange(0, num_species_total):  #count through each column (species)
        final1 = float(xmf1[-1][counter])  #-1 indexes the last timestep
        final2 = float(xmf2[-1][counter])
        avg = (final1 + final2) / 2
        if avg == 0.0:  #if average is zero, make it something calculable
            avg = 10E-20
        max_per_diff.append(abs(final1 - final2) / avg)
    smallest = heapq.nsmallest(n, max_per_diff)  #all fractional differences in ascending order (n = num_species_total)
    names = []  #make empty list of names
    for item in smallest:  #assign relevant name to each percentage difference
        foo = max_per_diff.index(item)
        names.append(nuc_name[foo])
    for counter in np.arange(0, n):
        print("%s %s %s" % (counter + 1, names[counter], smallest[counter]))
def compare_final(datafile1, datafile2):
    '''Print a comparison table of one ts datafile relative to another.

    Inputs: datafile1 = ts file to be compared (reported errors are relative to this file)
            datafile2 = second ts file to be compared
    Output: prints, for every species, the final mass fractions of both
            files plus absolute and relative errors (sorted by relative
            error), then the 2-norm of the error and the final temperature
            and cumulative nuclear energy comparison.  Returns None.
    '''
    import numpy as np
    import read_ts_file as rtf
    import heapq
    #Take in data from the first file, give it variable names.
    zz1, aa1, xmf1, time1, temperature1, density1, timestep1, edot1, flx_end1, flx1 = rtf.read_ts_file(datafile1)
    #Cumulative nuclear energy: integrate edot over the timesteps.
    en1 = np.multiply(edot1,timestep1)
    enuc1 = np.cumsum(en1)
    #Set certain constraints based on the input data. (How many species, how many timesteps, etc)
    num_species_total = np.shape(xmf1)[1]
    num_timesteps = np.shape(xmf1)[0]
    #Take in data from the second file.
    zz2, aa2, xmf2, time2, temperature2, density2, timestep2, edot2, flx_end2, flx2 = rtf.read_ts_file(datafile2)
    en2 = np.multiply(edot2,timestep2)
    enuc2 = np.cumsum(en2)
    #Make lists of elements and nuclear names for later use.
    element = rtf.build_element_symbol()
    nuc_name = rtf.build_isotope_symbol(zz2,aa2)
    #Set certain constraints based on the new set of input data.
    num_species_total2 = np.shape(xmf2)[1]
    #Create lists for the differences: absolute and relative error of the
    #final-timestep mass fractions, sorted by relative error (ascending).
    aerr = np.abs(np.subtract(xmf2[-1][:],xmf1[-1][:]))
    rerr = np.divide(aerr,np.abs(xmf1[-1][:]))
    isort = np.argsort(rerr)
    aerr_sort = aerr[isort]
    rerr_sort = rerr[isort]
    xmf1_sort = xmf1[-1][isort]
    xmf2_sort = xmf2[-1][isort]
    name_sort = [nuc_name[i] for i in isort]
    #Per-species table (1-based ranks and original indices).
    print("%5s %5s\t%10s\t%16s\t%16s\t%16s\t%16s" % ('i','isort','name','X1','X2','|dX|','|dX| / |X1|'))
    fmt = "%5s %5s\t%10s\t%16.8e\t%16.8e\t%16.8e\t%16.8e"
    for i in np.arange(0, num_species_total):
        print(fmt % (i+1, isort[i]+1, name_sort[i], xmf1_sort[i], xmf2_sort[i], aerr_sort[i], rerr_sort[i]))
    print("")
    #Summary rows: 2-norm of the error vector, then scalar T and E_nuc.
    fmt = "%5s %5s\t%10s\t%16s\t%16s\t%16.8e\t%16.8e"
    aerr_norm = np.linalg.norm(aerr,ord=2)
    rerr_norm = np.divide(aerr_norm,np.linalg.norm(xmf1[-1][:],ord=2))
    print(fmt % ('', '', '2-norm', '', '', aerr_norm, rerr_norm))
    fmt = "%5s %5s\t%10s\t%16.8e\t%16.8e\t%16.8e\t%16.8e"
    aerr = np.abs(np.subtract(temperature2[-1], temperature1[-1]))
    rerr = np.divide(aerr, np.abs(temperature1[-1]))
    print(fmt % ('', '', 'T', temperature1[-1], temperature2[-1], aerr, rerr))
    aerr = np.abs(np.subtract(enuc2[-1], enuc1[-1]))
    rerr = np.divide(aerr, np.abs(enuc1[-1]))
    print(fmt % ('', '', 'E_nuc', enuc1[-1], enuc2[-1], aerr, rerr))
| [
"numpy.abs",
"numpy.multiply",
"numpy.linalg.norm",
"numpy.subtract",
"numpy.argsort",
"heapq.nlargest",
"read_ts_file.read_ts_file",
"read_ts_file.build_element_symbol",
"heapq.nsmallest",
"read_ts_file.build_isotope_symbol",
"numpy.cumsum",
"numpy.shape",
"numpy.arange"
] | [((750, 777), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile1'], {}), '(datafile1)\n', (766, 777), True, 'import read_ts_file as rtf\n'), ((1236, 1263), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile2'], {}), '(datafile2)\n', (1252, 1263), True, 'import read_ts_file as rtf\n'), ((1340, 1366), 'read_ts_file.build_element_symbol', 'rtf.build_element_symbol', ([], {}), '()\n', (1364, 1366), True, 'import read_ts_file as rtf\n'), ((1382, 1414), 'read_ts_file.build_isotope_symbol', 'rtf.build_isotope_symbol', (['zz', 'aa'], {}), '(zz, aa)\n', (1406, 1414), True, 'import read_ts_file as rtf\n'), ((1683, 1714), 'numpy.arange', 'np.arange', (['(0)', 'num_species_total'], {}), '(0, num_species_total)\n', (1692, 1714), True, 'import numpy as np\n'), ((2174, 2201), 'heapq.nlargest', 'heapq.nlargest', (['n', 'max_diff'], {}), '(n, max_diff)\n', (2188, 2201), False, 'import heapq\n'), ((2638, 2653), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (2647, 2653), True, 'import numpy as np\n'), ((3527, 3554), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile1'], {}), '(datafile1)\n', (3543, 3554), True, 'import read_ts_file as rtf\n'), ((3972, 3999), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile2'], {}), '(datafile2)\n', (3988, 3999), True, 'import read_ts_file as rtf\n'), ((4014, 4040), 'read_ts_file.build_element_symbol', 'rtf.build_element_symbol', ([], {}), '()\n', (4038, 4040), True, 'import read_ts_file as rtf\n'), ((4056, 4088), 'read_ts_file.build_isotope_symbol', 'rtf.build_isotope_symbol', (['zz', 'aa'], {}), '(zz, aa)\n', (4080, 4088), True, 'import read_ts_file as rtf\n'), ((4332, 4363), 'numpy.arange', 'np.arange', (['(0)', 'num_species_total'], {}), '(0, num_species_total)\n', (4341, 4363), True, 'import numpy as np\n'), ((4736, 4763), 'heapq.nlargest', 'heapq.nlargest', (['n', 'max_diff'], {}), '(n, max_diff)\n', (4750, 4763), False, 'import heapq\n'), ((5925, 5952), 'read_ts_file.read_ts_file', 
'rtf.read_ts_file', (['datafile1'], {}), '(datafile1)\n', (5941, 5952), True, 'import read_ts_file as rtf\n'), ((6374, 6401), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile2'], {}), '(datafile2)\n', (6390, 6401), True, 'import read_ts_file as rtf\n'), ((6416, 6442), 'read_ts_file.build_element_symbol', 'rtf.build_element_symbol', ([], {}), '()\n', (6440, 6442), True, 'import read_ts_file as rtf\n'), ((6458, 6490), 'read_ts_file.build_isotope_symbol', 'rtf.build_isotope_symbol', (['zz', 'aa'], {}), '(zz, aa)\n', (6482, 6490), True, 'import read_ts_file as rtf\n'), ((6719, 6750), 'numpy.arange', 'np.arange', (['(0)', 'num_species_total'], {}), '(0, num_species_total)\n', (6728, 6750), True, 'import numpy as np\n'), ((6980, 7008), 'heapq.nlargest', 'heapq.nlargest', (['n', 'diff_list'], {}), '(n, diff_list)\n', (6994, 7008), False, 'import heapq\n'), ((7384, 7399), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (7393, 7399), True, 'import numpy as np\n'), ((8079, 8106), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile1'], {}), '(datafile1)\n', (8095, 8106), True, 'import read_ts_file as rtf\n'), ((8590, 8617), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile2'], {}), '(datafile2)\n', (8606, 8617), True, 'import read_ts_file as rtf\n'), ((8698, 8724), 'read_ts_file.build_element_symbol', 'rtf.build_element_symbol', ([], {}), '()\n', (8722, 8724), True, 'import read_ts_file as rtf\n'), ((8740, 8772), 'read_ts_file.build_isotope_symbol', 'rtf.build_isotope_symbol', (['zz', 'aa'], {}), '(zz, aa)\n', (8764, 8772), True, 'import read_ts_file as rtf\n'), ((9218, 9249), 'numpy.arange', 'np.arange', (['(0)', 'num_species_total'], {}), '(0, num_species_total)\n', (9227, 9249), True, 'import numpy as np\n'), ((10101, 10133), 'heapq.nsmallest', 'heapq.nsmallest', (['n', 'max_per_diff'], {}), '(n, max_per_diff)\n', (10116, 10133), False, 'import heapq\n'), ((10506, 10521), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, 
n)\n', (10515, 10521), True, 'import numpy as np\n'), ((11239, 11266), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile1'], {}), '(datafile1)\n', (11255, 11266), True, 'import read_ts_file as rtf\n'), ((11277, 11306), 'numpy.multiply', 'np.multiply', (['edot1', 'timestep1'], {}), '(edot1, timestep1)\n', (11288, 11306), True, 'import numpy as np\n'), ((11318, 11332), 'numpy.cumsum', 'np.cumsum', (['en1'], {}), '(en1)\n', (11327, 11332), True, 'import numpy as np\n'), ((11643, 11670), 'read_ts_file.read_ts_file', 'rtf.read_ts_file', (['datafile2'], {}), '(datafile2)\n', (11659, 11670), True, 'import read_ts_file as rtf\n'), ((11681, 11710), 'numpy.multiply', 'np.multiply', (['edot2', 'timestep2'], {}), '(edot2, timestep2)\n', (11692, 11710), True, 'import numpy as np\n'), ((11722, 11736), 'numpy.cumsum', 'np.cumsum', (['en2'], {}), '(en2)\n', (11731, 11736), True, 'import numpy as np\n'), ((11817, 11843), 'read_ts_file.build_element_symbol', 'rtf.build_element_symbol', ([], {}), '()\n', (11841, 11843), True, 'import read_ts_file as rtf\n'), ((11859, 11893), 'read_ts_file.build_isotope_symbol', 'rtf.build_isotope_symbol', (['zz2', 'aa2'], {}), '(zz2, aa2)\n', (11883, 11893), True, 'import read_ts_file as rtf\n'), ((12157, 12173), 'numpy.argsort', 'np.argsort', (['rerr'], {}), '(rerr)\n', (12167, 12173), True, 'import numpy as np\n'), ((12517, 12548), 'numpy.arange', 'np.arange', (['(0)', 'num_species_total'], {}), '(0, num_species_total)\n', (12526, 12548), True, 'import numpy as np\n'), ((12745, 12772), 'numpy.linalg.norm', 'np.linalg.norm', (['aerr'], {'ord': '(2)'}), '(aerr, ord=2)\n', (12759, 12772), True, 'import numpy as np\n'), ((1045, 1059), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (1053, 1059), True, 'import numpy as np\n'), ((1083, 1097), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (1091, 1097), True, 'import numpy as np\n'), ((1774, 1801), 'numpy.arange', 'np.arange', (['(0)', 'num_timesteps'], {}), '(0, 
num_timesteps)\n', (1783, 1801), True, 'import numpy as np\n'), ((3787, 3801), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (3795, 3801), True, 'import numpy as np\n'), ((3825, 3839), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (3833, 3839), True, 'import numpy as np\n'), ((5141, 5156), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (5150, 5156), True, 'import numpy as np\n'), ((6185, 6199), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (6193, 6199), True, 'import numpy as np\n'), ((6223, 6237), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (6231, 6237), True, 'import numpy as np\n'), ((8387, 8401), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (8395, 8401), True, 'import numpy as np\n'), ((8425, 8439), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (8433, 8439), True, 'import numpy as np\n'), ((9048, 9062), 'numpy.shape', 'np.shape', (['xmf2'], {}), '(xmf2)\n', (9056, 9062), True, 'import numpy as np\n'), ((9087, 9101), 'numpy.shape', 'np.shape', (['xmf2'], {}), '(xmf2)\n', (9095, 9101), True, 'import numpy as np\n'), ((11456, 11470), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (11464, 11470), True, 'import numpy as np\n'), ((11494, 11508), 'numpy.shape', 'np.shape', (['xmf1'], {}), '(xmf1)\n', (11502, 11508), True, 'import numpy as np\n'), ((11984, 11998), 'numpy.shape', 'np.shape', (['xmf2'], {}), '(xmf2)\n', (11992, 11998), True, 'import numpy as np\n'), ((12059, 12096), 'numpy.subtract', 'np.subtract', (['xmf2[-1][:]', 'xmf1[-1][:]'], {}), '(xmf2[-1][:], xmf1[-1][:])\n', (12070, 12096), True, 'import numpy as np\n'), ((12123, 12142), 'numpy.abs', 'np.abs', (['xmf1[-1][:]'], {}), '(xmf1[-1][:])\n', (12129, 12142), True, 'import numpy as np\n'), ((12808, 12842), 'numpy.linalg.norm', 'np.linalg.norm', (['xmf1[-1][:]'], {'ord': '(2)'}), '(xmf1[-1][:], ord=2)\n', (12822, 12842), True, 'import numpy as np\n'), ((12986, 13033), 'numpy.subtract', 'np.subtract', (['temperature2[-1]', 
'temperature1[-1]'], {}), '(temperature2[-1], temperature1[-1])\n', (12997, 13033), True, 'import numpy as np\n'), ((13062, 13086), 'numpy.abs', 'np.abs', (['temperature1[-1]'], {}), '(temperature1[-1])\n', (13068, 13086), True, 'import numpy as np\n'), ((13186, 13219), 'numpy.subtract', 'np.subtract', (['enuc2[-1]', 'enuc1[-1]'], {}), '(enuc2[-1], enuc1[-1])\n', (13197, 13219), True, 'import numpy as np\n'), ((13248, 13265), 'numpy.abs', 'np.abs', (['enuc1[-1]'], {}), '(enuc1[-1])\n', (13254, 13265), True, 'import numpy as np\n')] |
# Exercise 6.26
# Author: <NAME>
import numpy as np
import matplotlib.pyplot as plt
def wave_packet(x, t):
    """Gaussian-enveloped sine wave: exp(-(x - 3t)**2) * sin(3*pi*(x - t)).

    Works element-wise on NumPy arrays as well as on scalars.
    """
    envelope = np.exp(-(x - 3 * t) ** 2)
    carrier = np.sin(3 * np.pi * (x - t))
    return envelope * carrier
xlist = np.linspace(-4, 4, 1001)
tlist = (-0.85, 0, 0.85)  # sample times, used for both the plots and the report

# One PNG per sample time.  CONSISTENCY FIX: iterate tlist instead of
# repeating the literal tuple.
for t in tlist:
    ylist = wave_packet(xlist, t)
    plt.plot(xlist, ylist)
    plt.xlabel('x')
    plt.ylabel('Amplitude')
    plt.title('One dimensional wave packet: t = %.2f s' % t)
    plt.savefig('wavepacket_report-t=%.2f.png' % t)
    plt.close()

# Build the HTML report.  Context managers close (and flush) both files;
# the original leaked both handles.
with open('wave_packet.html', 'w') as outfile:
    outfile.write('<html>\n<body bgcolor="#FFFAFA" style="margin: 20px 200px">\n')
    outfile.write('<h1 style="color: #053061; font: 60pt Helvetica Neue; font-weight:100; text-align:center">\
Wavepacket Program Report\
</h1>\n')
    outfile.write('<p style="color: #333; font: 20pt Helvetica Neue; font-weight:100;" > \
This report details the output of a wave_packet plotting function: wave_packet(x, t).\
The function is demonstrated in the following example: \
</p >\n')
    outfile.write('<pre style="color: #333; font-size: 12pt; margin-left: 100px">\n')
    # Numbered source listing of the demo program.
    with open('wavepacket_report_code.txt', 'r') as infile:
        for counter, line in enumerate(infile, start=1):
            outfile.write('%-4s %s \n' % (str(counter), line))
    outfile.write('</pre>\n')
    outfile.write('<hr>')
    outfile.write('<h2 style="color: #053061; font: 50pt Helvetica Neue; font-weight:100; text-align:center">\
Example plots\
</h2>\n')
    for t in tlist:
        outfile.write('<div align="middle" style="margin-top: 20px"><img align="middle" src="wavepacket_report-t=%.2f.png"/></div>\n' % t)
    outfile.write('<h2 style="color: #053061; font: 50pt Helvetica Neue; font-weight:100; text-align:center">\
Animated Gif\
</h2>\n')
    outfile.write('<hr>')
    outfile.write('<div align="middle" style="margin-top: 20px"><img align="middle" src="wavepacket_report.gif"/></div>\n')
    # BUGFIX: close the tags in proper nesting order (</body> before </html>).
    outfile.write('</body>\n</html>\n')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.title"
] | [((186, 210), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(1001)'], {}), '(-4, 4, 1001)\n', (197, 210), True, 'import numpy as np\n'), ((301, 323), 'matplotlib.pyplot.plot', 'plt.plot', (['xlist', 'ylist'], {}), '(xlist, ylist)\n', (309, 323), True, 'import matplotlib.pyplot as plt\n'), ((328, 343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (338, 343), True, 'import matplotlib.pyplot as plt\n'), ((348, 371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (358, 371), True, 'import matplotlib.pyplot as plt\n'), ((376, 432), 'matplotlib.pyplot.title', 'plt.title', (["('One dimensional wave packet: t = %.2f s' % t)"], {}), "('One dimensional wave packet: t = %.2f s' % t)\n", (385, 432), True, 'import matplotlib.pyplot as plt\n'), ((437, 484), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('wavepacket_report-t=%.2f.png' % t)"], {}), "('wavepacket_report-t=%.2f.png' % t)\n", (448, 484), True, 'import matplotlib.pyplot as plt\n'), ((489, 500), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (498, 500), True, 'import matplotlib.pyplot as plt\n'), ((121, 146), 'numpy.exp', 'np.exp', (['(-(x - 3 * t) ** 2)'], {}), '(-(x - 3 * t) ** 2)\n', (127, 146), True, 'import numpy as np\n'), ((149, 176), 'numpy.sin', 'np.sin', (['(3 * np.pi * (x - t))'], {}), '(3 * np.pi * (x - t))\n', (155, 176), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
import stk
from ...case_data import CaseData
@pytest.fixture(
    params=(
        lazy_fixture('cage1'),
        lazy_fixture('cage2'),
        lazy_fixture('cage3'),
    ),
)
def case_data(request):
    """A vertex test case, parametrized over every cage fixture below."""
    return request.param
@pytest.fixture
def cage1(cls, position):
    """A case for a vertex constructed directly from a vertex class."""
    vertex = cls(
        id=0,
        position=position,
    )
    return CaseData(
        vertex=vertex,
        id=0,
        position=position,
        cell=np.array([0, 0, 0]),
    )
@pytest.fixture
def cage2(init_at_center, vertices_):
    """A case for a vertex initialized at the centroid of other vertices."""
    vertex_positions = [vertex.get_position() for vertex in vertices_]
    centroid = sum(vertex_positions) / len(vertex_positions)
    return CaseData(
        vertex=init_at_center(
            id=0,
            vertices=vertices_,
        ),
        id=0,
        position=centroid,
        cell=np.array([0, 0, 0]),
    )
@pytest.fixture
def cage3(position):
    """A case for an :class:`.UnaligningVertex` placed at *position*."""
    vertex = stk.cage.UnaligningVertex(
        id=0,
        position=position,
    )
    return CaseData(
        vertex=vertex,
        id=0,
        position=position,
        cell=np.array([0, 0, 0]),
    )
@pytest.fixture(
    params=(
        [0, 0, 0],
        [1, 2, -20],
    ),
)
def position(request):
    """A vertex position, as a float64 array of shape (3, )."""
    return np.array(request.param, dtype=np.float64)
@pytest.fixture(
    params=(
        stk.cage.LinearVertex,
        stk.cage.NonLinearVertex,
        stk.cage.UnaligningVertex,
        stk.cage.AngledVertex,
    ),
)
def cls(request) -> type[stk.Vertex]:
    """A cage vertex class (not an instance) to construct directly."""
    return request.param
@pytest.fixture(
    params=(
        stk.cage.LinearVertex.init_at_center,
        stk.cage.NonLinearVertex.init_at_center,
        stk.cage.UnaligningVertex.init_at_center,
        stk.cage.AngledVertex.init_at_center,
    ),
)
def init_at_center(request) -> stk.Vertex:
    """The bound ``init_at_center`` alternate constructor of a vertex class.

    Note: the fixture yields the classmethod itself (a callable returning a
    vertex), not a vertex instance.
    """
    return request.param
@pytest.fixture(
    params=(
        lambda: (stk.Vertex(0, [1, 2, 3]), ),
        lambda: (
            stk.Vertex(0, [1, 2, 3]),
            stk.Vertex(1, [-1, 2, -32]),
        ),
    ),
)
def vertices_(request) -> tuple[stk.Vertex, ...]:
    """A tuple of vertices; the lambda builds fresh instances per request."""
    return request.param()
| [
"stk.cage.UnaligningVertex",
"pytest_lazyfixture.lazy_fixture",
"numpy.array",
"pytest.fixture",
"stk.Vertex"
] | [((1138, 1185), 'pytest.fixture', 'pytest.fixture', ([], {'params': '([0, 0, 0], [1, 2, -20])'}), '(params=([0, 0, 0], [1, 2, -20]))\n', (1152, 1185), False, 'import pytest\n'), ((1295, 1422), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(stk.cage.LinearVertex, stk.cage.NonLinearVertex, stk.cage.UnaligningVertex,\n stk.cage.AngledVertex)'}), '(params=(stk.cage.LinearVertex, stk.cage.NonLinearVertex, stk\n .cage.UnaligningVertex, stk.cage.AngledVertex))\n', (1309, 1422), False, 'import pytest\n'), ((1524, 1716), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(stk.cage.LinearVertex.init_at_center, stk.cage.NonLinearVertex.\n init_at_center, stk.cage.UnaligningVertex.init_at_center, stk.cage.\n AngledVertex.init_at_center)'}), '(params=(stk.cage.LinearVertex.init_at_center, stk.cage.\n NonLinearVertex.init_at_center, stk.cage.UnaligningVertex.\n init_at_center, stk.cage.AngledVertex.init_at_center))\n', (1538, 1716), False, 'import pytest\n'), ((1250, 1291), 'numpy.array', 'np.array', (['request.param'], {'dtype': 'np.float64'}), '(request.param, dtype=np.float64)\n', (1258, 1291), True, 'import numpy as np\n'), ((200, 221), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""cage1"""'], {}), "('cage1')\n", (212, 221), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((231, 252), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""cage2"""'], {}), "('cage2')\n", (243, 252), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((262, 283), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""cage3"""'], {}), "('cage3')\n", (274, 283), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((542, 561), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (550, 561), True, 'import numpy as np\n'), ((865, 884), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (873, 884), True, 'import numpy as np\n'), ((967, 1017), 'stk.cage.UnaligningVertex', 'stk.cage.UnaligningVertex', ([], {'id': '(0)', 
'position': 'position'}), '(id=0, position=position)\n', (992, 1017), False, 'import stk\n'), ((1108, 1127), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1116, 1127), True, 'import numpy as np\n'), ((1870, 1894), 'stk.Vertex', 'stk.Vertex', (['(0)', '[1, 2, 3]'], {}), '(0, [1, 2, 3])\n', (1880, 1894), False, 'import stk\n'), ((1929, 1953), 'stk.Vertex', 'stk.Vertex', (['(0)', '[1, 2, 3]'], {}), '(0, [1, 2, 3])\n', (1939, 1953), False, 'import stk\n'), ((1967, 1994), 'stk.Vertex', 'stk.Vertex', (['(1)', '[-1, 2, -32]'], {}), '(1, [-1, 2, -32])\n', (1977, 1994), False, 'import stk\n')] |
import argparse
import os
import sys
import random
import numpy as np
import scipy
import torch
import torch.optim as optim
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data
from params import Params
import pickle
import time as t
from model import ActorCriticNet, Shared_obs_stats, ActorCriticNetWithContact
import statistics
import matplotlib.pyplot as plt
from operator import add, sub
import pickle
import threading
import torch.multiprocessing as mp
import queue
from utils import TrafficLight
from utils import Counter
from radam import RAdam
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# try:
#     mp.set_start_method('spawn')
# except RuntimeError:
#     pass
import sys
# NOTE(review): hard-coded user-specific path; breaks on other machines.
sys.path.append('/home/zhaoming/Documents/dev/gym/gym/envs/mujoco')
class ReplayMemory(object):
    """Fixed-capacity FIFO buffer of per-step transition tuples.

    The ``push`` family takes ``events``: a sequence of parallel per-field
    lists (e.g. [states, actions, ...]); each step becomes one stored tuple.
    Sampling returns one array per field, concatenated along axis 0.
    """

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored steps
        self.memory = []          # list of per-step tuples
        self.sample_index = 0     # cursor used by sample_one_at_a_time

    def push(self, events):
        """Append every step in `events`, evicting the oldest beyond capacity."""
        for event in zip(*events):
            self.memory.append(event)
            if len(self.memory) > self.capacity:
                del self.memory[0]

    # NOTE(review): the original defined push_half twice; only the last
    # definition is live in Python, so the dead first variant was removed.
    def push_half(self, events):
        """Append only the second half of the steps in `events`."""
        temp_memory = []
        for event in zip(*events):
            temp_memory.append(event)
        self.memory = self.memory + temp_memory[2*len(temp_memory)//4:len(temp_memory)]
        while len(self.memory) > self.capacity:
            del self.memory[0]

    def clear(self):
        """Drop all stored steps and reset the sequential-sampling cursor."""
        self.memory = []
        self.sample_index = 0

    def sample(self, batch_size):
        """Return an iterator of per-field arrays for a random batch of steps."""
        samples = zip(*random.sample(self.memory, batch_size))
        return map(lambda x: np.concatenate(x, 0), samples)

    def clean_memory(self):
        """Evict the oldest entries until the buffer fits within capacity."""
        while len(self.memory) > self.capacity:
            del self.memory[0]

    def shuffle(self):
        """Shuffle stored steps in place."""
        random.shuffle(self.memory)

    def sample_one_at_a_time(self):
        """Return per-field arrays for the next single step, advancing the cursor."""
        samples = zip(*self.memory[self.sample_index:self.sample_index+1])
        self.sample_index += 1
        return map(lambda x: np.concatenate(x, 0), samples)
def normal(x, mu, log_std):
    """Per-sample diagonal-Gaussian log-density of `x` (constant term omitted).

    Computes sum_d [ -0.5*((x-mu)/std)^2 - log_std ] over dim 1; the
    -0.5*log(2*pi) constant is dropped, which cancels in likelihood ratios.
    """
    z = (x - mu) / log_std.exp()
    log_density = -0.5 * z.pow(2)
    return torch.sum(log_density, dim=1) - torch.sum(log_std, dim=1)
class RL(object):
    """PPO-style trainer for a gym environment with parallel sample collection.

    The master process owns ``self.model`` (placed in shared memory); worker
    processes run :meth:`collect_samples` and push trajectories through
    ``self.queue``, synchronised with the master via a TrafficLight/Counter
    handshake. ``self.gpu_model`` is a device-side copy used for the actual
    gradient updates.
    """
    def __init__(self, env, hidden_layer=[64, 64], contact=False):
        # NOTE(review): mutable default argument `hidden_layer=[64, 64]` -- safe
        # only as long as callers never mutate it; prefer None + default inside.
        self.env = env
        #self.env.env.disableViewer = False
        self.num_inputs = env.observation_space.shape[0]
        self.num_outputs = env.action_space.shape[0]
        self.hidden_layer = hidden_layer
        self.num_contact = 2
        self.params = Params()
        # Contact-aware network variant if requested, plain actor-critic otherwise.
        if contact:
            self.Net = ActorCriticNetWithContact
        else:
            self.Net = ActorCriticNet
        self.model = self.Net(self.num_inputs, self.num_outputs, self.hidden_layer, num_contact=self.num_contact)
        self.model.share_memory()  # share weights with worker processes
        self.shared_obs_stats = Shared_obs_stats(self.num_inputs)
        self.memory = ReplayMemory(10000000)
        self.value_memory = ReplayMemory(10000000)
        # Running evaluation statistics (deterministic and noisy rollouts).
        self.test_mean = []
        self.test_std = []
        self.noisy_test_mean = []
        self.noisy_test_std = []
        self.fig = plt.figure()
        #self.fig2 = plt.figure()
        self.lr = self.params.lr
        plt.show(block=False)
        self.test_list = []
        self.noisy_test_list = []
        # Queues/events for master <-> worker communication.
        self.queue = mp.Queue()
        self.value_queue = mp.Queue()
        self.mpdone = [mp.Event(), mp.Event(), mp.Event(), mp.Event()]
        self.process = []
        self.traffic_light = TrafficLight()
        self.counter = Counter()
        self.best_trajectory = ReplayMemory(5000)
        self.best_score_queue = mp.Queue()
        self.best_score = mp.Value("f", 0)
        self.max_reward = mp.Value("f", 1)  # shared reward scale used to normalize returns
        self.expert_trajectory = ReplayMemory(1e7)
        self.validation_trajectory = ReplayMemory(6000*9)
        self.best_validation = 1.0
        self.current_best_validation = 1.0
        self.return_obs_stats = Shared_obs_stats(1)
        self.gpu_model = self.Net(self.num_inputs, self.num_outputs,self.hidden_layer, num_contact=self.num_contact)
        self.base_controller = None  # optional frozen policy added residually
    def normalize_data(self, num_iter=1000, file='shared_obs_stats.pkl'):
        """Warm up observation normalization by stepping the env with zero
        actions for `num_iter` steps, then pickle the stats to `file`."""
        state = self.env.reset()
        state = Variable(torch.Tensor(state).unsqueeze(0))
        #model_old = ActorCriticNet(self.num_inputs, self.num_outputs,self.hidden_layer)
        #model_old.load_state_dict(self.model.state_dict())
        for i in range(num_iter):
            print(i)
            self.shared_obs_stats.observes(state)
            state = self.shared_obs_stats.normalize(state)#.to(device)
            #mu = self.model.sample_actions(state)
            #action = mu#(mu + log_std.exp()*Variable(eps))
            #env_action = action.cpu().data.squeeze().numpy()
            env_action = np.random.randn(self.num_outputs)
            state, reward, done, _ = self.env.step(env_action*0)
            if done:
                state = self.env.reset()
            state = Variable(torch.Tensor(state).unsqueeze(0))
        with open(file, 'wb') as output:
            pickle.dump(self.shared_obs_stats, output, pickle.HIGHEST_PROTOCOL)
    def run_test(self, num_test=1):
        """Roll out the deterministic (best-action) policy `num_test` times and
        append the episode-return mean/std to self.test_mean / self.test_std."""
        state = self.env.reset()
        state = Variable(torch.Tensor(state).unsqueeze(0))
        ave_test_reward = 0
        total_rewards = []
        for i in range(num_test):
            total_reward = 0
            while True:
                state = self.shared_obs_stats.normalize(state)
                mu = self.model.sample_best_actions(state)
                action = mu.cpu().data.squeeze().numpy()
                # Residual control: add the frozen base policy's action if present.
                if self.base_controller is not None:
                    base_action = self.base_controller.sample_best_actions(state)
                    action += base_action.cpu().data.squeeze().numpy()
                state, reward, done, _ = self.env.step(action)
                total_reward += reward
                #print(state)
                #print("done", done, "state", state)
                if done:
                    state = self.env.reset()
                    #print(self.env.position)
                    #print(self.env.time)
                    state = Variable(torch.Tensor(state).unsqueeze(0))
                    ave_test_reward += total_reward / num_test
                    total_rewards.append(total_reward)
                    break
                state = Variable(torch.Tensor(state).unsqueeze(0))
        #print("avg test reward is", ave_test_reward)
        # NOTE(review): statistics.stdev raises if num_test < 2.
        reward_mean = statistics.mean(total_rewards)
        reward_std = statistics.stdev(total_rewards)
        self.test_mean.append(reward_mean)
        self.test_std.append(reward_std)
        self.test_list.append((reward_mean, reward_std))
        #print(self.model.state_dict())
    def run_test_with_noise(self, num_test=10):
        """Roll out the stochastic policy `num_test` times and append the
        episode-return mean/std to self.noisy_test_mean / self.noisy_test_std."""
        state = self.env.reset()
        state = Variable(torch.Tensor(state).unsqueeze(0))
        ave_test_reward = 0
        total_rewards = []
        for i in range(num_test):
            total_reward = 0
            while True:
                state = self.shared_obs_stats.normalize(state)
                mu = self.model.sample_actions(state)
                eps = torch.randn(mu.size())
                # NOTE(review): the 0.0 factor makes this rollout effectively
                # noise-free beyond what sample_actions itself injects.
                action = (mu + 0.0*Variable(eps))
                action = action.cpu().data.squeeze().numpy()
                if self.base_controller is not None:
                    base_action = self.base_controller.sample_best_actions(state)
                    action += base_action.cpu().data.squeeze().numpy()
                state, reward, done, _ = self.env.step(action)
                total_reward += reward
                if done:
                    state = self.env.reset()
                    state = Variable(torch.Tensor(state).unsqueeze(0))
                    ave_test_reward += total_reward / num_test
                    total_rewards.append(total_reward)
                    break
                state = Variable(torch.Tensor(state).unsqueeze(0))
        #print("avg test reward is", ave_test_reward)
        reward_mean = statistics.mean(total_rewards)
        reward_std = statistics.stdev(total_rewards)
        self.noisy_test_mean.append(reward_mean)
        self.noisy_test_std.append(reward_std)
        self.noisy_test_list.append((reward_mean, reward_std))
    def plot_statistics(self):
        """Redraw the two-panel figure: deterministic (left) and noisy (right)
        test returns with +/- one-std shaded bands."""
        ax = self.fig.add_subplot(121)
        ax2 = self.fig.add_subplot(122)
        low = []
        high = []
        index = []
        noisy_low = []
        noisy_high = []
        for i in range(len(self.test_mean)):
            low.append(self.test_mean[i] - self.test_std[i])
            high.append(self.test_mean[i] + self.test_std[i])
            noisy_low.append(self.noisy_test_mean[i]-self.noisy_test_std[i])
            noisy_high.append(self.noisy_test_mean[i]+self.noisy_test_std[i])
            index.append(i)
        plt.xlabel('iterations')
        plt.ylabel('average rewards')
        ax.plot(self.test_mean, 'b')
        ax2.plot(self.noisy_test_mean, 'g')
        ax.fill_between(index, low, high, color='cyan')
        ax2.fill_between(index, noisy_low, noisy_high, color='r')
        #ax.plot(map(sub, test_mean, test_std))
        self.fig.canvas.draw()
    def collect_samples(self, num_samples, start_state=None, noise=-2.0, env_index=0, random_seed=1):
        """Worker-process loop: collect `num_samples`-step trajectories forever,
        compute discounted returns, and push batches to self.queue.

        Synchronises with the master via the TrafficLight: after pushing a
        batch it spins until the light flips, then starts a fresh rollout.
        """
        random.seed(random_seed)
        torch.manual_seed(random_seed+1)
        np.random.seed(random_seed+2)
        # NOTE(review): seeds the module-level global `env`, not self.env --
        # only works because __main__ defines `env`; should be self.env.seed.
        env.seed(random_seed + 3)
        #env.seed(random_seed+3)
        #print(noise)
        # NOTE(review): `== None` should be `is None`; also compares an array
        # elementwise if start_state were ever a numpy array.
        if start_state == None:
            start_state = self.env.reset()
        samples = 0
        done = False
        states = []
        next_states = []
        actions = []
        rewards = []
        values = []
        q_values = []
        real_rewards = []
        log_probs = []
        noise = self.base_noise * self.explore_noise.value
        self.model.set_noise(noise)
        state = start_state
        state = Variable(torch.Tensor(state).unsqueeze(0))
        total_reward = 0
        #q_value = Variable(torch.zeros(1, 1))
        while True:
            # Refresh exploration noise from the shared value each episode.
            noise = self.base_noise * self.explore_noise.value
            self.model.set_noise(noise)
            #print("local", self.model.p_fcs[1].bias.data[0])
            #self.model.load_state_dict(torch.load(self.model_name))
            signal_init = self.traffic_light.get()
            score = 0
            while samples < num_samples and not done:
                #self.shared_obs_stats.observes(state)
                states.append(state.cpu().data.numpy())
                #self.shared_obs_stats.observes(state)
                #print("samples", samples)
                state = self.shared_obs_stats.normalize(state)
                action = self.model.sample_actions(state)
                log_prob = self.model.calculate_prob(state, action)
                actions.append(action.cpu().data.numpy())
                log_probs.append(log_prob.data.numpy())
                env_action = action.data.squeeze().numpy()
                if self.base_controller is not None:
                    base_action = self.base_controller.sample_best_actions(state)
                    env_action += base_action.cpu().data.squeeze().numpy()
                state, reward, done, _ = self.env.step(env_action)
                score += reward
                # Track the largest per-step reward seen (capped at 50); used
                # as a shared normalization scale for values/returns.
                if reward > self.max_reward.value:
                    self.max_reward.value = reward
                    if self.max_reward.value > 50:
                        self.max_reward.value = 50
                #print(self.max_reward.value)
                #reward *= 0.3
                rewards.append(Variable(reward * torch.ones(1)).data.numpy())
                real_rewards.append(Variable(reward * torch.ones(1)).data.numpy())
                state = Variable(torch.Tensor(state).unsqueeze(0))
                next_states.append(state.cpu().data.numpy())
                next_state = self.shared_obs_stats.normalize(state)
                samples += 1
            # Bootstrap value for the truncated trajectory (zero if terminal).
            state = self.shared_obs_stats.normalize(state)
            v = (self.model.get_value(state))*self.max_reward.value# / self.return_obs_stats.std) + self.return_obs_stats.mean
            if self.base_controller is not None:
                v += self.base_controller.get_value(state)*self.max_reward.value
            if done:
                R = torch.zeros(1, 1)
            else:
                R = v.data
            R = Variable(R)
            # Backward pass: discounted returns R_t = gamma * R_{t+1} + r_t.
            for i in reversed(range(len(real_rewards))):
                reward = Variable(torch.from_numpy(real_rewards[i]).unsqueeze(0))
                R = self.params.gamma * R + reward#self.return_obs_stats.normalize(reward)# Variable(torch.from_numpy(real_rewards[i]))
                q_values.insert(0, R.cpu().data.numpy())
                #self.return_obs_stats.observes(R)
            #mirror
            # (A large commented-out block that built mirrored states/actions
            #  for symmetry-augmented value updates was condensed away here.)
            self.queue.put([states, actions, next_states, rewards, q_values, log_probs])
            #self.value_queue.put([value_states, value_actions, value_next_states, value_rewards, value_q_values, value_log_probs])
            self.counter.increment()
            self.env.reset()
            # Busy-wait until the master flips the traffic light (new weights).
            while self.traffic_light.get() == signal_init:
                pass
            start_state = self.env.reset()
            state = start_state
            state = Variable(torch.Tensor(state).unsqueeze(0))
            total_reward = 0
            samples = 0
            done = False
            states = []
            next_states = []
            actions = []
            rewards = []
            values = []
            q_values = []
            real_rewards = []
            log_probs = []
        #print("child", self.model.noise)
        #if self.model.noise[0] > -2:
        #    self.model.noise *= 1.001
    def collect_expert_samples(self, num_samples, filename, noise=-2.0,validation=False, difficulty=[0, 0]):
        """Roll out a pretrained expert policy (weights from `filename`) in a
        fresh stepper env and store trajectories with discounted returns in
        self.expert_trajectory (or self.validation_trajectory if `validation`).

        NOTE(review): mutable default `difficulty=[0, 0]`; `eps`/`weight` are
        computed but unused.
        """
        import gym
        expert_env = gym.make("mocca_envs:Walker3DStepperEnv-v0")
        expert_env.set_difficulty(difficulty)
        start_state = expert_env.reset()
        samples = 0
        done = False
        states = []
        next_states = []
        actions = []
        rewards = []
        q_values = []
        model_expert = self.Net(self.num_inputs, self.num_outputs,self.hidden_layer)
        model_expert.load_state_dict(torch.load(filename))
        policy_noise = noise * np.ones(self.num_outputs)
        model_expert.set_noise(policy_noise)
        state = start_state
        state = Variable(torch.Tensor(state).unsqueeze(0))
        total_reward = 0
        total_sample = 0
        #q_value = Variable(torch.zeros(1, 1))
        if validation:
            max_sample = 300
        else:
            max_sample = 50000
        while total_sample < max_sample:
            score = 0
            while samples < num_samples and not done:
                state = self.shared_obs_stats.normalize(state)
                states.append(state.data.numpy())
                mu = model_expert.sample_best_actions(state)
                actions.append(mu.data.numpy())
                eps = torch.randn(mu.size())
                if validation:
                    weight = 0.1
                else:
                    weight = 0.1
                env_action = model_expert.sample_actions(state)
                env_action = env_action.data.squeeze().numpy()
                state, reward, done, _ = expert_env.step(env_action)
                reward = 1  # every surviving step counts 1 (imitation target)
                rewards.append(Variable(reward * torch.ones(1)).data.numpy())
                state = Variable(torch.Tensor(state).unsqueeze(0))
                next_state = self.shared_obs_stats.normalize(state)
                next_states.append(next_state.data.numpy())
                samples += 1
                #total_sample += 1
                score += reward
            print("expert score", score)
            state = self.shared_obs_stats.normalize(state)
            #print(state)
            v = model_expert.get_value(state)
            if done:
                R = torch.zeros(1, 1)
            else:
                R = v.data
                # NOTE(review): this line overwrites R = v.data immediately;
                # the bootstrap value is discarded in favor of a constant 100.
                R = torch.ones(1, 1) * 100
            R = Variable(R)
            for i in reversed(range(len(rewards))):
                R = self.params.gamma * R + Variable(torch.from_numpy(rewards[i]))
                q_values.insert(0, R.data.numpy())
            # Keep only full-length (score >= num_samples) trajectories.
            if not validation and score >= num_samples:
                self.expert_trajectory.push([states, actions, next_states, rewards, q_values])
                total_sample += num_samples
            elif score >= num_samples:
                self.validation_trajectory.push([states, actions, next_states, rewards, q_values])
            start_state = expert_env.reset()
            state = start_state
            state = Variable(torch.Tensor(state).unsqueeze(0))
            total_reward = 0
            samples = 0
            done = False
            states = []
            next_states = []
            actions = []
            rewards = []
            q_values = []
    def normalize(self):
        """Feed every stored state through shared_obs_stats to update the
        running observation mean/std."""
        for i in range(len(self.memory.memory)):
            batch_states, _, _, _, _ = self.memory.sample_one_at_a_time()
            batch_states = Variable(torch.Tensor(batch_states))
            self.shared_obs_stats.observes(batch_states)
    def update_critic(self, batch_size, num_epoch):
        """Fit the value head on sampled (state, return) pairs with MSE loss,
        at 10x the actor learning rate."""
        self.gpu_model.train()
        optimizer = optim.Adam(self.gpu_model.parameters(), lr=10*self.lr)
        #optimizer = RAdam(self.model.parameters(), lr=self.lr*10)
        for k in range(num_epoch):
            batch_states, batch_actions, batch_next_states, batch_rewards, batch_q_values, _ = self.memory.sample(batch_size)
            batch_states = self.shared_obs_stats.normalize(Variable(torch.Tensor(batch_states))).to(device)
            # Returns are scaled into ~[0, 1] by the shared max reward.
            batch_q_values = Variable(torch.Tensor(batch_q_values)).to(device) / self.max_reward.value
            v_pred = self.gpu_model.get_value(batch_states, device=device)
            if self.base_controller is not None:
                v_pred = self.base_controller.get_value(batch_states) + v_pred
            loss_value = (v_pred - batch_q_values)**2
            loss_value = 0.5*torch.mean(loss_value)
            optimizer.zero_grad()
            loss_value.backward(retain_graph=True)
            optimizer.step()
        #print(loss_value)
    def update_actor(self, batch_size, num_epoch, supervised=False):
        """PPO clipped-surrogate update of the policy (or pure behavior cloning
        from expert trajectories when `supervised` is True); decays lr after.
        """
        model_old = self.Net(self.num_inputs, self.num_outputs, self.hidden_layer, num_contact=self.num_contact).to(device)
        model_old.load_state_dict(self.gpu_model.state_dict())
        model_old.set_noise(self.model.noise)
        self.gpu_model.train()
        optimizer = optim.Adam(self.gpu_model.parameters(), lr=self.lr)
        #optimizer = RAdam(self.model.parameters(), lr=self.lr)
        for k in range(num_epoch):
            batch_states, batch_actions, _, _, batch_q_values, batch_log_probs = self.memory.sample(batch_size)
            #mirror
            batch_mirror_states = np.copy(batch_states)
            batch_states = self.shared_obs_stats.normalize(Variable(torch.Tensor(batch_states))).to(device)
            batch_q_values = Variable(torch.Tensor(batch_q_values)).to(device) / self.max_reward.value
            #batch_q_values = self.return_obs_stats.normalize(Variable(torch.Tensor(batch_q_values)))
            batch_actions = Variable(torch.Tensor(batch_actions)).to(device)
            # Advantage = scaled return minus frozen-model value estimate.
            v_pred_old = model_old.get_value(batch_states, device=device)
            if self.base_controller is not None:
                v_pred_old += self.base_controller.get_value(batch_states)
            batch_advantages = (batch_q_values - v_pred_old)
            probs = self.gpu_model.calculate_prob_gpu(batch_states, batch_actions)
            # Behavior-policy log-probs were recorded at collection time.
            probs_old = Variable(torch.Tensor(batch_log_probs)).to(device)#model_old.calculate_prob_gpu(batch_states, batch_actions)
            ratio = (probs - (probs_old)).exp()
            ratio = ratio.unsqueeze(1)
            #print("ratio", ratio)
            #print(probs, probs_old)
            # PPO clipped surrogate objective.
            surr1 = ratio * batch_advantages
            surr2 = ratio.clamp(1-self.params.clip, 1+self.params.clip) * batch_advantages
            loss_clip = -torch.mean(torch.min(surr1, surr2))
            #expert loss
            if supervised is True:
                # Every 1000th epoch trains on the full expert buffer.
                if k % 1000 == 999:
                    batch_expert_states, batch_expert_actions, _, _, _ = self.expert_trajectory.sample(len(self.expert_trajectory.memory))
                else:
                    batch_expert_states, batch_expert_actions, _, _, _ = self.expert_trajectory.sample(min(batch_size, len(self.expert_trajectory.memory)))
                batch_expert_states = Variable(torch.Tensor(batch_expert_states)).to(device)
                batch_expert_actions = Variable(torch.Tensor(batch_expert_actions)).to(device)
                mu_expert = self.gpu_model.sample_best_actions(batch_expert_states)
                loss_expert = torch.mean((batch_expert_actions-mu_expert)**2)
                print(loss_expert)
            else:
                loss_expert = 0
            #mirror loss
            # (A large commented-out block computing a mirror-symmetry loss on
            #  mirrored states/actions was condensed away here.)
            loss_w = 0#torch.mean(batch_w**2)
            entropy_loss = -self.gpu_model.log_std.mean()
            if supervised:
                total_loss = 1.0*loss_expert
            else:
                total_loss = loss_clip
            #print(total_loss)
            #print("mirror_loss", mirror_loss)
            #print(k, loss_w)
            optimizer.zero_grad()
            total_loss.backward(retain_graph=True)
            #print(torch.nn.utils.clip_grad_norm(self.model.parameters(),1))
            optimizer.step()
        #print(self.shared_obs_stats.mean.data)
        # Learning-rate decay with a 1e-5 floor; `weight` decays toward 10.
        if self.lr > 1e-5:
            self.lr *= 0.99
        else:
            self.lr = 1e-5
        if self.weight > 10:
            self.weight *= 0.99
        if self.weight < 10:
            self.weight = 10.0
    def validation(self):
        """Measure the mean-squared action error of the current policy against
        300 held-out expert samples; track the best value seen."""
        batch_states, batch_actions, batch_next_states, batch_rewards, batch_q_values = self.validation_trajectory.sample(300)
        model_old = ActorCriticNet(self.num_inputs, self.num_outputs, self.hidden_layer)
        model_old.load_state_dict(self.model.state_dict())
        batch_states = Variable(torch.Tensor(batch_states))
        batch_q_values = Variable(torch.Tensor(batch_q_values))
        batch_actions = Variable(torch.Tensor(batch_actions))
        mu_old, log_std_old, v_pred_old = model_old(batch_states)
        loss = torch.mean((batch_actions-mu_old)**2)
        if loss.data < self.current_best_validation:
            self.current_best_validation = loss.data
        print("validation error", self.current_best_validation)
    def clear_memory(self):
        """Empty both the main and value replay buffers."""
        self.memory.clear()
        self.value_memory.clear()
    def save_model(self, filename):
        """Serialize the shared model's state dict to `filename`."""
        torch.save(self.model.state_dict(), filename)
    def save_shared_obs_stas(self, filename):
        """Pickle the observation-normalization statistics to `filename`."""
        with open(filename, 'wb') as output:
            pickle.dump(self.shared_obs_stats, output, pickle.HIGHEST_PROTOCOL)
    def save_statistics(self, filename):
        """Pickle training-progress statistics (time, sample count, test
        mean/std curves) to `filename`."""
        # NOTE(review): the local name shadows the imported `statistics` module.
        statistics = [self.time_passed, self.num_samples, self.test_mean, self.test_std, self.noisy_test_mean, self.noisy_test_std]
        with open(filename, 'wb') as output:
            pickle.dump(statistics, output, pickle.HIGHEST_PROTOCOL)
    def collect_samples_multithread(self):
        """Master training loop: spawn 50 collector processes, then repeatedly
        gather 25k samples, run critic/actor updates, evaluate, and flip the
        traffic light to release the workers; stops after 100 consecutive
        evaluations above a 3500 return."""
        #queue = Queue.Queue()
        import time
        self.start = time.time()
        self.lr = 1e-4
        self.weight = 10
        num_threads = 50
        self.num_samples = 0
        self.time_passed = 0
        score_counter = 0
        total_thread = 0
        max_samples = 25000
        seeds = [
            i * 100 for i in range(num_threads)
        ]
        self.explore_noise = mp.Value("f", -1.5)
        #self.base_noise = np.array([2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2])
        self.base_noise = np.ones(self.num_outputs)
        noise = self.base_noise * self.explore_noise.value
        #noise[[0, 1, 5, 6]] = -3
        ts = [
            mp.Process(target=self.collect_samples,args=(500,), kwargs={'noise':noise, 'random_seed':seed})
            for seed in seeds
        ]
        # NOTE(review): loop variable `t` shadows the `time as t` import above.
        for t in ts:
            t.start()
            #print("started")
        self.model.set_noise(noise)
        self.gpu_model.set_noise(noise)
        while score_counter < 100:
            # Periodically checkpoint progress statistics.
            if len(self.noisy_test_mean) % 100 == 1:
                self.save_statistics("stats/walker2d_contact_seed16_Iter%d.stat"%(len(self.noisy_test_mean)))
            #print(self.traffic_light.val.value)
            #if len(self.test_mean) % 100 == 1 and self.test_mean[len(self.test_mean)-1] > 300:
            #    self.save_model("torch_model/multiskill/v4_cassie3dMirrorIter%d.pt"%(len(self.test_mean),))
            # (A commented-out 50k-sample warm-up gather loop was condensed away.)
            self.save_model(self.model_name)
            # Drain worker batches until we have max_samples transitions.
            while len(self.memory.memory) < max_samples:
                #print(self.counter.get())
                if self.counter.get() == num_threads:
                    for i in range(num_threads):
                        #if random.randint(0, 1) == 0:
                        self.memory.push(self.queue.get())
                        #self.value_memory.push(self.value_queue.get())
                        total_thread += num_threads
                        # else:
                        #     self.memory.push_half(self.queue.get())
                    self.counter.increment()
                if self.counter.get() == num_threads + 1 and len(self.memory.memory) < max_samples:
                    self.traffic_light.switch()
                    self.counter.reset()
            self.num_samples += len(self.memory.memory)
            #while not self.best_score_queue.empty():
            #    self.best_trajectory.push_half(self.best_score_queue.get())
            #self.normalize()
            #self.model.to(device)
            # Copy shared weights to the device model, update, and copy back.
            self.gpu_model.load_state_dict(self.model.state_dict())
            self.gpu_model.to(device)
            self.gpu_model.set_noise(self.model.noise)
            if self.base_controller is not None:
                self.base_controller.to(device)
            self.update_critic(min(128, len(self.memory.memory)), (len(self.memory.memory)//3000 + 1)*64)
            self.update_actor(min(128, len(self.memory.memory)), (len(self.memory.memory)//3000 + 1)*64, supervised=False)
            #self.update_critic(128, 2560)
            #self.update_actor(128, 2560, supervised=False)
            self.gpu_model.to("cpu")
            if self.base_controller is not None:
                self.base_controller.to("cpu")
            self.model.load_state_dict(self.gpu_model.state_dict())
            self.clear_memory()
            self.run_test(num_test=2)
            self.run_test_with_noise(num_test=2)
            print(self.num_samples, self.noisy_test_mean[-1])
            # Count consecutive evaluations above the target return.
            if self.noisy_test_mean[-1] > 3500:
                score_counter += 1
            else:
                score_counter = 0
            if self.explore_noise.value > -1.5:
                print("main", self.model.noise)
                self.explore_noise.value *= 1.001
                self.model.noise = self.base_noise * self.explore_noise.value
            print(self.max_reward.value)
            self.plot_statistics()
            self.time_passed = time.time() - self.start
            total_thread = 0
            #print("main", self.model.p_fcs[1].bias.data[0])
            # Release workers to collect with the updated weights.
            self.traffic_light.switch()
            self.counter.reset()
    def add_env(self, env):
        """Append an additional environment.

        NOTE(review): self.env_list is never initialized in __init__, so this
        raises AttributeError unless a caller sets env_list first.
        """
        self.env_list.append(env)
def mkdir(base, name):
    """Join `base`/`name`, create the directory if missing, return the path.

    Uses os.makedirs(..., exist_ok=True) so the create is atomic with respect
    to concurrent callers; the original check-then-create pattern could raise
    FileExistsError if another process created the directory between the
    os.path.exists test and os.makedirs.
    """
    path = os.path.join(base, name)
    os.makedirs(path, exist_ok=True)
    return path
# Entry point: train a contact-aware PPO agent on Walker2d.
if __name__ == '__main__':
    # Fix every RNG (python, torch CPU/CUDA, numpy) for reproducibility.
    seed = 16
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    # One intra-op thread; parallelism comes from the 50 worker processes.
    torch.set_num_threads(1)
    import gym
    env = gym.make("Walker2d-v2")
    # NOTE(review): set_contact is a custom env extension, not stock gym.
    env.set_contact(1)
    ppo = RL(env, [256, 256], contact=True)
    #ppo.base_controller = ActorCriticNet(ppo.num_inputs, ppo.num_outputs, hidden_layer=[256, 256, 256, 256, 256], num_contact=2)
    #ppo.base_controller.load_state_dict(torch.load("torch_model/StepperOct06.pt"))
    ppo.model_name = "torch_model/walker2d_contact_seed16.pt"
    #ppo.model.load_state_dict(torch.load("torch_model/Stepper256X5_65_10_seed8.pt"))
    #ppo.env.set_difficulty([0.65, 0.65, 20, 20])
    #ppo.max_reward.value = 50
    #with open('torch_model/cassie3dMirror2kHz_shared_obs_stats.pkl', 'rb') as input:
    #    shared_obs_stats = pickle.load(input)
    #ppo.normalize_data()
    #ppo.save_shared_obs_stas("torch_model/cassie_terrain_obs_stats.pkl")
    # ppo.collect_expert_samples(500, "torch_model/Stepper256X5_65_00_seed8.pt", noise=-2.0, difficulty = [0.65, 0])
    # ppo.collect_expert_samples(500, "torch_model/Stepper256X5_75_00_seed8.pt", noise=-2.0, difficulty = [0.75, 0])
    # ppo.collect_expert_samples(500, "torch_model/Stepper256X5_85_00_seed8.pt", noise=-2.0, difficulty = [0.85, 0])
    # ppo.collect_expert_samples(500, "torch_model/Stepper256X5_65_10_seed8.pt", noise=-2.0, difficulty = [0.65, 10])
    #ppo.save_model(ppo.model_name)
    ppo.collect_samples_multithread()
    #ppo.start = t.time()
"statistics.stdev",
"matplotlib.pyplot.ylabel",
"utils.Counter",
"torch.from_numpy",
"torch.min",
"torch.cuda.is_available",
"torch.sum",
"sys.path.append",
"gym.make",
"model.ActorCriticNet",
"os.path.exists",
"torch.multiprocessing.Queue",
"torch.mean",
"params.Params",
"matplotlib.pyp... | [((812, 879), 'sys.path.append', 'sys.path.append', (['"""/home/zhaoming/Documents/dev/gym/gym/envs/mujoco"""'], {}), "('/home/zhaoming/Documents/dev/gym/gym/envs/mujoco')\n", (827, 879), False, 'import sys\n'), ((2559, 2578), 'torch.sum', 'torch.sum', (['a'], {'dim': '(1)'}), '(a, dim=1)\n', (2568, 2578), False, 'import torch\n'), ((2587, 2612), 'torch.sum', 'torch.sum', (['log_std'], {'dim': '(1)'}), '(log_std, dim=1)\n', (2596, 2612), False, 'import torch\n'), ((32162, 32186), 'os.path.join', 'os.path.join', (['base', 'name'], {}), '(base, name)\n', (32174, 32186), False, 'import os\n'), ((32308, 32325), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (32319, 32325), False, 'import random\n'), ((32330, 32353), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (32347, 32353), False, 'import torch\n'), ((32358, 32386), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (32380, 32386), False, 'import torch\n'), ((32391, 32411), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (32405, 32411), True, 'import numpy as np\n'), ((32416, 32440), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (32437, 32440), False, 'import torch\n'), ((32466, 32489), 'gym.make', 'gym.make', (['"""Walker2d-v2"""'], {}), "('Walker2d-v2')\n", (32474, 32489), False, 'import gym\n'), ((687, 712), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (710, 712), False, 'import torch\n'), ((2234, 2261), 'random.shuffle', 'random.shuffle', (['self.memory'], {}), '(self.memory)\n', (2248, 2261), False, 'import random\n'), ((3000, 3008), 'params.Params', 'Params', ([], {}), '()\n', (3006, 3008), False, 'from params import Params\n'), ((3310, 3343), 'model.Shared_obs_stats', 'Shared_obs_stats', (['self.num_inputs'], {}), '(self.num_inputs)\n', (3326, 3343), False, 'from model import ActorCriticNet, Shared_obs_stats, ActorCriticNetWithContact\n'), ((3582, 
3594), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3592, 3594), True, 'import matplotlib.pyplot as plt\n'), ((3670, 3691), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3678, 3691), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3786), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3784, 3786), True, 'import torch.multiprocessing as mp\n'), ((3814, 3824), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3822, 3824), True, 'import torch.multiprocessing as mp\n'), ((3953, 3967), 'utils.TrafficLight', 'TrafficLight', ([], {}), '()\n', (3965, 3967), False, 'from utils import TrafficLight\n'), ((3991, 4000), 'utils.Counter', 'Counter', ([], {}), '()\n', (3998, 4000), False, 'from utils import Counter\n'), ((4084, 4094), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (4092, 4094), True, 'import torch.multiprocessing as mp\n'), ((4121, 4137), 'torch.multiprocessing.Value', 'mp.Value', (['"""f"""', '(0)'], {}), "('f', 0)\n", (4129, 4137), True, 'import torch.multiprocessing as mp\n'), ((4164, 4180), 'torch.multiprocessing.Value', 'mp.Value', (['"""f"""', '(1)'], {}), "('f', 1)\n", (4172, 4180), True, 'import torch.multiprocessing as mp\n'), ((4404, 4423), 'model.Shared_obs_stats', 'Shared_obs_stats', (['(1)'], {}), '(1)\n', (4420, 4423), False, 'from model import ActorCriticNet, Shared_obs_stats, ActorCriticNetWithContact\n'), ((6986, 7016), 'statistics.mean', 'statistics.mean', (['total_rewards'], {}), '(total_rewards)\n', (7001, 7016), False, 'import statistics\n'), ((7038, 7069), 'statistics.stdev', 'statistics.stdev', (['total_rewards'], {}), '(total_rewards)\n', (7054, 7069), False, 'import statistics\n'), ((8555, 8585), 'statistics.mean', 'statistics.mean', (['total_rewards'], {}), '(total_rewards)\n', (8570, 8585), False, 'import statistics\n'), ((8607, 8638), 'statistics.stdev', 'statistics.stdev', (['total_rewards'], {}), '(total_rewards)\n', (8623, 8638), 
False, 'import statistics\n'), ((9378, 9402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (9388, 9402), True, 'import matplotlib.pyplot as plt\n'), ((9411, 9440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average rewards"""'], {}), "('average rewards')\n", (9421, 9440), True, 'import matplotlib.pyplot as plt\n'), ((9835, 9859), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (9846, 9859), False, 'import random\n'), ((9868, 9902), 'torch.manual_seed', 'torch.manual_seed', (['(random_seed + 1)'], {}), '(random_seed + 1)\n', (9885, 9902), False, 'import torch\n'), ((9909, 9940), 'numpy.random.seed', 'np.random.seed', (['(random_seed + 2)'], {}), '(random_seed + 2)\n', (9923, 9940), True, 'import numpy as np\n'), ((16178, 16222), 'gym.make', 'gym.make', (['"""mocca_envs:Walker3DStepperEnv-v0"""'], {}), "('mocca_envs:Walker3DStepperEnv-v0')\n", (16186, 16222), False, 'import gym\n'), ((26192, 26260), 'model.ActorCriticNet', 'ActorCriticNet', (['self.num_inputs', 'self.num_outputs', 'self.hidden_layer'], {}), '(self.num_inputs, self.num_outputs, self.hidden_layer)\n', (26206, 26260), False, 'from model import ActorCriticNet, Shared_obs_stats, ActorCriticNetWithContact\n'), ((26587, 26628), 'torch.mean', 'torch.mean', (['((batch_actions - mu_old) ** 2)'], {}), '((batch_actions - mu_old) ** 2)\n', (26597, 26628), False, 'import torch\n'), ((27553, 27564), 'time.time', 'time.time', ([], {}), '()\n', (27562, 27564), False, 'import time\n'), ((27880, 27899), 'torch.multiprocessing.Value', 'mp.Value', (['"""f"""', '(-1.5)'], {}), "('f', -1.5)\n", (27888, 27899), True, 'import torch.multiprocessing as mp\n'), ((28028, 28053), 'numpy.ones', 'np.ones', (['self.num_outputs'], {}), '(self.num_outputs)\n', (28035, 28053), True, 'import numpy as np\n'), ((32198, 32218), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (32212, 32218), False, 'import os\n'), ((32228, 32245), 'os.makedirs', 
'os.makedirs', (['path'], {}), '(path)\n', (32239, 32245), False, 'import os\n'), ((3849, 3859), 'torch.multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (3857, 3859), True, 'import torch.multiprocessing as mp\n'), ((3861, 3871), 'torch.multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (3869, 3871), True, 'import torch.multiprocessing as mp\n'), ((3873, 3883), 'torch.multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (3881, 3883), True, 'import torch.multiprocessing as mp\n'), ((3885, 3895), 'torch.multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (3893, 3895), True, 'import torch.multiprocessing as mp\n'), ((5269, 5302), 'numpy.random.randn', 'np.random.randn', (['self.num_outputs'], {}), '(self.num_outputs)\n', (5284, 5302), True, 'import numpy as np\n'), ((5549, 5616), 'pickle.dump', 'pickle.dump', (['self.shared_obs_stats', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.shared_obs_stats, output, pickle.HIGHEST_PROTOCOL)\n', (5560, 5616), False, 'import pickle\n'), ((13008, 13019), 'torch.autograd.Variable', 'Variable', (['R'], {}), '(R)\n', (13016, 13019), False, 'from torch.autograd import Variable\n'), ((16583, 16603), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (16593, 16603), False, 'import torch\n'), ((16636, 16661), 'numpy.ones', 'np.ones', (['self.num_outputs'], {}), '(self.num_outputs)\n', (16643, 16661), True, 'import numpy as np\n'), ((18443, 18454), 'torch.autograd.Variable', 'Variable', (['R'], {}), '(R)\n', (18451, 18454), False, 'from torch.autograd import Variable\n'), ((21328, 21349), 'numpy.copy', 'np.copy', (['batch_states'], {}), '(batch_states)\n', (21335, 21349), True, 'import numpy as np\n'), ((26352, 26378), 'torch.Tensor', 'torch.Tensor', (['batch_states'], {}), '(batch_states)\n', (26364, 26378), False, 'import torch\n'), ((26414, 26442), 'torch.Tensor', 'torch.Tensor', (['batch_q_values'], {}), '(batch_q_values)\n', (26426, 26442), False, 'import torch\n'), ((26477, 26504), 'torch.Tensor', 
'torch.Tensor', (['batch_actions'], {}), '(batch_actions)\n', (26489, 26504), False, 'import torch\n'), ((27081, 27148), 'pickle.dump', 'pickle.dump', (['self.shared_obs_stats', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.shared_obs_stats, output, pickle.HIGHEST_PROTOCOL)\n', (27092, 27148), False, 'import pickle\n'), ((27380, 27436), 'pickle.dump', 'pickle.dump', (['statistics', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(statistics, output, pickle.HIGHEST_PROTOCOL)\n', (27391, 27436), False, 'import pickle\n'), ((28174, 28276), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'self.collect_samples', 'args': '(500,)', 'kwargs': "{'noise': noise, 'random_seed': seed}"}), "(target=self.collect_samples, args=(500,), kwargs={'noise': noise,\n 'random_seed': seed})\n", (28184, 28276), True, 'import torch.multiprocessing as mp\n'), ((28343, 28352), 'time.start', 't.start', ([], {}), '()\n', (28350, 28352), True, 'import time as t\n'), ((1994, 2032), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2007, 2032), False, 'import random\n'), ((2063, 2083), 'numpy.concatenate', 'np.concatenate', (['x', '(0)'], {}), '(x, 0)\n', (2077, 2083), True, 'import numpy as np\n'), ((2434, 2454), 'numpy.concatenate', 'np.concatenate', (['x', '(0)'], {}), '(x, 0)\n', (2448, 2454), True, 'import numpy as np\n'), ((12929, 12946), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (12940, 12946), False, 'import torch\n'), ((18321, 18338), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (18332, 18338), False, 'import torch\n'), ((19519, 19545), 'torch.Tensor', 'torch.Tensor', (['batch_states'], {}), '(batch_states)\n', (19531, 19545), False, 'import torch\n'), ((20488, 20510), 'torch.mean', 'torch.mean', (['loss_value'], {}), '(loss_value)\n', (20498, 20510), False, 'import torch\n'), ((23313, 23364), 'torch.mean', 'torch.mean', (['((batch_expert_actions - mu_expert) ** 2)'], {}), 
'((batch_expert_actions - mu_expert) ** 2)\n', (23323, 23364), False, 'import torch\n'), ((31876, 31887), 'time.time', 'time.time', ([], {}), '()\n', (31885, 31887), False, 'import time\n'), ((4712, 4731), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (4724, 4731), False, 'import torch\n'), ((5712, 5731), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (5724, 5731), False, 'import torch\n'), ((7358, 7377), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (7370, 7377), False, 'import torch\n'), ((10472, 10491), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (10484, 10491), False, 'import torch\n'), ((16761, 16780), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (16773, 16780), False, 'import torch\n'), ((18404, 18420), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (18414, 18420), False, 'import torch\n'), ((22572, 22595), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (22581, 22595), False, 'import torch\n'), ((5461, 5480), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (5473, 5480), False, 'import torch\n'), ((7741, 7754), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (7749, 7754), False, 'from torch.autograd import Variable\n'), ((15575, 15594), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (15587, 15594), False, 'import torch\n'), ((18560, 18588), 'torch.from_numpy', 'torch.from_numpy', (['rewards[i]'], {}), '(rewards[i])\n', (18576, 18588), False, 'import torch\n'), ((19093, 19112), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (19105, 19112), False, 'import torch\n'), ((21713, 21740), 'torch.Tensor', 'torch.Tensor', (['batch_actions'], {}), '(batch_actions)\n', (21725, 21740), False, 'import torch\n'), ((22141, 22170), 'torch.Tensor', 'torch.Tensor', (['batch_log_probs'], {}), '(batch_log_probs)\n', (22153, 22170), False, 'import torch\n'), ((6875, 6894), 'torch.Tensor', 
'torch.Tensor', (['state'], {}), '(state)\n', (6887, 6894), False, 'import torch\n'), ((8444, 8463), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (8456, 8463), False, 'import torch\n'), ((12358, 12377), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (12370, 12377), False, 'import torch\n'), ((13111, 13144), 'torch.from_numpy', 'torch.from_numpy', (['real_rewards[i]'], {}), '(real_rewards[i])\n', (13127, 13144), False, 'import torch\n'), ((17844, 17863), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (17856, 17863), False, 'import torch\n'), ((20059, 20085), 'torch.Tensor', 'torch.Tensor', (['batch_states'], {}), '(batch_states)\n', (20071, 20085), False, 'import torch\n'), ((20137, 20165), 'torch.Tensor', 'torch.Tensor', (['batch_q_values'], {}), '(batch_q_values)\n', (20149, 20165), False, 'import torch\n'), ((21431, 21457), 'torch.Tensor', 'torch.Tensor', (['batch_states'], {}), '(batch_states)\n', (21443, 21457), False, 'import torch\n'), ((21509, 21537), 'torch.Tensor', 'torch.Tensor', (['batch_q_values'], {}), '(batch_q_values)\n', (21521, 21537), False, 'import torch\n'), ((23058, 23091), 'torch.Tensor', 'torch.Tensor', (['batch_expert_states'], {}), '(batch_expert_states)\n', (23070, 23091), False, 'import torch\n'), ((23152, 23186), 'torch.Tensor', 'torch.Tensor', (['batch_expert_actions'], {}), '(batch_expert_actions)\n', (23164, 23186), False, 'import torch\n'), ((6664, 6683), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (6676, 6683), False, 'import torch\n'), ((8233, 8252), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (8245, 8252), False, 'import torch\n'), ((12179, 12192), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (12189, 12192), False, 'import torch\n'), ((12279, 12292), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (12289, 12292), False, 'import torch\n'), ((17782, 17795), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (17792, 17795), False, 'import 
torch\n')] |
import os
import os.path as osp
import torch
from PIL import Image
import numpy as np
import json
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
import pickle
class Flickr(torch.utils.data.Dataset):
    """`Flickr30k Entities <http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/>`_ Dataset.

    Loads sentence, box and scene-graph annotations plus precomputed
    detector features for phrase grounding on Flickr30k Entities.

    Args:
        img_dir (string): Root directory holding the ``<img_id>.jpg`` images.
        anno_dir (string): Directory with the JSON/pickle annotation files.
        split (string): Path to a split file; each line is ``<img_id>TAB<sent_id>``.
        transforms (callable, optional): A function/transform applied to
            ``(image, target, precompute_bbox, img_scale)``.
    """
    def __init__(self, img_dir, anno_dir, split, transforms=None):
        super(Flickr, self).__init__()
        self.transforms = transforms
        self.img_root = img_dir
        # Per-image annotation dicts keyed by image id.
        self.sent_anno = json.load(open(osp.join(anno_dir, 'sent_anno.json'), 'r'))
        self.box_anno = json.load(open(osp.join(anno_dir, 'box_anno.json'), 'r'))
        self.sg_anno = json.load(open(osp.join(anno_dir, 'sg_anno.json'), 'r'))
        # Top-N candidate boxes per (image, sentence) pair.
        # with open(osp.join(anno_dir, 'topN_boxes_mesh_all.pkl'), 'rb') as load_f:
        with open(osp.join(anno_dir, 'topN_boxes_mesh.pkl'), 'rb') as load_f:
            self.topN_box_anno = pickle.load(load_f)
        with open(osp.join(anno_dir, 'object_vocab_elmo_embed.pkl'), 'rb') as load_f:
            self.vocab_embed = pickle.load(load_f)
        self.vocab_embed = torch.FloatTensor(self.vocab_embed) ## 1600*1024
        # The split file lists one "<img_id>\t<sent_id>" entry per line.
        split_file = open(split, 'r')
        data_ids = split_file.readlines()
        self.ids = [i.strip() for i in data_ids]
    def get_sentence(self, img_id, sent_id):
        """Return the sent_id-th sentence annotation for image img_id."""
        sent_anno = self.sent_anno[img_id]
        select_sent = sent_anno[sent_id]
        return select_sent
    def get_gt_boxes(self, img_id):
        """Return ``(box_ids, gt_boxes)`` for image img_id.

        A phrase mapped to several regions is represented by the union of
        those boxes.

        Returns:
            (list, numpy.ndarray): phrase/box ids and an (N, 4) xyxy array.
        """
        box_anno = self.box_anno[img_id]
        gt_boxes = []
        box_ids = []
        for k, v in box_anno['boxes'].items():
            box_ids.append(k)
            if len(v) == 1:
                gt_boxes.append(v[0])
            else:
                # when a phrase respond to multiple regions, we take the union of them as paper given
                v = np.array(v)
                box = [v[:, 0].min(), v[:, 1].min(), v[:, 2].max(), v[:, 3].max()]
                gt_boxes.append(box)
        gt_boxes = np.array(gt_boxes)
        return box_ids, gt_boxes
    def __getitem__(self, index):
        """
        Args:
            index (int): Index into self.ids ("<img_id>\t<sent_id>" entries).
        Returns:
            tuple: (None, target, img_id, phrase_ids, sent_id, sentence,
            precompute_bbox, precompute_score, feature_map, vocab_label_elmo,
            sent_sg, topN_box).  The first element is None: the raw image
            tensor is apparently not needed because precomputed backbone
            features are returned instead.
        """
        img_id, sent_id = self.ids[index].split('\t')[0], self.ids[index].split('\t')[1]
        topN_box = self.topN_box_anno[img_id][int(sent_id)]
        filename = os.path.join(self.img_root, img_id+'.jpg')
        img = Image.open(filename).convert('RGB')
        sent_sg = self.sg_anno[img_id]['relations'][int(sent_id)] #! [relation_num,3]
        _, feature_map, precompute_bbox, img_scale, precompute_score, cls_label = self.get_precompute_img_feat(img_id) #! feature_map [1,1024,H,W] precompute_bbox [box_num,4] precompute_score [box_num,] cls_label [box_num.]
        precompute_bbox = BoxList(precompute_bbox, img_scale, mode='xyxy') #! please pay attention to the precomputed box's scale, make sure which scale of the precomputed box, original image or resized image? here is resized image
        if cfg.MODEL.VG.USE_BOTTOMUP_NMS:
            # Optional extra NMS over the bottom-up proposals; keep scores in sync.
            precompute_bbox.add_field("scores", torch.FloatTensor(precompute_score))
            precompute_bbox, keep_inds = boxlist_nms(precompute_bbox, cfg.MODEL.VG.BOTTOMUP_NMS_THRESH, require_keep_idx=True)
            precompute_score = precompute_score[keep_inds.numpy()]
        sentence = self.get_sentence(img_id, int(sent_id))
        phrase_ids, gt_boxes = self.get_gt_boxes(img_id) #! phrase_ids [phrase_num,] gt_boxes [phrase_num,4]
        target = BoxList(gt_boxes, img.size, mode="xyxy")
        vocab_label_elmo = self.vocab_embed[cls_label]
        if self.transforms is not None:
            img, target, precompute_bbox, img_scale = self.transforms(img, target, precompute_bbox, img_scale)
        return None, target, img_id, phrase_ids, sent_id, sentence, precompute_bbox, precompute_score, feature_map, vocab_label_elmo, sent_sg, topN_box #! sentence dict
    def get_img_info(self, index):
        """Return file name and original image size metadata for sample *index*."""
        img_id, sent_id = self.ids[index].split('\t')[0], self.ids[index].split('\t')[1]
        box_anno = self.box_anno[img_id]
        img_info = {'file_name': os.path.join(self.img_root, img_id+'.jpg'),
                    'height': box_anno['height'],
                    'width': box_anno['width'],
                    'id': img_id}
        return img_info
    def get_precompute_img_feat(self, img_id):
        """Load the precomputed detector output for image img_id.

        NOTE(review): the pickle path is hard-coded relative to the current
        working directory ('../flickr_datasets/flickr30k_feat_nms/') --
        confirm the expected launch directory.

        Returns:
            (None, feature_map, boxes, img_scale, cls_scores, cls_label)
        """
        with open(osp.join('../flickr_datasets/flickr30k_feat_nms/{}.pkl'.format(img_id)), 'rb') as load_f:
            res = pickle.load(load_f)
        feature_map = torch.FloatTensor(res['features']) ## 1*1024*h*w ## feature map in res4
        precompute_bbox = res['boxes'][:, :4]
        img_scale = res['img_scale'] ## value to denote the image scale
        cls_scores = res['boxes'][:, 4] ## (N,) denote the detection score
        cls_label = res['boxes'][:, 5] - 1 ## for MSCOCO 0~80
        cls_label = cls_label.astype(np.int32)
        return None, feature_map, precompute_bbox, img_scale, cls_scores, cls_label
    def get_object_detection_label(self, cls_label):
        """Map numeric class labels to tokenised vocabulary words.

        NOTE(review): ``self.vocab_anno`` is never assigned in __init__, so
        calling this method raises AttributeError -- it looks unused/dead.
        """
        object_vocab = []
        object_vocab_len = []
        cls_label = cls_label.astype(np.int32)
        for label in cls_label.tolist():
            vocab = self.vocab_anno[str(label)].split()
            object_vocab.append(vocab)
            object_vocab_len.append(len(vocab))
        return object_vocab, object_vocab_len
    def __len__(self):
        """Number of (image, sentence) pairs in the split."""
        return len(self.ids)
| [
"PIL.Image.open",
"pickle.load",
"os.path.join",
"maskrcnn_benchmark.structures.bounding_box.BoxList",
"numpy.array",
"maskrcnn_benchmark.structures.boxlist_ops.boxlist_nms",
"torch.FloatTensor"
] | [((1875, 1910), 'torch.FloatTensor', 'torch.FloatTensor', (['self.vocab_embed'], {}), '(self.vocab_embed)\n', (1892, 1910), False, 'import torch\n'), ((2765, 2783), 'numpy.array', 'np.array', (['gt_boxes'], {}), '(gt_boxes)\n', (2773, 2783), True, 'import numpy as np\n'), ((3197, 3241), 'os.path.join', 'os.path.join', (['self.img_root', "(img_id + '.jpg')"], {}), "(self.img_root, img_id + '.jpg')\n", (3209, 3241), False, 'import os\n'), ((3633, 3681), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['precompute_bbox', 'img_scale'], {'mode': '"""xyxy"""'}), "(precompute_bbox, img_scale, mode='xyxy')\n", (3640, 3681), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((4352, 4392), 'maskrcnn_benchmark.structures.bounding_box.BoxList', 'BoxList', (['gt_boxes', 'img.size'], {'mode': '"""xyxy"""'}), "(gt_boxes, img.size, mode='xyxy')\n", (4359, 4392), False, 'from maskrcnn_benchmark.structures.bounding_box import BoxList\n'), ((5361, 5395), 'torch.FloatTensor', 'torch.FloatTensor', (["res['features']"], {}), "(res['features'])\n", (5378, 5395), False, 'import torch\n'), ((1689, 1708), 'pickle.load', 'pickle.load', (['load_f'], {}), '(load_f)\n', (1700, 1708), False, 'import pickle\n'), ((1827, 1846), 'pickle.load', 'pickle.load', (['load_f'], {}), '(load_f)\n', (1838, 1846), False, 'import pickle\n'), ((4009, 4098), 'maskrcnn_benchmark.structures.boxlist_ops.boxlist_nms', 'boxlist_nms', (['precompute_bbox', 'cfg.MODEL.VG.BOTTOMUP_NMS_THRESH'], {'require_keep_idx': '(True)'}), '(precompute_bbox, cfg.MODEL.VG.BOTTOMUP_NMS_THRESH,\n require_keep_idx=True)\n', (4020, 4098), False, 'from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms\n'), ((4975, 5019), 'os.path.join', 'os.path.join', (['self.img_root', "(img_id + '.jpg')"], {}), "(self.img_root, img_id + '.jpg')\n", (4987, 5019), False, 'import os\n'), ((5318, 5337), 'pickle.load', 'pickle.load', (['load_f'], {}), '(load_f)\n', (5329, 5337), False, 'import 
pickle\n'), ((1287, 1323), 'os.path.join', 'osp.join', (['anno_dir', '"""sent_anno.json"""'], {}), "(anno_dir, 'sent_anno.json')\n", (1295, 1323), True, 'import os.path as osp\n'), ((1370, 1405), 'os.path.join', 'osp.join', (['anno_dir', '"""box_anno.json"""'], {}), "(anno_dir, 'box_anno.json')\n", (1378, 1405), True, 'import os.path as osp\n'), ((1451, 1485), 'os.path.join', 'osp.join', (['anno_dir', '"""sg_anno.json"""'], {}), "(anno_dir, 'sg_anno.json')\n", (1459, 1485), True, 'import os.path as osp\n'), ((1596, 1637), 'os.path.join', 'osp.join', (['anno_dir', '"""topN_boxes_mesh.pkl"""'], {}), "(anno_dir, 'topN_boxes_mesh.pkl')\n", (1604, 1637), True, 'import os.path as osp\n'), ((1728, 1777), 'os.path.join', 'osp.join', (['anno_dir', '"""object_vocab_elmo_embed.pkl"""'], {}), "(anno_dir, 'object_vocab_elmo_embed.pkl')\n", (1736, 1777), True, 'import os.path as osp\n'), ((2614, 2625), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2622, 2625), True, 'import numpy as np\n'), ((3254, 3274), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3264, 3274), False, 'from PIL import Image\n'), ((3931, 3966), 'torch.FloatTensor', 'torch.FloatTensor', (['precompute_score'], {}), '(precompute_score)\n', (3948, 3966), False, 'import torch\n')] |
"""
Encapsulates the functionality for representing
and operating on the chess environment.
"""
import copy
import enum
from logging import getLogger
import chess.pgn
import numpy as np
logger = getLogger(__name__)
# noinspection PyArgumentList
# Possible game outcomes.
Winner = enum.Enum("Winner", "black white draw")
# Input-plane layout for the network: 12 one-hot piece planes (white KQRBNP
# then black kqrbnp), 4 castling-right planes, one fifty-move-counter plane
# and one en-passant plane -- 18 planes of 8x8 in total.
# noinspection SpellCheckingInspection
pieces_order = 'KQRBNPkqrbnp' # 12x8x8
castling_order = 'KQkq' # 4x8x8
# fifty-move-rule # 1x8x8
# en en_passant # 1x8x8
ind = {pieces_order[i]: i for i in range(12)}  # piece letter -> plane index
class ChessEnv:
    """
    Represents a chess environment where a chess game is played.
    Attributes:
        :ivar chess.Board board: current board state
        :ivar int num_halfmoves: number of half moves performed in total by each player
        :ivar Winner winner: winner of the game
        :ivar boolean resigned: whether non-winner resigned
        :ivar str result: str encoding of the result, 1-0, 0-1, or 1/2-1/2
    """
    def __init__(self):
        self.board = None
        self.num_halfmoves = 0
        self.winner = None  # type: Winner
        self.resigned = False
        self.result = None
    def reset(self):
        """
        Resets to begin a new game
        :return ChessEnv: self
        """
        self.board = chess.Board()
        self.num_halfmoves = 0
        self.winner = None
        self.resigned = False
        return self
    def update(self, board):
        """
        Like reset, but resets the position to whatever was supplied for board.
        NOTE(review): unlike reset(), num_halfmoves is NOT reset here --
        confirm whether that is intentional.
        :param chess.Board board: position to reset to
        :return ChessEnv: self
        """
        self.board = chess.Board(board)
        self.winner = None
        self.resigned = False
        return self
    @property
    def done(self):
        """True once a winner (or draw) has been decided."""
        return self.winner is not None
    @property
    def white_won(self):
        """True if white won the game."""
        return self.winner == Winner.white
    @property
    def white_to_move(self):
        """True if it is white's turn to move."""
        return self.board.turn == chess.WHITE
    def step(self, action: str, check_over=True):
        """
        Takes an action and updates the game state
        :param str action: action to take in uci notation; None counts as resignation
        :param boolean check_over: whether to check if game is over
        """
        if check_over and action is None:
            self._resign()
            return
        self.board.push_uci(action)
        self.num_halfmoves += 1
        if check_over and self.board.result(claim_draw=True) != "*":
            self._game_over()
    def _game_over(self):
        """Record the result of a finished game in winner/result."""
        if self.winner is None:
            self.result = self.board.result(claim_draw=True)
            if self.result == '1-0':
                self.winner = Winner.white
            elif self.result == '0-1':
                self.winner = Winner.black
            else:
                self.winner = Winner.draw
    def _resign(self):
        """Resign on behalf of the side to move; the opponent wins."""
        self.resigned = True
        if self.white_to_move: # WHITE RESIGNED!
            self.winner = Winner.black
            self.result = "0-1"
        else:
            self.winner = Winner.white
            self.result = "1-0"
    def adjudicate(self):
        """Decide the game by material evaluation (used to cut off long games)."""
        score = self.testeval(absolute=True)
        if abs(score) < 0.01:
            self.winner = Winner.draw
            self.result = "1/2-1/2"
        elif score > 0:
            self.winner = Winner.white
            self.result = "1-0"
        else:
            self.winner = Winner.black
            self.result = "0-1"
    def ending_average_game(self):
        """Declare the game drawn."""
        self.winner = Winner.draw
        self.result = "1/2-1/2"
    def copy(self):
        """Return a shallow copy of the env with its own copy of the board."""
        env = copy.copy(self)
        env.board = copy.copy(self.board)
        return env
    def render(self):
        """Print an ASCII rendering of the board to stdout."""
        print("\n")
        print(self.board)
        print("\n")
    @property
    def observation(self):
        """Current position as a FEN string."""
        return self.board.fen()
    def deltamove(self, fen_next):
        """Return the uci move leading from the current position to fen_next,
        or None if no legal move does."""
        moves = list(self.board.legal_moves)
        for mov in moves:
            self.board.push(mov)
            fee = self.board.fen()
            self.board.pop()
            if fee == fen_next:
                return mov.uci()
        return None
    def replace_tags(self):
        """Current board field of the FEN expanded to a flat 64-char string."""
        return replace_tags_board(self.board.fen())
    def canonical_input_planes(self):
        """
        :return: a representation of the board using an (18, 8, 8) shape, good as input to a policy / value network
        """
        return canon_input_planes(self.board.fen())
    def testeval(self, absolute=False) -> float:
        """Quick material evaluation of the current position (see module-level testeval)."""
        return testeval(self.board.fen(), absolute)
def testeval(fen, absolute=False) -> float:
piece_vals = {'K': 3, 'Q': 14, 'R': 5, 'B': 3.25, 'N': 3, 'P': 1} # somehow it doesn't know how to keep its queen
ans = 0.0
tot = 0
for c in fen.split(' ')[0]:
if not c.isalpha():
continue
if c.isupper():
ans += piece_vals[c]
tot += piece_vals[c]
else:
ans -= piece_vals[c.upper()]
tot += piece_vals[c.upper()]
v = ans / tot
if not absolute and is_black_turn(fen):
v = -v
assert abs(v) < 1
return np.tanh(v * 3) # arbitrary
def check_current_planes(realfen, planes):
    """Sanity-check that *planes* (an (18, 8, 8) canonical input) encodes
    the position given by *realfen*.

    The 12 piece planes are decoded back into a flat 64-character board
    string, and the auxiliary planes (castling rights, fifty-move counter,
    en-passant square) are asserted against the canonicalised FEN fields.

    :param str realfen: the position the planes were built from
    :param planes: (18, 8, 8) array as produced by canon_input_planes()
    :return bool: True if the decoded board matches the FEN board field
    """
    cur = planes[0:12]
    assert cur.shape == (12, 8, 8)
    fakefen = ["1"] * 64
    for i in range(12):
        for rank in range(8):
            for file in range(8):
                if cur[i][rank][file] == 1:
                    # No two piece planes may claim the same square.
                    assert fakefen[rank * 8 + file] == '1'
                    fakefen[rank * 8 + file] = pieces_order[i]
    castling = planes[12:16]
    fiftymove = planes[16][0][0]
    ep = planes[17]
    castlingstring = ""
    for i in range(4):
        if castling[i][0][0] == 1:
            castlingstring += castling_order[i]
    if len(castlingstring) == 0:
        castlingstring = '-'
    epstr = "-"
    for rank in range(8):
        for file in range(8):
            if ep[rank][file] == 1:
                epstr = coord_to_alg((rank, file))
    # Compare against the canonical (white-to-move) form of the FEN,
    # which is what the planes were built from.
    realfen = maybe_flip_fen(realfen, flip=is_black_turn(realfen))
    realparts = realfen.split(' ')
    assert realparts[1] == 'w'
    assert realparts[2] == castlingstring
    assert realparts[3] == epstr
    assert int(realparts[4]) == fiftymove
    # realparts[5] is the fullmove number, which is not encoded in the planes
    return "".join(fakefen) == replace_tags_board(realfen)
def canon_input_planes(fen):
    """Build the (18, 8, 8) network input for *fen*, canonicalised so that
    the side to move is always treated as white.

    :param str fen: position in FEN notation
    :return: (18, 8, 8) float array of piece + auxiliary planes
    """
    canonical_fen = maybe_flip_fen(fen, is_black_turn(fen))
    return all_input_planes(canonical_fen)
def all_input_planes(fen):
    """Stack the 12 piece planes on top of the 6 auxiliary planes.

    :param str fen: position in FEN notation (already canonicalised)
    :return: (18, 8, 8) float array
    """
    stacked = np.vstack((to_planes(fen), aux_planes(fen)))
    assert stacked.shape == (18, 8, 8)
    return stacked
def maybe_flip_fen(fen, flip=False):
    """Return *fen* unchanged, or (when flip=True) the position mirrored
    vertically with colours swapped, so the side to move becomes white.

    All FEN fields are transformed consistently:
      - board ranks are reversed and piece case is swapped
      - the side to move is toggled
      - castling rights are case-swapped (and re-sorted)
      - the en-passant target square is mirrored to the opposite rank
      - the halfmove/fullmove counters are kept as-is

    :param str fen: position in FEN notation
    :param bool flip: whether to flip the position
    :return str: the (possibly flipped) FEN
    """
    if not flip:
        return fen
    foo = fen.split(' ')
    rows = foo[0].split('/')
    def swapcase(a):
        if a.isalpha():
            return a.lower() if a.isupper() else a.upper()
        return a
    def swapall(aa):
        return "".join([swapcase(a) for a in aa])
    # Bug fix: the en-passant target square must be mirrored together with
    # the board (rank r -> 9 - r), otherwise the en-passant auxiliary plane
    # built from the flipped FEN marks the wrong rank.
    en_passant = foo[3]
    if en_passant != '-':
        en_passant = en_passant[0] + str(9 - int(en_passant[1]))
    return "/".join([swapall(row) for row in reversed(rows)]) \
        + " " + ('w' if foo[1] == 'b' else 'b') \
        + " " + "".join(sorted(swapall(foo[2]))) \
        + " " + en_passant + " " + foo[4] + " " + foo[5]
def aux_planes(fen):
    """Build the six (8, 8) auxiliary planes for *fen*.

    Plane order: K/Q/k/q castling rights (constant 1 or 0 planes), the
    halfmove (fifty-move) counter, and a one-hot en-passant target square.

    :param str fen: position in FEN notation
    :return: (6, 8, 8) float32 array
    """
    fields = fen.split(' ')
    ep_plane = np.zeros((8, 8), dtype=np.float32)
    if fields[3] != '-':
        ep_rank, ep_file = alg_to_coord(fields[3])
        ep_plane[ep_rank][ep_file] = 1
    halfmove_plane = np.full((8, 8), int(fields[4]), dtype=np.float32)
    rights = fields[2]
    planes = [np.full((8, 8), float(flag in rights), dtype=np.float32)
              for flag in 'KQkq']
    planes.append(halfmove_plane)
    planes.append(ep_plane)
    stacked = np.asarray(planes, dtype=np.float32)
    assert stacked.shape == (6, 8, 8)
    return stacked
# FEN board is like this:
# a8 b8 .. h8
# a7 b7 .. h7
# .. .. .. ..
# a1 b1 .. h1
#
# FEN string is like this:
# 0 1 .. 7
# 8 9 .. 15
# .. .. .. ..
# 56 57 .. 63
# my planes are like this:
# 00 01 .. 07
# 10 11 .. 17
# .. .. .. ..
# 70 71 .. 77
#
def alg_to_coord(alg):
    """Map algebraic square notation (e.g. 'e4') to plane coordinates
    (rank, file), where (0, 0) is a8 and (7, 7) is h1."""
    file_idx = ord(alg[0]) - ord('a')  # 0-7
    rank_idx = 8 - int(alg[1])         # 0-7
    return rank_idx, file_idx
def coord_to_alg(coord):
    """Inverse of alg_to_coord: map (rank, file) back to e.g. 'e4'."""
    rank_idx, file_idx = coord
    return chr(ord('a') + file_idx) + str(8 - rank_idx)
def to_planes(fen):
    """Encode the board field of *fen* as 12 one-hot (8, 8) piece planes.

    Plane order follows pieces_order ('KQRBNPkqrbnp').

    :param str fen: position in FEN notation
    :return: (12, 8, 8) float32 array
    """
    expanded = replace_tags_board(fen)  # flat 64-char board string
    planes = np.zeros(shape=(12, 8, 8), dtype=np.float32)
    for square, piece in enumerate(expanded):
        if piece.isalpha():
            planes[ind[piece]][square // 8][square % 8] = 1
    assert planes.shape == (12, 8, 8)
    return planes
def replace_tags_board(board_san):
    """Expand the board field of a FEN into a flat 64-character string:
    each digit 2-8 becomes that many '1's and the '/' rank separators
    are dropped."""
    board_field = board_san.split(" ")[0]
    for count in range(2, 9):
        board_field = board_field.replace(str(count), "1" * count)
    return board_field.replace("/", "")
def is_black_turn(fen):
    """Return True when it is black's turn to move in *fen*."""
    turn_field = fen.split(" ")[1]
    return turn_field == 'b'
| [
"logging.getLogger",
"numpy.asarray",
"numpy.tanh",
"numpy.zeros",
"numpy.vstack",
"enum.Enum",
"numpy.full",
"copy.copy"
] | [((197, 216), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (206, 216), False, 'from logging import getLogger\n'), ((257, 296), 'enum.Enum', 'enum.Enum', (['"""Winner"""', '"""black white draw"""'], {}), "('Winner', 'black white draw')\n", (266, 296), False, 'import enum\n'), ((5127, 5141), 'numpy.tanh', 'np.tanh', (['(v * 3)'], {}), '(v * 3)\n', (5134, 5141), True, 'import numpy as np\n'), ((6662, 6707), 'numpy.vstack', 'np.vstack', (['(history_both, current_aux_planes)'], {}), '((history_both, current_aux_planes))\n', (6671, 6707), True, 'import numpy as np\n'), ((7375, 7409), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'np.float32'}), '((8, 8), dtype=np.float32)\n', (7383, 7409), True, 'import numpy as np\n'), ((7559, 7610), 'numpy.full', 'np.full', (['(8, 8)', 'fifty_move_count'], {'dtype': 'np.float32'}), '((8, 8), fifty_move_count, dtype=np.float32)\n', (7566, 7610), True, 'import numpy as np\n'), ((8041, 8087), 'numpy.asarray', 'np.asarray', (['auxiliary_planes'], {'dtype': 'np.float32'}), '(auxiliary_planes, dtype=np.float32)\n', (8051, 8087), True, 'import numpy as np\n'), ((8720, 8764), 'numpy.zeros', 'np.zeros', ([], {'shape': '(12, 8, 8)', 'dtype': 'np.float32'}), '(shape=(12, 8, 8), dtype=np.float32)\n', (8728, 8764), True, 'import numpy as np\n'), ((3614, 3629), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (3623, 3629), False, 'import copy\n'), ((3650, 3671), 'copy.copy', 'copy.copy', (['self.board'], {}), '(self.board)\n', (3659, 3671), False, 'import copy\n')] |
import matplotlib.pyplot as plt
import numpy as np
from beprof import profile
import os

# Builds the weighted sum of measured depth-dose profiles for a range
# modulator: each <range>mm.dat profile is normalised, scaled by the
# corresponding modulator step weight and accumulated into one curve.
data_sets = {}
profiles = []
values = []
files = os.listdir('.')

# result.dat columns (semicolon-separated): 0 = step position, 1 = step weight
positions, weights = np.loadtxt("result.dat", delimiter=";", usecols=(0, 1), unpack=True)
print(positions, weights)
weights = weights[::-1]  # reverse so weights pair with files by index below

plot_data_files = [name for name in files if "mm.dat" in name]
# os.listdir() returns entries in arbitrary order; sort by the numeric range
# in the file name so the index pairing with `weights` is deterministic.
# NOTE(review): assumes the (reversed) weights correspond to ascending
# ranges -- confirm against how result.dat is generated.
plot_data_files.sort(key=lambda name: float(name[:-len('mm.dat')]))
print(plot_data_files)

for idx, datafile in enumerate(plot_data_files):
    print("\nFile {0}:".format(datafile))
    # Bug fix: str.strip('.dat') strips *characters* (any of '.', 'd', 'a',
    # 't'), not the suffix, which mangles names such as 'data5mm.dat'.
    # Slice the known suffixes off instead.
    dose_name = datafile[:-len('.dat')]                # e.g. '1.5mm'
    dose_range = float(dose_name[:-len('mm')]) * 10    # file name is in cm -> mm
    print("Processing data for: %s [mm]" % dose_range)
    data_sets[dose_name] = np.loadtxt(datafile)
    print("Max = ", data_sets[dose_name][:, 1].max())
    data_sets[dose_name][:, 1] /= data_sets[dose_name][:, 1].max()  # normalise dose to 1
    data_sets[dose_name][:, 0] *= 10                                # depth cm -> mm
    tmp_prof = profile.Profile(data_sets[dose_name][:, :2])
    profiles.append(tmp_prof)
    plt.plot(tmp_prof.x, tmp_prof.y * weights[idx])
    values.append(tmp_prof.y * weights[idx])

# Sum of all weighted profiles = resulting spread-out Bragg peak.
tmp_sum = sum(values)
plt.plot(tmp_prof.x, tmp_sum)
plt.xlim([0, 16])
plt.title("Symulacja modulatora r15 m15")
plt.xlabel("Zasięg w wodzie [mm]")
plt.ylabel("Relatywna dawka")
plt.show()
| [
"os.listdir",
"matplotlib.pyplot.xlim",
"beprof.profile.Profile",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((139, 154), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (149, 154), False, 'import os\n'), ((198, 266), 'numpy.loadtxt', 'np.loadtxt', (['"""result.dat"""'], {'delimiter': '""";"""', 'usecols': '(0, 1)', 'unpack': '(True)'}), "('result.dat', delimiter=';', usecols=(0, 1), unpack=True)\n", (208, 266), True, 'import numpy as np\n'), ((1115, 1144), 'matplotlib.pyplot.plot', 'plt.plot', (['tmp_prof.x', 'tmp_sum'], {}), '(tmp_prof.x, tmp_sum)\n', (1123, 1144), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1163), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 16]'], {}), '([0, 16])\n', (1154, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1206), 'matplotlib.pyplot.title', 'plt.title', (['"""Symulacja modulatora r15 m15"""'], {}), "('Symulacja modulatora r15 m15')\n", (1174, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1241), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Zasięg w wodzie [mm]"""'], {}), "('Zasięg w wodzie [mm]')\n", (1217, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1271), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relatywna dawka"""'], {}), "('Relatywna dawka')\n", (1252, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1282), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1280, 1282), True, 'import matplotlib.pyplot as plt\n'), ((705, 725), 'numpy.loadtxt', 'np.loadtxt', (['datafile'], {}), '(datafile)\n', (715, 725), True, 'import numpy as np\n'), ((905, 949), 'beprof.profile.Profile', 'profile.Profile', (['data_sets[dose_name][:, :2]'], {}), '(data_sets[dose_name][:, :2])\n', (920, 949), False, 'from beprof import profile\n'), ((985, 1032), 'matplotlib.pyplot.plot', 'plt.plot', (['tmp_prof.x', '(tmp_prof.y * weights[idx])'], {}), '(tmp_prof.x, tmp_prof.y * weights[idx])\n', (993, 1032), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy as np
from .image import Image
# Geometry of the fisheye sky region in the all-sky camera images: only
# pixels inside this circle belong to the projected sky.
RADIUS = 1324
CENTER_X = 2184
CENTER_Y = 1456


class CloudCoverage:
    """Cloud cover index calculation related functions."""

    @classmethod
    def get_mask(cls, image):
        """Return a boolean mask selecting the pixels outside the sky circle.

        Args:
            image: A numpy ndarray of shape (rows, cols, channels).

        Returns:
            numpy.ndarray: Boolean (rows, cols) array, True for pixels
            farther than RADIUS from (CENTER_X, CENTER_Y).
        """
        rows, cols, _ = image.shape
        y, x = np.ogrid[:rows, :cols]
        distance_from_center = np.sqrt((x - CENTER_X) ** 2 + (y - CENTER_Y) ** 2)
        return distance_from_center > RADIUS

    @classmethod
    def apply_mask(cls, image):
        """Make every pixel outside the sky circle fully transparent.

        The image is modified in place (all BGRA channels of the masked
        pixels are set to 0) and also returned for convenience.

        Args:
            image: A numpy BGRA ndarray.

        Returns:
            numpy.ndarray: The same array with out-of-circle pixels zeroed.
        """
        image[cls.get_mask(image)] = 0
        return image

    @classmethod
    def classify(cls, image):
        """Binarize sky pixels into cloud (white) and clear sky (black).

        For every non-transparent pixel the red/blue ratio is computed;
        ratios >= 0.95 are classified as cloud ([255, 255, 255, 255]),
        the rest as sky ([0, 0, 0, 255]).  Pixels with alpha == 0 are
        left untouched.  The image is modified in place and returned.

        Args:
            image: A numpy BGRA ndarray.

        Returns:
            numpy.ndarray: The classified black/white image.
        """
        blue, red, alpha = image[:, :, 0], image[:, :, 2], image[:, :, 3]
        # Where blue == 0 the ratio defaults to 1.0, i.e. "cloud".
        ratios = np.divide(red, blue, out=np.ones(red.shape, dtype=float), where=blue != 0)
        mask = ratios >= 0.95
        image[(alpha != 0) & mask] = [255, 255, 255, 255]
        image[(alpha != 0) & ~mask] = [0, 0, 0, 255]
        return image

    @classmethod
    def ratio(cls, image):
        """Return the cloud cover index: cloud / (cloud + sky) pixels.

        Only exact white ([255, 255, 255, 255]) and exact black
        ([0, 0, 0, 255]) pixels are counted, so the image must already
        have been processed by classify().

        Args:
            image: A classified numpy BGRA ndarray.

        Returns:
            float: Fraction of classified pixels that are cloud.
        """
        sky = np.count_nonzero(np.all(image == [0, 0, 0, 255], axis=2))
        cloud = np.count_nonzero(np.all(image == [255, 255, 255, 255], axis=2))
        return cloud / (cloud + sky)

    @classmethod
    def compute(cls, image):
        """Return (cloud cover index, classified image) for *image*.

        Bug fix: the defensive copy of ``image.pixels`` was previously
        taken but immediately discarded, so apply_mask() mutated the
        caller's image in place.  The pipeline now runs on a real copy
        and the input image is left untouched.

        Args:
            image: An Image object containing the pixels as a numpy ndarray.

        Returns:
            (float, Image): Cloud cover index and the classified image.
        """
        modified_image = cls.apply_mask(image.pixels.copy())
        modified_image = cls.classify(modified_image)
        count = cls.ratio(modified_image)
        new_image = Image(modified_image)
        return (count, new_image)
| [
"numpy.all",
"numpy.sqrt",
"numpy.ones"
] | [((988, 1038), 'numpy.sqrt', 'np.sqrt', (['((x - CENTER_X) ** 2 + (y - CENTER_Y) ** 2)'], {}), '((x - CENTER_X) ** 2 + (y - CENTER_Y) ** 2)\n', (995, 1038), True, 'import numpy as np\n'), ((3363, 3402), 'numpy.all', 'np.all', (['(image == [0, 0, 0, 255])'], {'axis': '(2)'}), '(image == [0, 0, 0, 255], axis=2)\n', (3369, 3402), True, 'import numpy as np\n'), ((3434, 3479), 'numpy.all', 'np.all', (['(image == [255, 255, 255, 255])'], {'axis': '(2)'}), '(image == [255, 255, 255, 255], axis=2)\n', (3440, 3479), True, 'import numpy as np\n'), ((2677, 2708), 'numpy.ones', 'np.ones', (['red.shape'], {'dtype': 'float'}), '(red.shape, dtype=float)\n', (2684, 2708), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Draw horizontal violin plots of random per-group samples.

For each (name, mu, sigma) a random number of samples is drawn from
N(mu, sigma); the quartiles and whisker bounds are printed per group and
the violin plot is saved to violinplot.png.
"""
import seaborn as sns
# seaborn no longer re-exports matplotlib.pyplot as sns.plt; use plt directly.
import matplotlib.pyplot as plt
import random
import operator as op
import numpy as np

data = {}
names = [('human', 174, 5), ('ape', 150, 10)]
for name, mu, sigma in names:
    # Each group gets a random sample size between 200 and 500.
    trial_length = random.randint(200, 500)
    data[name] = list(np.random.normal(mu, sigma, trial_length))
for key, values in data.items():
    print(key)
    p025 = np.percentile(values, 25)
    p075 = np.percentile(values, 75)
    p_dist = p075 - p025
    print("\t[{}, {}]".format(p025, p075))
    print("\tlower whisker: {}".format(p025 - p_dist * 1.5))
    print("\tupper whisker: {}".format(p075 + p_dist * 1.5))
sorted_keys, sorted_vals = zip(*sorted(data.items(), key=op.itemgetter(1)))
ax = sns.violinplot(data=sorted_vals, orient="h", palette="Set2")
# category labels
plt.yticks(plt.yticks()[0], sorted_keys)
ax.set(xlabel='Size in cm')
plt.savefig("violinplot.png")
| [
"numpy.random.normal",
"operator.itemgetter",
"seaborn.violinplot",
"numpy.percentile",
"seaborn.plt.yticks",
"random.randint",
"seaborn.plt.savefig"
] | [((813, 873), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'sorted_vals', 'orient': '"""h"""', 'palette': '"""Set2"""'}), "(data=sorted_vals, orient='h', palette='Set2')\n", (827, 873), True, 'import seaborn as sns\n'), ((969, 1002), 'seaborn.plt.savefig', 'sns.plt.savefig', (['"""violinplot.png"""'], {}), "('violinplot.png')\n", (984, 1002), True, 'import seaborn as sns\n'), ((221, 245), 'random.randint', 'random.randint', (['(200)', '(500)'], {}), '(200, 500)\n', (235, 245), False, 'import random\n'), ((477, 502), 'numpy.percentile', 'np.percentile', (['values', '(25)'], {}), '(values, 25)\n', (490, 502), True, 'import numpy as np\n'), ((514, 539), 'numpy.percentile', 'np.percentile', (['values', '(75)'], {}), '(values, 75)\n', (527, 539), True, 'import numpy as np\n'), ((374, 415), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'trial_length'], {}), '(mu, sigma, trial_length)\n', (390, 415), True, 'import numpy as np\n'), ((907, 923), 'seaborn.plt.yticks', 'sns.plt.yticks', ([], {}), '()\n', (921, 923), True, 'import seaborn as sns\n'), ((302, 325), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (316, 325), False, 'import random\n'), ((788, 804), 'operator.itemgetter', 'op.itemgetter', (['(1)'], {}), '(1)\n', (801, 804), True, 'import operator as op\n')] |
import numpy as np
def reorder_south2north(data, lat):
    """Ensure the latitude axis runs from south to north.

    If ``lat`` is descending (first entry greater than the second), both
    ``lat`` and ``data`` are flipped along their first axis; otherwise
    they are returned unchanged.
    """
    if lat[0] > lat[1]:
        return data[::-1], lat[::-1]
    return data, lat
def get_itczposition_adam(pr, lat, latboundary, dlat):
    """ITCZ latitude as the precipitation-weighted mean latitude.

    ``pr`` and ``lat`` are reordered south-to-north and interpolated onto
    a regular grid of spacing ``dlat`` inside +/- ``latboundary``; each
    grid point is weighted by cos(lat) times its precipitation.
    """
    pr, lat = reorder_south2north(pr, lat)
    lat_grid = np.arange(-latboundary, latboundary, dlat)
    pr_grid = np.interp(lat_grid, lat, pr)
    weights = np.cos(np.deg2rad(lat_grid))
    return np.nansum(lat_grid * weights * pr_grid) / np.nansum(weights * pr_grid)
def test_itczposition_adam(pr, lat, latboundary, dlat):
    """Split the (lat - itcz)-weighted precipitation integral at the
    Adam ITCZ position and return the magnitudes of the two halves
    (a sanity check on the centroid property of the estimate).
    """
    pr, lat = reorder_south2north(pr, lat)
    lat_grid = np.arange(-latboundary, latboundary, dlat)
    pr_grid = np.interp(lat_grid, lat, pr)
    weights = np.cos(np.deg2rad(lat_grid))
    itcz = get_itczposition_adam(pr, lat, latboundary, dlat)
    # index of the grid latitude closest to the ITCZ position
    split = np.argmin(np.abs(itcz - lat_grid))
    south = np.abs(np.nansum((lat_grid[:split + 1] - itcz)
                               * pr_grid[:split + 1] * weights[:split + 1]))
    north = np.abs(np.nansum((lat_grid[split + 1:] - itcz)
                               * pr_grid[split + 1:] * weights[split + 1:]))
    return south, north
def get_itczposition_voigt(pr, lat, latboundary, dlat):
    """ITCZ latitude as the precipitation median.

    ``pr`` and ``lat`` are reordered south-to-north and interpolated onto
    a regular grid of spacing ``dlat`` inside +/- ``latboundary``, with
    each grid point weighted by cos(lat). The ITCZ is the grid latitude
    at which the precipitation integral accumulated from the southern
    boundary is closest to half of the total.
    """
    pr, lat = reorder_south2north(pr, lat)
    lati = np.arange(-latboundary, latboundary, dlat)
    pri = np.interp(lati, lat, pr)
    areai = np.cos(lati * np.pi / 180)
    weighted = pri * areai
    # area-integrated precip (up to constant factor)
    tot = np.sum(weighted)
    # Running integral from the southern boundary up to each latitude.
    # cumsum replaces the original O(n^2) loop that re-summed the whole
    # prefix for every index.
    pri_int = np.cumsum(weighted)
    # itcz is where the running integral is 0.5 of the total
    return lati[np.argmin(np.abs(pri_int - 0.5 * tot))]
def test_itczposition_voigt(pr, lat, latboundary, dlat):
    """Split the area-weighted precipitation integral at the Voigt ITCZ
    position and return the southern and northern halves (for the median
    definition the two halves should be close to equal).
    """
    pr, lat = reorder_south2north(pr, lat)
    lat_grid = np.arange(-latboundary, latboundary, dlat)
    pr_grid = np.interp(lat_grid, lat, pr)
    weights = np.cos(np.deg2rad(lat_grid))
    itcz = get_itczposition_voigt(pr, lat, latboundary, dlat)
    # index of the grid latitude closest to the ITCZ position
    split = np.argmin(np.abs(itcz - lat_grid))
    south = np.nansum(pr_grid[:split + 1] * weights[:split + 1])
    north = np.nansum(pr_grid[split + 1:] * weights[split + 1:])
    return south, north
"numpy.abs",
"numpy.sum",
"numpy.zeros",
"numpy.cos",
"numpy.interp",
"numpy.nansum",
"numpy.arange"
] | [((364, 406), 'numpy.arange', 'np.arange', (['(-latboundary)', 'latboundary', 'dlat'], {}), '(-latboundary, latboundary, dlat)\n', (373, 406), True, 'import numpy as np\n'), ((419, 443), 'numpy.interp', 'np.interp', (['lati', 'lat', 'pr'], {}), '(lati, lat, pr)\n', (428, 443), True, 'import numpy as np\n'), ((456, 482), 'numpy.cos', 'np.cos', (['(lati * np.pi / 180)'], {}), '(lati * np.pi / 180)\n', (462, 482), True, 'import numpy as np\n'), ((699, 741), 'numpy.arange', 'np.arange', (['(-latboundary)', 'latboundary', 'dlat'], {}), '(-latboundary, latboundary, dlat)\n', (708, 741), True, 'import numpy as np\n'), ((754, 778), 'numpy.interp', 'np.interp', (['lati', 'lat', 'pr'], {}), '(lati, lat, pr)\n', (763, 778), True, 'import numpy as np\n'), ((791, 817), 'numpy.cos', 'np.cos', (['(lati * np.pi / 180)'], {}), '(lati * np.pi / 180)\n', (797, 817), True, 'import numpy as np\n'), ((1352, 1394), 'numpy.arange', 'np.arange', (['(-latboundary)', 'latboundary', 'dlat'], {}), '(-latboundary, latboundary, dlat)\n', (1361, 1394), True, 'import numpy as np\n'), ((1407, 1431), 'numpy.interp', 'np.interp', (['lati', 'lat', 'pr'], {}), '(lati, lat, pr)\n', (1416, 1431), True, 'import numpy as np\n'), ((1444, 1470), 'numpy.cos', 'np.cos', (['(lati * np.pi / 180)'], {}), '(lati * np.pi / 180)\n', (1450, 1470), True, 'import numpy as np\n'), ((1530, 1549), 'numpy.sum', 'np.sum', (['(pri * areai)'], {}), '(pri * areai)\n', (1536, 1549), True, 'import numpy as np\n'), ((2017, 2059), 'numpy.arange', 'np.arange', (['(-latboundary)', 'latboundary', 'dlat'], {}), '(-latboundary, latboundary, dlat)\n', (2026, 2059), True, 'import numpy as np\n'), ((2072, 2096), 'numpy.interp', 'np.interp', (['lati', 'lat', 'pr'], {}), '(lati, lat, pr)\n', (2081, 2096), True, 'import numpy as np\n'), ((2109, 2135), 'numpy.cos', 'np.cos', (['(lati * np.pi / 180)'], {}), '(lati * np.pi / 180)\n', (2115, 2135), True, 'import numpy as np\n'), ((2333, 2381), 'numpy.nansum', 'np.nansum', (['(pri[0:ilati + 
1] * areai[0:ilati + 1])'], {}), '(pri[0:ilati + 1] * areai[0:ilati + 1])\n', (2342, 2381), True, 'import numpy as np\n'), ((2385, 2431), 'numpy.nansum', 'np.nansum', (['(pri[ilati + 1:] * areai[ilati + 1:])'], {}), '(pri[ilati + 1:] * areai[ilati + 1:])\n', (2394, 2431), True, 'import numpy as np\n'), ((490, 519), 'numpy.nansum', 'np.nansum', (['(lati * areai * pri)'], {}), '(lati * areai * pri)\n', (499, 519), True, 'import numpy as np\n'), ((522, 544), 'numpy.nansum', 'np.nansum', (['(areai * pri)'], {}), '(areai * pri)\n', (531, 544), True, 'import numpy as np\n'), ((984, 1003), 'numpy.abs', 'np.abs', (['(itcz - lati)'], {}), '(itcz - lati)\n', (990, 1003), True, 'import numpy as np\n'), ((1021, 1098), 'numpy.nansum', 'np.nansum', (['((lati[0:ilati + 1] - itcz) * pri[0:ilati + 1] * areai[0:ilati + 1])'], {}), '((lati[0:ilati + 1] - itcz) * pri[0:ilati + 1] * areai[0:ilati + 1])\n', (1030, 1098), True, 'import numpy as np\n'), ((1104, 1178), 'numpy.nansum', 'np.nansum', (['((lati[ilati + 1:] - itcz) * pri[ilati + 1:] * areai[ilati + 1:])'], {}), '((lati[ilati + 1:] - itcz) * pri[ilati + 1:] * areai[ilati + 1:])\n', (1113, 1178), True, 'import numpy as np\n'), ((1617, 1636), 'numpy.zeros', 'np.zeros', (['lati.size'], {}), '(lati.size)\n', (1625, 1636), True, 'import numpy as np\n'), ((1701, 1738), 'numpy.sum', 'np.sum', (['(pri[0:j + 1] * areai[0:j + 1])'], {}), '(pri[0:j + 1] * areai[0:j + 1])\n', (1707, 1738), True, 'import numpy as np\n'), ((2303, 2322), 'numpy.abs', 'np.abs', (['(itcz - lati)'], {}), '(itcz - lati)\n', (2309, 2322), True, 'import numpy as np\n'), ((1830, 1857), 'numpy.abs', 'np.abs', (['(pri_int - 0.5 * tot)'], {}), '(pri_int - 0.5 * tot)\n', (1836, 1857), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
hycom.py
Functions for dealing with the HYCOM model for importation into ROMS
Written by <NAME> on 07/24/15
Copyright (c)2020 University of Hawaii under the MIT-License.
"""
import numpy as np
from datetime import datetime
import netCDF4
from seapy.lib import default_epoch, chunker
from seapy.model.grid import asgrid
from seapy.roms import ncgen, num2date
# _url = "http://tds.hycom.org/thredds/dodsC/GLBu0.08/expt_19.1/2010"
_url = "http://tds.hycom.org/thredds/dodsC/GLBu0.08/expt_91.1"
_maxrecs = 5
def load_history(filename,
                 start_time=datetime(1, 1, 1),
                 end_time=datetime(1, 1, 1),
                 grid=None,
                 epoch=default_epoch, url=_url, load_data=False):
    """
    Download HYCOM data and save into local file

    Parameters
    ----------
    filename: string
        name of output file
    start_time: datetime
        starting date to load HYCOM data
    end_time: datetime
        ending date for loading HYCOM data
    grid: seapy.model.grid, optional
        if specified, only load HYCOM data that covers the grid
    epoch: datetime, optional
        reference time for new file
    url: string, optional
        URL to load HYCOM data from
    load_data: bool, optional
        If true actually load the data. If false (default), it
        displays the information needed to load the data using ncks

    Returns
    -------
    None
    """
    # Imported locally: the original code called seapy.roms.date2num even
    # though the bare name 'seapy' was never bound (only from-imports at
    # the top of the file), which raised a NameError at runtime.
    from seapy.roms import date2num

    # Load the grid
    grid = asgrid(grid)
    # Open the HYCOM data
    hycom = netCDF4.Dataset(url)
    # Figure out the time records that are required
    hycom_time = num2date(hycom, "time")
    time_list = np.where(np.logical_and(hycom_time >= start_time,
                                    hycom_time <= end_time))
    if not np.any(time_list):
        raise Exception("Cannot find valid times")
    # Get the latitude and longitude ranges, padded by half a degree
    minlat = np.min(grid.lat_rho) - 0.5
    maxlat = np.max(grid.lat_rho) + 0.5
    minlon = np.min(grid.lon_rho) - 0.5
    maxlon = np.max(grid.lon_rho) + 0.5
    hycom_lon = hycom.variables["lon"][:]
    hycom_lat = hycom.variables["lat"][:]
    # Ensure the same longitude convention as the target grid
    if not grid.east():
        hycom_lon[hycom_lon > 180] -= 360
    latlist = np.where(np.logical_and(hycom_lat >= minlat,
                                  hycom_lat <= maxlat))
    lonlist = np.where(np.logical_and(hycom_lon >= minlon,
                                  hycom_lon <= maxlon))
    if not np.any(latlist) or not np.any(lonlist):
        raise Exception("Bounds not found")
    # HYCOM variable name -> rank, and HYCOM name -> ROMS history name
    hycomvars = {"surf_el": 3, "water_u": 4, "water_v": 4, "water_temp": 4,
                 "salinity": 4}
    hisvars = {"surf_el": "zeta", "water_u": "u", "water_v": "v",
               "water_temp": "temp", "salinity": "salt"}
    if not load_data:
        # Only display the ncks command that would download the subset
        print("ncks -v {:s} -d time,{:d},{:d} -d lat,{:d},{:d} -d lon,{:d},{:d} {:s} {:s}".format(
            ",".join(hycomvars.keys()),
            time_list[0][0], time_list[0][-1], latlist[0][0],
            latlist[0][-1], lonlist[0][0], lonlist[0][-1], url, filename))
        return
    # Build the history file. In the original code the coordinate writes
    # below ran unconditionally, raising a NameError when load_data was
    # False because 'his' was never created; they are now confined here.
    his = ncgen.create_zlevel(filename, len(latlist[0]),
                              len(lonlist[0]),
                              len(hycom.variables["depth"][:]), epoch,
                              "HYCOM history from " + url, dims=1)
    # Write out the coordinate and time data
    his.variables["lat"][:] = hycom_lat[latlist]
    his.variables["lon"][:] = hycom_lon[lonlist]
    his.variables["depth"][:] = hycom.variables["depth"]
    his.variables["time"][:] = date2num(
        hycom_time[time_list], his, 'time')
    # Load the fields in chunks of _maxrecs records to bound memory use
    for rn, recs in enumerate(chunker(time_list[0], _maxrecs)):
        print("{:s}-{:s}: ".format(hycom_time[recs[0]].strftime("%m/%d/%Y"),
                                   hycom_time[recs[-1]].strftime("%m/%d/%Y")),
              end='', flush=True)
        for var in hycomvars:
            print("{:s} ".format(var), end='', flush=True)
            hisrange = np.arange(
                rn * _maxrecs, (rn * _maxrecs) + len(recs))
            if hycomvars[var] == 3:
                his.variables[hisvars[var]][hisrange, :, :] = \
                    hycom.variables[var][recs, latlist[0], lonlist[0]].filled(
                        fill_value=9.99E10)
            else:
                his.variables[hisvars[var]][hisrange, :, :, :] = \
                    hycom.variables[var][recs, :, latlist[0],
                                         lonlist[0]].filled(fill_value=9.99E10)
        his.sync()
        print("", flush=True)
| [
"datetime.datetime",
"seapy.roms.num2date",
"numpy.logical_and",
"seapy.model.grid.asgrid",
"netCDF4.Dataset",
"numpy.any",
"numpy.max",
"numpy.min",
"seapy.lib.chunker"
] | [((600, 617), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (608, 617), False, 'from datetime import datetime\n'), ((645, 662), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (653, 662), False, 'from datetime import datetime\n'), ((1488, 1500), 'seapy.model.grid.asgrid', 'asgrid', (['grid'], {}), '(grid)\n', (1494, 1500), False, 'from seapy.model.grid import asgrid\n'), ((1540, 1560), 'netCDF4.Dataset', 'netCDF4.Dataset', (['url'], {}), '(url)\n', (1555, 1560), False, 'import netCDF4\n'), ((1631, 1654), 'seapy.roms.num2date', 'num2date', (['hycom', '"""time"""'], {}), "(hycom, 'time')\n", (1639, 1654), False, 'from seapy.roms import ncgen, num2date\n'), ((1681, 1745), 'numpy.logical_and', 'np.logical_and', (['(hycom_time >= start_time)', '(hycom_time <= end_time)'], {}), '(hycom_time >= start_time, hycom_time <= end_time)\n', (1695, 1745), True, 'import numpy as np\n'), ((1798, 1815), 'numpy.any', 'np.any', (['time_list'], {}), '(time_list)\n', (1804, 1815), True, 'import numpy as np\n'), ((1926, 1946), 'numpy.min', 'np.min', (['grid.lat_rho'], {}), '(grid.lat_rho)\n', (1932, 1946), True, 'import numpy as np\n'), ((1966, 1986), 'numpy.max', 'np.max', (['grid.lat_rho'], {}), '(grid.lat_rho)\n', (1972, 1986), True, 'import numpy as np\n'), ((2006, 2026), 'numpy.min', 'np.min', (['grid.lon_rho'], {}), '(grid.lon_rho)\n', (2012, 2026), True, 'import numpy as np\n'), ((2046, 2066), 'numpy.max', 'np.max', (['grid.lon_rho'], {}), '(grid.lon_rho)\n', (2052, 2066), True, 'import numpy as np\n'), ((2277, 2333), 'numpy.logical_and', 'np.logical_and', (['(hycom_lat >= minlat)', '(hycom_lat <= maxlat)'], {}), '(hycom_lat >= minlat, hycom_lat <= maxlat)\n', (2291, 2333), True, 'import numpy as np\n'), ((2396, 2452), 'numpy.logical_and', 'np.logical_and', (['(hycom_lon >= minlon)', '(hycom_lon <= maxlon)'], {}), '(hycom_lon >= minlon, hycom_lon <= maxlon)\n', (2410, 2452), True, 'import numpy as np\n'), ((2503, 
2518), 'numpy.any', 'np.any', (['latlist'], {}), '(latlist)\n', (2509, 2518), True, 'import numpy as np\n'), ((2526, 2541), 'numpy.any', 'np.any', (['lonlist'], {}), '(lonlist)\n', (2532, 2541), True, 'import numpy as np\n'), ((3799, 3830), 'seapy.lib.chunker', 'chunker', (['time_list[0]', '_maxrecs'], {}), '(time_list[0], _maxrecs)\n', (3806, 3830), False, 'from seapy.lib import default_epoch, chunker\n')] |
# -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('../..'))
import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt
class ClassicalOperators(unittest.TestCase):
"""Test cases for CVRP problem."""
grid_search = False
def test_1(self):
"""
Chromosome: sequence of clients separed by an 'X' when a new vehicle is assigned
Selection: roulette-wheel
Crossover operator: one-point
Mutation operator: permutation
Elitism is enabled
Termination criteria: number of generations = 100
Parameters:
population_size: 4096
reproduction rate: 0.375
crossover rate: 0
mutation rate: 0.625
"""
fname = './input/A-n32-k5.vrp'
nodes, capacity, distances, demand = self.load_test(fname)
individual_factory = cvrp.CVRPIndividualFactory(nodes, capacity, distances, demand, 'classical')
termination_criteria = ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100)
solver = ga.GeneticAlgorithm(individual_factory, population_size=4096, reproduction=0.375, crossover=0.0, mutation=0.625, elitism=True, termination_criteria=termination_criteria)
if self.grid_search:
params = {
"population_size": numpy.logspace(3, 12, base=2, num=6, dtype=int),
"operators_rate": map(lambda x: (x[0], 0.0, x[1]), filter(lambda x: sum(x) == 1.0, itertools.product(numpy.arange(.125, 0.875, .125), repeat=2))),
"elitism": [True],
"termination_criteria": [ ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100) ]
}
grid = grid_search.GridSearch(solver, params)
grid.search(0.0)
grid_scores = grid.get_grid_scores()
fname = './results/classical_operators/A-n32-k5.vrp.grid.csv'
grid_scores.to_csv(fname, sep=',', index=False)
sys.stdout.write("Finished. Results are at: ./results/classical_operators/A-n32-k5.vrp.grid.csv\n")
else:
sys.stdout.write("Starting test_1: CLASSICAL OPERATORS, ELITISM ENABLED\n")
sys.stdout.write("Input: ./tests/cvrp/A-n32-k5.vrp\n")
solver.init_population()
solver.evolve()
info = solver.get_generation_info()
fname = './results/classical_operators/A-n32-k5.vrp.csv'
info.to_csv(fname, sep=',', index=False)
plt.plot(info['generation'], info['min'], "r", label="melhor", linewidth=2)
plt.plot(info['generation'], info['mean'], "b", label="media", linewidth=2)
plt.plot(info['generation'], info['std'], "k.", label="desvio")
legend = plt.legend(loc='lower right', numpoints=1)
plt.xlabel("geracoes")
plt.ylabel("fitness")
plt.yscale('log')
plt.show()
sys.stdout.write("Finished. Results are at: ./results/classical_operators/A-n32-k5.vrp.csv\n")
assert True
def test_2(self):
"""
Chromosome: sequence of clients separed by an 'X' when a new vehicle is assigned
Selection: roulette-wheel
Crossover operator: one-point
Mutation operator: permutation
Elitism is enabled
Termination criteria: number of generations = 100
Parameters:
population_size: 4096
reproduction rate: 0.375
crossover rate: 0
mutation rate: 0.625
"""
fname = './input/B-n31-k5.vrp'
nodes, capacity, distances, demand = self.load_test(fname)
individual_factory = cvrp.CVRPIndividualFactory(nodes, capacity, distances, demand)
termination_criteria = ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100)
solver = ga.GeneticAlgorithm(individual_factory, population_size=4096, reproduction=0.375, crossover=0.0, mutation=0.625, elitism=True, termination_criteria=termination_criteria)
if self.grid_search:
params = {
"population_size": numpy.logspace(3, 12, base=2, num=6, dtype=int),
"operators_rate": map(lambda x: (x[0], 0.0, x[1]), filter(lambda x: sum(x) == 1.0, itertools.product(numpy.arange(.125, 0.875, .125), repeat=2))),
"elitism": [True],
"termination_criteria": [ ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100) ]
}
grid = grid_search.GridSearch(solver, params)
grid.search(0.0)
grid_scores = grid.get_grid_scores()
fname = './results/classical_operators/B-n31-k5.vrp.grid.csv'
grid_scores.to_csv(fname, sep=',', index=False)
sys.stdout.write("Finished. Results are at: ./results/classical_operators/B-n31-k5.vrp.grid.csv\n")
else:
sys.stdout.write("Starting test_2: CLASSICAL OPERATORS, ELITISM ENABLED\n")
sys.stdout.write("Input: ./tests/vrp/B-n31-k5.vrp\n")
solver.init_population()
solver.evolve()
info = solver.get_generation_info()
fname = './results/classical_operators/B-n31-k5.vrp.csv'
info.to_csv(fname, sep=',', index=False)
plt.plot(info['generation'], info['min'], "r", label="melhor", linewidth=2)
plt.plot(info['generation'], info['mean'], "b", label="media", linewidth=2)
plt.plot(info['generation'], info['std'], "k.", label="desvio")
legend = plt.legend(loc='lower right', numpoints=1)
plt.xlabel("geracoes")
plt.ylabel("fitness")
plt.yscale('log')
plt.show()
sys.stdout.write("Finished. Results are at: ./results/classical_operators/B-n31-k5.vrp.csv\n")
assert True
def test_3(self):
"""
Chromosome: sequence of clients separed by an 'X' when a new vehicle is assigned
Selection: roulette-wheel
Crossover operator: one-point
Mutation operator: permutation
Elitism is enabled
Termination criteria: number of generations = 100
Parameters:
population_size: 4096
reproduction rate: 0.375
crossover rate: 0
mutation rate: 0.625
"""
fname = './input/P-n16-k8.vrp'
nodes, capacity, distances, demand = self.load_test(fname)
individual_factory = cvrp.CVRPIndividualFactory(nodes, capacity, distances, demand)
termination_criteria = ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100)
solver = ga.GeneticAlgorithm(individual_factory, population_size=4096, reproduction=0.375, crossover=0.0, mutation=0.625, elitism=True, termination_criteria=termination_criteria)
if self.grid_search:
params = {
"population_size": numpy.logspace(3, 12, base=2, num=6, dtype=int),
"operators_rate": map(lambda x: (x[0], 0.0, x[1]), filter(lambda x: sum(x) == 1.0, itertools.product(numpy.arange(.125, 0.875, .125), repeat=2))),
"elitism": [True],
"termination_criteria": [ ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100) ]
}
grid = grid_search.GridSearch(solver, params)
grid.search(0.0)
grid_scores = grid.get_grid_scores()
fname = './results/classical_operators/P-n16-k8.vrp.grid.csv'
grid_scores.to_csv(fname, sep=',', index=False)
sys.stdout.write("Finished. Results are at: ./results/classical_operators/P-n16-k8.vrp.grid.csv\n")
else:
sys.stdout.write("Starting test_3: CLASSICAL OPERATORS, ELITISM ENABLED\n")
sys.stdout.write("Input: ./tests/vrp/P-n16-k8.vrp\n")
solver.init_population()
solver.evolve()
info = solver.get_generation_info()
fname = './results/classical_operators/P-n16-k8.vrp.csv'
info.to_csv(fname, sep=',', index=False)
plt.plot(info['generation'], info['min'], "r", label="melhor", linewidth=2)
plt.plot(info['generation'], info['mean'], "b", label="media", linewidth=2)
plt.plot(info['generation'], info['std'], "k.", label="desvio")
legend = plt.legend(loc='lower right', numpoints=1)
plt.xlabel("geracoes")
plt.ylabel("fitness")
plt.yscale('log')
plt.show()
sys.stdout.write("Finished. Results are at: ./results/classical_operators/P-n16-k8.vrp.csv\n")
assert True
def test_4(self):
"""
Chromosome: sequence of clients separed by an 'X' when a new vehicle is assigned
Selection: roulette-wheel
Crossover operator: one-point
Mutation operator: permutation
Elitism is enabled
Termination criteria: number of generations = 100
Parameters:
population_size: 4096
reproduction rate: 0.25
crossover rate: 0
mutation rate: 0.75
"""
fname = './input/A-n80-k10.vrp'
nodes, capacity, distances, demand = self.load_test(fname)
individual_factory = cvrp.CVRPIndividualFactory(nodes, capacity, distances, demand)
termination_criteria = ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100)
solver = ga.GeneticAlgorithm(individual_factory, population_size=4096, reproduction=0.25, crossover=0.0, mutation=0.75, elitism=True, termination_criteria=termination_criteria)
if self.grid_search:
params = {
"population_size": numpy.logspace(3, 12, base=2, num=6, dtype=int),
"operators_rate": map(lambda x: (x[0], 0.0, x[1]), filter(lambda x: sum(x) == 1.0, itertools.product(numpy.arange(.125, 0.875, .125), repeat=2))),
"elitism": [True],
"termination_criteria": [ ga.NumberOfGenerationsTerminationCriteria(number_of_generations=100) ]
}
grid = grid_search.GridSearch(solver, params)
grid.search(0.0)
grid_scores = grid.get_grid_scores()
fname = './results/classical_operators/A-n80-k10.vrp.grid.csv'
grid_scores.to_csv(fname, sep=',', index=False)
sys.stdout.write("Finished. Results are at: ./results/classical_operators/A-n80-k10.vrp.grid.csv\n")
else:
sys.stdout.write("Starting test_4: CLASSICAL OPERATORS, ELITISM ENABLED\n")
sys.stdout.write("Input: ./tests/vrp/A-n80-k10.vrp\n")
solver.init_population()
solver.evolve()
info = solver.get_generation_info()
fname = './results/classical_operators/A-n80-k10.vrp.csv'
info.to_csv(fname, sep=',', index=False)
plt.plot(info['generation'], info['min'], "r", label="melhor", linewidth=2)
plt.plot(info['generation'], info['mean'], "b", label="media", linewidth=2)
plt.plot(info['generation'], info['std'], "k.", label="desvio")
legend = plt.legend(loc='lower right', numpoints=1)
plt.xlabel("geracoes")
plt.ylabel("fitness")
plt.yscale('log')
plt.show()
sys.stdout.write("Finished. Results are at: ./results/classical_operators/A-n80-k10.vrp.csv\n")
assert True
def load_test(self, fname):
content = open(fname).read().replace('\r', ' ').replace('\n', ' ')
nodes = int(re.search('DIMENSION : ([0-9]+)', content).group(1))
capacity = int(re.search('CAPACITY : ([0-9]+)', content).group(1))
coords = re.search('NODE_COORD_SECTION\s*(.*)\s*DEMAND_SECTION', content).group(1)
coords = re.findall('([0-9]+) ([0-9]+) ([0-9]+)', coords)
distances = []
for node, x1, y1 in coords:
row = []
for node, x2, y2 in coords:
xd = int(x2) - int(x1)
yd = int(y2) - int(y1)
row.append(math.sqrt(xd*xd + yd*yd))
distances.append(row)
distances = numpy.matrix(distances)
demand_section = re.search('DEMAND_SECTION\s*(.*)\s*DEPOT_SECTION', content).group(1)
demand_section = re.findall('([0-9]+) ([0-9]+)', demand_section)
node_demand = dict()
for node, demand in demand_section:
node_demand[int(node)] = int(demand)
return nodes, capacity, distances, node_demand
if __name__ == '__main__':
    # Run the test suite when executed as a script. (The original line was
    # truncated by dataset-export residue after the call.)
    unittest.main()
"matplotlib.pyplot.ylabel",
"grid_search.GridSearch",
"math.sqrt",
"cvrp.CVRPIndividualFactory",
"unittest.main",
"numpy.arange",
"re.search",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yscale",
"numpy.logspace",
"ga.GeneticAlgorithm",
"re.findall",
"matplotli... | [((59, 83), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (74, 83), False, 'import sys, os\n'), ((12647, 12662), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12660, 12662), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((910, 985), 'cvrp.CVRPIndividualFactory', 'cvrp.CVRPIndividualFactory', (['nodes', 'capacity', 'distances', 'demand', '"""classical"""'], {}), "(nodes, capacity, distances, demand, 'classical')\n", (936, 985), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((1017, 1085), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (1058, 1085), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((1103, 1281), 'ga.GeneticAlgorithm', 'ga.GeneticAlgorithm', (['individual_factory'], {'population_size': '(4096)', 'reproduction': '(0.375)', 'crossover': '(0.0)', 'mutation': '(0.625)', 'elitism': '(True)', 'termination_criteria': 'termination_criteria'}), '(individual_factory, population_size=4096, reproduction=\n 0.375, crossover=0.0, mutation=0.625, elitism=True,\n termination_criteria=termination_criteria)\n', (1122, 1281), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((3738, 3800), 'cvrp.CVRPIndividualFactory', 'cvrp.CVRPIndividualFactory', (['nodes', 'capacity', 'distances', 'demand'], {}), '(nodes, capacity, distances, demand)\n', (3764, 3800), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((3832, 3900), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (3873, 3900), False, 'import unittest, 
ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((3918, 4096), 'ga.GeneticAlgorithm', 'ga.GeneticAlgorithm', (['individual_factory'], {'population_size': '(4096)', 'reproduction': '(0.375)', 'crossover': '(0.0)', 'mutation': '(0.625)', 'elitism': '(True)', 'termination_criteria': 'termination_criteria'}), '(individual_factory, population_size=4096, reproduction=\n 0.375, crossover=0.0, mutation=0.625, elitism=True,\n termination_criteria=termination_criteria)\n', (3937, 4096), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((6539, 6601), 'cvrp.CVRPIndividualFactory', 'cvrp.CVRPIndividualFactory', (['nodes', 'capacity', 'distances', 'demand'], {}), '(nodes, capacity, distances, demand)\n', (6565, 6601), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((6633, 6701), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (6674, 6701), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((6719, 6897), 'ga.GeneticAlgorithm', 'ga.GeneticAlgorithm', (['individual_factory'], {'population_size': '(4096)', 'reproduction': '(0.375)', 'crossover': '(0.0)', 'mutation': '(0.625)', 'elitism': '(True)', 'termination_criteria': 'termination_criteria'}), '(individual_factory, population_size=4096, reproduction=\n 0.375, crossover=0.0, mutation=0.625, elitism=True,\n termination_criteria=termination_criteria)\n', (6738, 6897), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((9339, 9401), 'cvrp.CVRPIndividualFactory', 'cvrp.CVRPIndividualFactory', (['nodes', 'capacity', 'distances', 'demand'], {}), '(nodes, capacity, distances, demand)\n', (9365, 9401), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, 
itertools, matplotlib.pyplot as plt\n'), ((9433, 9501), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (9474, 9501), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((9519, 9696), 'ga.GeneticAlgorithm', 'ga.GeneticAlgorithm', (['individual_factory'], {'population_size': '(4096)', 'reproduction': '(0.25)', 'crossover': '(0.0)', 'mutation': '(0.75)', 'elitism': '(True)', 'termination_criteria': 'termination_criteria'}), '(individual_factory, population_size=4096, reproduction=\n 0.25, crossover=0.0, mutation=0.75, elitism=True, termination_criteria=\n termination_criteria)\n', (9538, 9696), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11886, 11934), 're.findall', 're.findall', (['"""([0-9]+) ([0-9]+) ([0-9]+)"""', 'coords'], {}), "('([0-9]+) ([0-9]+) ([0-9]+)', coords)\n", (11896, 11934), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((12240, 12263), 'numpy.matrix', 'numpy.matrix', (['distances'], {}), '(distances)\n', (12252, 12263), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((12384, 12431), 're.findall', 're.findall', (['"""([0-9]+) ([0-9]+)"""', 'demand_section'], {}), "('([0-9]+) ([0-9]+)', demand_section)\n", (12394, 12431), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((1754, 1792), 'grid_search.GridSearch', 'grid_search.GridSearch', (['solver', 'params'], {}), '(solver, params)\n', (1776, 1792), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2030, 2139), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. 
Results are at: ./results/classical_operators/A-n32-k5.vrp.grid.csv\n"""'], {}), "(\n 'Finished. Results are at: ./results/classical_operators/A-n32-k5.vrp.grid.csv\\n'\n )\n", (2046, 2139), False, 'import sys, os\n'), ((2156, 2231), 'sys.stdout.write', 'sys.stdout.write', (['"""Starting test_1: CLASSICAL OPERATORS, ELITISM ENABLED\n"""'], {}), "('Starting test_1: CLASSICAL OPERATORS, ELITISM ENABLED\\n')\n", (2172, 2231), False, 'import sys, os\n'), ((2244, 2298), 'sys.stdout.write', 'sys.stdout.write', (['"""Input: ./tests/cvrp/A-n32-k5.vrp\n"""'], {}), "('Input: ./tests/cvrp/A-n32-k5.vrp\\n')\n", (2260, 2298), False, 'import sys, os\n'), ((2561, 2636), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['min']", '"""r"""'], {'label': '"""melhor"""', 'linewidth': '(2)'}), "(info['generation'], info['min'], 'r', label='melhor', linewidth=2)\n", (2569, 2636), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2649, 2724), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['mean']", '"""b"""'], {'label': '"""media"""', 'linewidth': '(2)'}), "(info['generation'], info['mean'], 'b', label='media', linewidth=2)\n", (2657, 2724), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2737, 2800), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['std']", '"""k."""'], {'label': '"""desvio"""'}), "(info['generation'], info['std'], 'k.', label='desvio')\n", (2745, 2800), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2823, 2865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'numpoints': '(1)'}), "(loc='lower right', numpoints=1)\n", (2833, 2865), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2878, 2900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""geracoes"""'], 
{}), "('geracoes')\n", (2888, 2900), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2913, 2934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fitness"""'], {}), "('fitness')\n", (2923, 2934), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2947, 2964), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2957, 2964), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((2977, 2987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2985, 2987), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((3001, 3105), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/A-n32-k5.vrp.csv\n"""'], {}), "(\n 'Finished. Results are at: ./results/classical_operators/A-n32-k5.vrp.csv\\n'\n )\n", (3017, 3105), False, 'import sys, os\n'), ((4569, 4607), 'grid_search.GridSearch', 'grid_search.GridSearch', (['solver', 'params'], {}), '(solver, params)\n', (4591, 4607), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((4845, 4954), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/B-n31-k5.vrp.grid.csv\n"""'], {}), "(\n 'Finished. 
Results are at: ./results/classical_operators/B-n31-k5.vrp.grid.csv\\n'\n )\n", (4861, 4954), False, 'import sys, os\n'), ((4971, 5046), 'sys.stdout.write', 'sys.stdout.write', (['"""Starting test_2: CLASSICAL OPERATORS, ELITISM ENABLED\n"""'], {}), "('Starting test_2: CLASSICAL OPERATORS, ELITISM ENABLED\\n')\n", (4987, 5046), False, 'import sys, os\n'), ((5059, 5112), 'sys.stdout.write', 'sys.stdout.write', (['"""Input: ./tests/vrp/B-n31-k5.vrp\n"""'], {}), "('Input: ./tests/vrp/B-n31-k5.vrp\\n')\n", (5075, 5112), False, 'import sys, os\n'), ((5362, 5437), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['min']", '"""r"""'], {'label': '"""melhor"""', 'linewidth': '(2)'}), "(info['generation'], info['min'], 'r', label='melhor', linewidth=2)\n", (5370, 5437), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5450, 5525), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['mean']", '"""b"""'], {'label': '"""media"""', 'linewidth': '(2)'}), "(info['generation'], info['mean'], 'b', label='media', linewidth=2)\n", (5458, 5525), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5538, 5601), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['std']", '"""k."""'], {'label': '"""desvio"""'}), "(info['generation'], info['std'], 'k.', label='desvio')\n", (5546, 5601), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5624, 5666), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'numpoints': '(1)'}), "(loc='lower right', numpoints=1)\n", (5634, 5666), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5679, 5701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""geracoes"""'], {}), "('geracoes')\n", (5689, 5701), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, 
itertools, matplotlib.pyplot as plt\n'), ((5714, 5735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fitness"""'], {}), "('fitness')\n", (5724, 5735), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5748, 5765), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5758, 5765), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5778, 5788), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5786, 5788), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((5802, 5906), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/B-n31-k5.vrp.csv\n"""'], {}), "(\n 'Finished. Results are at: ./results/classical_operators/B-n31-k5.vrp.csv\\n'\n )\n", (5818, 5906), False, 'import sys, os\n'), ((7370, 7408), 'grid_search.GridSearch', 'grid_search.GridSearch', (['solver', 'params'], {}), '(solver, params)\n', (7392, 7408), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((7646, 7755), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/P-n16-k8.vrp.grid.csv\n"""'], {}), "(\n 'Finished. 
Results are at: ./results/classical_operators/P-n16-k8.vrp.grid.csv\\n'\n )\n", (7662, 7755), False, 'import sys, os\n'), ((7772, 7847), 'sys.stdout.write', 'sys.stdout.write', (['"""Starting test_3: CLASSICAL OPERATORS, ELITISM ENABLED\n"""'], {}), "('Starting test_3: CLASSICAL OPERATORS, ELITISM ENABLED\\n')\n", (7788, 7847), False, 'import sys, os\n'), ((7860, 7913), 'sys.stdout.write', 'sys.stdout.write', (['"""Input: ./tests/vrp/P-n16-k8.vrp\n"""'], {}), "('Input: ./tests/vrp/P-n16-k8.vrp\\n')\n", (7876, 7913), False, 'import sys, os\n'), ((8163, 8238), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['min']", '"""r"""'], {'label': '"""melhor"""', 'linewidth': '(2)'}), "(info['generation'], info['min'], 'r', label='melhor', linewidth=2)\n", (8171, 8238), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8251, 8326), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['mean']", '"""b"""'], {'label': '"""media"""', 'linewidth': '(2)'}), "(info['generation'], info['mean'], 'b', label='media', linewidth=2)\n", (8259, 8326), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8339, 8402), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['std']", '"""k."""'], {'label': '"""desvio"""'}), "(info['generation'], info['std'], 'k.', label='desvio')\n", (8347, 8402), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8425, 8467), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'numpoints': '(1)'}), "(loc='lower right', numpoints=1)\n", (8435, 8467), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8480, 8502), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""geracoes"""'], {}), "('geracoes')\n", (8490, 8502), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, 
itertools, matplotlib.pyplot as plt\n'), ((8515, 8536), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fitness"""'], {}), "('fitness')\n", (8525, 8536), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8549, 8566), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8559, 8566), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8579, 8589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8587, 8589), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((8603, 8707), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/P-n16-k8.vrp.csv\n"""'], {}), "(\n 'Finished. Results are at: ./results/classical_operators/P-n16-k8.vrp.csv\\n'\n )\n", (8619, 8707), False, 'import sys, os\n'), ((10168, 10206), 'grid_search.GridSearch', 'grid_search.GridSearch', (['solver', 'params'], {}), '(solver, params)\n', (10190, 10206), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((10445, 10555), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/A-n80-k10.vrp.grid.csv\n"""'], {}), "(\n 'Finished. 
Results are at: ./results/classical_operators/A-n80-k10.vrp.grid.csv\\n'\n )\n", (10461, 10555), False, 'import sys, os\n'), ((10572, 10647), 'sys.stdout.write', 'sys.stdout.write', (['"""Starting test_4: CLASSICAL OPERATORS, ELITISM ENABLED\n"""'], {}), "('Starting test_4: CLASSICAL OPERATORS, ELITISM ENABLED\\n')\n", (10588, 10647), False, 'import sys, os\n'), ((10660, 10714), 'sys.stdout.write', 'sys.stdout.write', (['"""Input: ./tests/vrp/A-n80-k10.vrp\n"""'], {}), "('Input: ./tests/vrp/A-n80-k10.vrp\\n')\n", (10676, 10714), False, 'import sys, os\n'), ((10965, 11040), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['min']", '"""r"""'], {'label': '"""melhor"""', 'linewidth': '(2)'}), "(info['generation'], info['min'], 'r', label='melhor', linewidth=2)\n", (10973, 11040), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11053, 11128), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['mean']", '"""b"""'], {'label': '"""media"""', 'linewidth': '(2)'}), "(info['generation'], info['mean'], 'b', label='media', linewidth=2)\n", (11061, 11128), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11141, 11204), 'matplotlib.pyplot.plot', 'plt.plot', (["info['generation']", "info['std']", '"""k."""'], {'label': '"""desvio"""'}), "(info['generation'], info['std'], 'k.', label='desvio')\n", (11149, 11204), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11227, 11269), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'numpoints': '(1)'}), "(loc='lower right', numpoints=1)\n", (11237, 11269), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11282, 11304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""geracoes"""'], {}), "('geracoes')\n", (11292, 11304), True, 'import unittest, ga, 
cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11317, 11338), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fitness"""'], {}), "('fitness')\n", (11327, 11338), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11351, 11368), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (11361, 11368), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11381, 11391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11389, 11391), True, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11405, 11510), 'sys.stdout.write', 'sys.stdout.write', (['"""Finished. Results are at: ./results/classical_operators/A-n80-k10.vrp.csv\n"""'], {}), "(\n 'Finished. Results are at: ./results/classical_operators/A-n80-k10.vrp.csv\\n'\n )\n", (11421, 11510), False, 'import sys, os\n'), ((1361, 1408), 'numpy.logspace', 'numpy.logspace', (['(3)', '(12)'], {'base': '(2)', 'num': '(6)', 'dtype': 'int'}), '(3, 12, base=2, num=6, dtype=int)\n', (1375, 1408), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((4176, 4223), 'numpy.logspace', 'numpy.logspace', (['(3)', '(12)'], {'base': '(2)', 'num': '(6)', 'dtype': 'int'}), '(3, 12, base=2, num=6, dtype=int)\n', (4190, 4223), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((6977, 7024), 'numpy.logspace', 'numpy.logspace', (['(3)', '(12)'], {'base': '(2)', 'num': '(6)', 'dtype': 'int'}), '(3, 12, base=2, num=6, dtype=int)\n', (6991, 7024), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((9775, 9822), 'numpy.logspace', 'numpy.logspace', (['(3)', '(12)'], {'base': '(2)', 'num': '(6)', 'dtype': 'int'}), '(3, 12, base=2, num=6, dtype=int)\n', (9789, 9822), False, 
'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11795, 11861), 're.search', 're.search', (['"""NODE_COORD_SECTION\\\\s*(.*)\\\\s*DEMAND_SECTION"""', 'content'], {}), "('NODE_COORD_SECTION\\\\s*(.*)\\\\s*DEMAND_SECTION', content)\n", (11804, 11861), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((12290, 12351), 're.search', 're.search', (['"""DEMAND_SECTION\\\\s*(.*)\\\\s*DEPOT_SECTION"""', 'content'], {}), "('DEMAND_SECTION\\\\s*(.*)\\\\s*DEPOT_SECTION', content)\n", (12299, 12351), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((1650, 1718), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (1691, 1718), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((4465, 4533), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (4506, 4533), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((7266, 7334), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (7307, 7334), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((10064, 10132), 'ga.NumberOfGenerationsTerminationCriteria', 'ga.NumberOfGenerationsTerminationCriteria', ([], {'number_of_generations': '(100)'}), '(number_of_generations=100)\n', (10105, 10132), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11650, 11692), 're.search', 're.search', (['"""DIMENSION : ([0-9]+)"""', 'content'], 
{}), "('DIMENSION : ([0-9]+)', content)\n", (11659, 11692), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((11726, 11767), 're.search', 're.search', (['"""CAPACITY : ([0-9]+)"""', 'content'], {}), "('CAPACITY : ([0-9]+)', content)\n", (11735, 11767), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((12160, 12188), 'math.sqrt', 'math.sqrt', (['(xd * xd + yd * yd)'], {}), '(xd * xd + yd * yd)\n', (12169, 12188), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((1527, 1560), 'numpy.arange', 'numpy.arange', (['(0.125)', '(0.875)', '(0.125)'], {}), '(0.125, 0.875, 0.125)\n', (1539, 1560), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((4342, 4375), 'numpy.arange', 'numpy.arange', (['(0.125)', '(0.875)', '(0.125)'], {}), '(0.125, 0.875, 0.125)\n', (4354, 4375), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((7143, 7176), 'numpy.arange', 'numpy.arange', (['(0.125)', '(0.875)', '(0.125)'], {}), '(0.125, 0.875, 0.125)\n', (7155, 7176), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n'), ((9941, 9974), 'numpy.arange', 'numpy.arange', (['(0.125)', '(0.875)', '(0.125)'], {}), '(0.125, 0.875, 0.125)\n', (9953, 9974), False, 'import unittest, ga, cvrp, grid_search, re, math, numpy, itertools, matplotlib.pyplot as plt\n')] |
import os
import numpy as np
import networkx as nx
from tqdm import tqdm
from utils import load_networks
# Get File Names
# Resolve the data directory relative to this script's location.
data_path = os.path.join(os.path.dirname(__file__), '..', '..', 'Data')
networks_dir = load_networks(os.path.join(data_path, 'Generated', 'Barabasi'))
for net_dir in networks_dir:
    print('Calculating Matrix degree for', os.path.basename(net_dir))
    G = nx.read_gpickle(net_dir)
    N = G.number_of_nodes()
    # Degrees through time
    # Row `num` holds the degree of every node in the subgraph induced by
    # nodes 0..num; the remaining N-num-1 slots are zero-padded. Assumes the
    # pickled graph's nodes are labeled 0..N-1 — TODO confirm upstream.
    degrees_t = np.zeros((N, N))
    # NOTE(review): `t` (the matrix row) is unused; `for num in tqdm(range(N))`
    # would avoid walking degrees_t just to produce indices.
    for num, t in tqdm(enumerate(degrees_t)):
        index = range(num+1)
        H = G.subgraph(index)
        # Degrees of the induced subgraph, in node-iteration order.
        degrees_sub = np.fromiter(dict(nx.degree(H)).values(), dtype=int)
        degrees_t[num] = np.pad(degrees_sub, (0, N - num - 1), 'constant', constant_values=0)
    # Persist the degree-history matrix next to the source graph file.
    with open(net_dir.replace('.gz', '.npy'), 'wb') as f:
np.save(f, degrees_t) | [
"networkx.degree",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"os.path.basename",
"networkx.read_gpickle",
"numpy.pad",
"numpy.save"
] | [((151, 176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((227, 275), 'os.path.join', 'os.path.join', (['data_path', '"""Generated"""', '"""Barabasi"""'], {}), "(data_path, 'Generated', 'Barabasi')\n", (239, 275), False, 'import os\n'), ((385, 409), 'networkx.read_gpickle', 'nx.read_gpickle', (['net_dir'], {}), '(net_dir)\n', (400, 409), True, 'import networkx as nx\n'), ((482, 498), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (490, 498), True, 'import numpy as np\n'), ((350, 375), 'os.path.basename', 'os.path.basename', (['net_dir'], {}), '(net_dir)\n', (366, 375), False, 'import os\n'), ((703, 771), 'numpy.pad', 'np.pad', (['degrees_sub', '(0, N - num - 1)', '"""constant"""'], {'constant_values': '(0)'}), "(degrees_sub, (0, N - num - 1), 'constant', constant_values=0)\n", (709, 771), True, 'import numpy as np\n'), ((839, 860), 'numpy.save', 'np.save', (['f', 'degrees_t'], {}), '(f, degrees_t)\n', (846, 860), True, 'import numpy as np\n'), ((643, 655), 'networkx.degree', 'nx.degree', (['H'], {}), '(H)\n', (652, 655), True, 'import networkx as nx\n')] |
"""
A module to handle metrics
"""
import copy
import json
import numbers
from functools import partial
from typing import Any, Dict
import numpy as np
from utils import pairwise
def format_time(seconds):
    """Render a duration given in seconds as ``h:mm:ss.ss``.

    Hours are not zero-padded; minutes and seconds are.
    """
    whole_minutes, secs = divmod(seconds, 60.0)
    hours, minutes = divmod(int(whole_minutes), 60)
    return f"{hours}:{minutes:>02}:{secs:>05.2f}"
def format_basic(value, format_spec=""):
    """Apply ``format(value, format_spec)``; exists so functools.partial can curry it."""
    return format(value, format_spec)


def format_dynamic(value, format_funcs=(format_basic,)):
    """Format ``value`` with each function in ``format_funcs`` and return the
    shortest resulting string (earliest function wins ties).
    """
    return min((func(value) for func in format_funcs), key=len)


# Pre-curried formatters for common numeric styles.
# pylint:disable=invalid-name
format_int = partial(format_basic, format_spec=".0f")
format_percent = partial(format_basic, format_spec=".1%")
format_float = partial(format_basic, format_spec=".1f")
format_scientific = partial(format_basic, format_spec=".3g")
format_dynamic_float = partial(
    format_dynamic, format_funcs=(format_float, format_scientific)
)
# pylint:enable=invalid-name
# Registry mapping formatter names (as stored in Metric._formatter) to the
# actual callables; Metric.formatter looks its function up here, which keeps
# serialized metrics (name strings) JSON-friendly.
FORMATTERS = {
    "format_int": format_int,
    "format_time": format_time,
    "format_basic": format_basic,
    "format_dynamic": format_dynamic,
    "format_percent": format_percent,
    "format_float": format_float,
    "format_scientific": format_scientific,
    "format_dynamic_float": format_dynamic_float,
}
class Metric:
    """Tracks a stream of (value, count) observations for one named metric.

    Each update records a raw ``value`` together with a ``count`` (e.g. a
    batch size); ``value / count`` is the per-update average, and running
    min/max/average/variance are derived from the stored history.
    ``max_history`` (if set) bounds how many observations are kept.
    """
    def __init__(
        self,
        name,
        formatter="format_basic",
        default_format_str="g(a)",
        max_history=None,
    ):
        # name: label prepended when rendering the metric.
        # formatter: key into the module-level FORMATTERS registry.
        # default_format_str: spec string used by __str__ (see __format__).
        # max_history: optional cap on the number of stored observations.
        self.name = name
        self.max_history = max_history
        self._formatter = formatter
        self.default_format_str = default_format_str
        self.counts, self.values, self.min, self.max = self.reset()
    @classmethod
    def from_dict(cls, state: Dict[str, Any]):
        """
        Create a metric from the passed in dictionary — the inverse of
        serializing via ``__dict__`` (as MetricStore.save does).
        """
        metric = Metric("")
        metric.__dict__.update(state)
        return metric
    @property
    def formatter(self):
        """
        Get the formatter function for this metric by looking its stored
        name up in the FORMATTERS registry.
        """
        return FORMATTERS[self._formatter]
    def reset(self):
        """ Clear all history and return (counts, values, min, max) """
        self.counts = []
        self.values = []
        # Sentinels chosen so the first recorded average always replaces them.
        self.min = float("inf")
        self.max = float("-inf")
        return self.counts, self.values, self.min, self.max
    def update(self, value, count=1):
        """ Record one observation; value / count is its per-update average """
        self.counts.append(count)
        self.values.append(value)
        average = value / count
        self.min = min(self.min, average)
        self.max = max(self.max, average)
        # Drop the oldest observation once the history cap is exceeded.
        # NOTE: min/max are NOT recomputed after trimming; they remain
        # all-time extremes.
        if self.max_history and len(self.counts) > self.max_history:
            self.counts = self.counts[1:]
            self.values = self.values[1:]
    def updates(self, values, counts=1):
        """ Record several observations at once; a scalar count is broadcast """
        if isinstance(counts, numbers.Number):
            counts = [counts] * len(values)
        self.counts.extend(counts)
        self.values.extend(values)
        if self.max_history:
            # pylint thinks self.max_history is None...
            # pylint:disable=invalid-unary-operand-type
            self.counts = self.counts[-self.max_history :]
            self.values = self.values[-self.max_history :]
            # pylint:enable=invalid-unary-operand-type
        averages = [value / count for count, value in zip(counts, values)]
        self.min = min(self.min, min(averages))
        self.max = max(self.max, max(averages))
    @property
    def last_count(self):
        """ Return the last recorded count of the metric"""
        # fancy way to return the last count or zero
        return len(self.counts) and self.counts[-1]
    @property
    def last_value(self):
        """ Return the last recorded value of the metric """
        # fancy way to return the last value or zero
        return len(self.values) and self.values[-1]
    @property
    def last_average(self):
        """ Return the average of the last recorded observation """
        # max(..., 1) guards against a zero/empty count.
        return self.last_value / max(self.last_count, 1)
    @property
    def total(self):
        """ Return the current total """
        return sum(self.values)
    @property
    def total_count(self):
        """ Return the current total count """
        return sum(self.counts)
    @property
    def average(self):
        """ Return the current average value (total / total count) """
        return self.total / max(self.total_count, 1)
    @property
    def var(self):
        """ Return the count-weighted variance of the per-update values """
        # Need to use a weighted average since each value has an associated count
        counts = np.array(self.counts)
        values = np.array(self.values)
        weights = counts / self.total_count
        return np.average((values - self.average) ** 2, weights=weights)
    @property
    def std(self):
        """ Return the standard deviation of the values """
        return np.sqrt(self.var)
    def __format__(self, format_str):
        """ Return a formatted version of the metric.

        The spec string is a mini-language read one character at a time:
          l / c  switch to long (labeled) / compact output
          ( / )  emit literal parentheses; fields inside are comma-separated
          C / V  last count / last value
          g / a  last average / overall average
          t      total value
          m / x  min / max per-update average
          s / v  standard deviation / variance
        Any other character raises ValueError.
        """
        format_str = format_str or self.default_format_str
        formatted = f"{self.name}="
        compact = True
        paren_depth = 0
        for format_spec, next_format_spec in pairwise(format_str, True):
            if format_spec == "l":
                compact = False
            elif format_spec == "c":
                compact = True
            elif format_spec == "(":
                formatted += "("
                paren_depth += 1
            elif format_spec == ")":
                formatted += ")"
                paren_depth -= 1
            elif format_spec == "C":
                if not compact:
                    formatted += f"last_count="
                formatted += f"{self.formatter(self.last_count)}"
            elif format_spec == "V":
                if not compact:
                    formatted += f"last_value="
                formatted += f"{self.formatter(self.last_value)}"
            elif format_spec == "g":
                if not compact:
                    formatted += f"last_avg="
                formatted += f"{self.formatter(self.last_average)}"
            elif format_spec == "a":
                if not compact:
                    formatted += f"avg="
                formatted += f"{self.formatter(self.average)}"
            elif format_spec == "t":
                if not compact:
                    formatted += f"total="
                formatted += f"{self.formatter(self.total)}"
            elif format_spec == "m":
                if not compact:
                    formatted += f"min="
                formatted += f"{self.formatter(self.min)}"
            elif format_spec == "x":
                if not compact:
                    formatted += f"max="
                formatted += f"{self.formatter(self.max)}"
            elif format_spec == "s":
                if not compact:
                    formatted += f"std="
                formatted += f"{self.formatter(self.std)}"
            elif format_spec == "v":
                if not compact:
                    formatted += f"var="
                formatted += f"{self.formatter(self.var)}"
            else:
                raise ValueError(f"Unknown format specifier {format_spec}")
            # Separate adjacent fields inside parentheses (not after "(" or
            # just before the closing ")").
            if paren_depth and format_spec != "(" and next_format_spec != ")":
                formatted += ","
                if not compact:
                    formatted += " "
        return formatted
    def __str__(self):
        """ Return a string representation of the metric """
        return self.__format__(self.default_format_str)
class MetricStore:
    """A keyed collection of Metric objects.

    Behaves like a read-only mapping from metric name to Metric, and adds
    JSON persistence plus aggregate string formatting of all metrics.
    """

    def __init__(self, default_format_str="c"):
        """Initialize an empty store.

        Args:
            default_format_str: Format used by __str__/format(); "l" lists
                one metric per line, anything else joins them with commas.
        """
        super().__init__()
        self.metrics = {}
        self.default_format_str = default_format_str

    def keys(self):
        """ Return the metrics keys """
        return self.metrics.keys()

    def values(self):
        """ Return the metrics values """
        return self.metrics.values()

    def items(self):
        """ Return the metrics items """
        return self.metrics.items()

    def __getitem__(self, key):
        """ Return the requested metric """
        return self.metrics[key]

    def __contains__(self, key):
        """ See if we are tracking the named metric """
        return key in self.metrics

    def __len__(self):
        """ Count of the metrics being tracked """
        return len(self.metrics)

    def add(self, metric: Metric):
        """ Adds a copy of the Metric to the store if it does not already exist """
        # Deep copy so later updates to the caller's Metric don't leak in.
        if metric.name not in self.metrics:
            self.metrics[metric.name] = copy.deepcopy(metric)

    def save(self, path):
        """ Save the metrics to disk as JSON """
        with open(path, "wt") as metric_file:
            json.dump(
                self.metrics,
                metric_file,
                indent=2,
                # Serialize Metric objects via their attribute dict.
                default=lambda obj: getattr(obj, "__dict__", {}),
            )

    def load(self, path):
        """ Load the metrics from disk """
        with open(path, "rt") as metric_file:
            for name, metric_state in json.load(metric_file).items():
                self.metrics[name] = Metric.from_dict(metric_state)

    def __str__(self):
        """ Return a string representation of the metric store """
        return self.__format__(self.default_format_str)

    def __format__(self, format_str):
        """ Return a formatted version of the metric store """
        format_str = format_str or self.default_format_str
        # "l" (long) puts each metric on its own line; otherwise inline.
        separator = "\n" if format_str == "l" else ", "
        return separator.join(str(m) for m in self.metrics.values())
| [
"numpy.sqrt",
"numpy.average",
"numpy.array",
"functools.partial",
"utils.pairwise",
"copy.deepcopy",
"json.load"
] | [((830, 870), 'functools.partial', 'partial', (['format_basic'], {'format_spec': '""".0f"""'}), "(format_basic, format_spec='.0f')\n", (837, 870), False, 'from functools import partial\n'), ((888, 928), 'functools.partial', 'partial', (['format_basic'], {'format_spec': '""".1%"""'}), "(format_basic, format_spec='.1%')\n", (895, 928), False, 'from functools import partial\n'), ((944, 984), 'functools.partial', 'partial', (['format_basic'], {'format_spec': '""".1f"""'}), "(format_basic, format_spec='.1f')\n", (951, 984), False, 'from functools import partial\n'), ((1005, 1045), 'functools.partial', 'partial', (['format_basic'], {'format_spec': '""".3g"""'}), "(format_basic, format_spec='.3g')\n", (1012, 1045), False, 'from functools import partial\n'), ((1069, 1140), 'functools.partial', 'partial', (['format_dynamic'], {'format_funcs': '(format_float, format_scientific)'}), '(format_dynamic, format_funcs=(format_float, format_scientific))\n', (1076, 1140), False, 'from functools import partial\n'), ((4886, 4907), 'numpy.array', 'np.array', (['self.counts'], {}), '(self.counts)\n', (4894, 4907), True, 'import numpy as np\n'), ((4925, 4946), 'numpy.array', 'np.array', (['self.values'], {}), '(self.values)\n', (4933, 4946), True, 'import numpy as np\n'), ((5006, 5063), 'numpy.average', 'np.average', (['((values - self.average) ** 2)'], {'weights': 'weights'}), '((values - self.average) ** 2, weights=weights)\n', (5016, 5063), True, 'import numpy as np\n'), ((5173, 5190), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (5180, 5190), True, 'import numpy as np\n'), ((5475, 5501), 'utils.pairwise', 'pairwise', (['format_str', '(True)'], {}), '(format_str, True)\n', (5483, 5501), False, 'from utils import pairwise\n'), ((8940, 8961), 'copy.deepcopy', 'copy.deepcopy', (['metric'], {}), '(metric)\n', (8953, 8961), False, 'import copy\n'), ((9418, 9440), 'json.load', 'json.load', (['metric_file'], {}), '(metric_file)\n', (9427, 9440), False, 'import json\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper for motion imitation environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
class ImitationWrapperEnv(object):
"""An env using for training policy with motion imitation."""
def __init__(self,
gym_env,
episode_length_start=1000,
episode_length_end=1000,
curriculum_steps=0,
num_parallel_envs=1):
"""Initialzes the wrapped env.
Args:
gym_env: An instance of LocomotionGymEnv.
"""
self._gym_env = gym_env
self.observation_space = self._build_observation_space()
self._episode_length_start = episode_length_start
self._episode_length_end = episode_length_end
self._curriculum_steps = int(np.ceil(curriculum_steps / num_parallel_envs))
self._total_step_count = 0
if self._enable_curriculum():
self._update_time_limit()
else:
self._max_episode_steps = episode_length_end
self.seed()
return
  def __getattr__(self, attr):
    # Delegate any attribute not found on the wrapper to the wrapped env
    # (__getattr__ is only invoked after normal lookup fails).
    return getattr(self._gym_env, attr)
  def step(self, action):
    """Steps the wrapped environment.

    Args:
      action: Numpy array. The input action from an NN agent.

    Returns:
      A (observation, reward, done, info) tuple. The observation is the
      wrapped env's observation concatenated with the task's target
      observations. info carries:
        "terminated": True only when the underlying env ended the episode
          (as opposed to the wrapper's time limit).
        "max_torque": largest absolute observed motor torque this step.
        "metrics": result of the wrapped env's _get_metrics().

    Raises:
      ValueError if input action is None.
    """
    original_observation, reward, done, _ = self._gym_env.step(action)
    observation = self._modify_observation(original_observation)
    # Preserve the env's own termination flag before applying the time limit,
    # so truncation can be distinguished from true termination.
    terminated = done
    done |= (self.env_step_counter >= self._max_episode_steps)
    if not done:
      self._total_step_count += 1
    info = {
        "terminated":
            terminated,
        "max_torque":
            np.max(np.abs(self._gym_env._robot._observed_motor_torques)),
        "metrics":
            self._get_metrics()
    }
    return observation, reward, done, info
def reset(self, initial_motor_angles=None, reset_duration=0.0):
"""Resets the robot's position in the world or rebuild the sim world.
The simulation world will be rebuilt if self._hard_reset is True.
Args:
initial_motor_angles: A list of Floats. The desired joint angles after
reset. If None, the robot will use its built-in value.
reset_duration: Float. The time (in seconds) needed to rotate all motors
to the desired initial values.
Returns:
A numpy array contains the initial observation after reset.
"""
original_observation = self._gym_env.reset(initial_motor_angles, reset_duration)
observation = self._modify_observation(original_observation)
if self._enable_curriculum():
self._update_time_limit()
return observation
def _modify_observation(self, original_observation):
"""Appends target observations from the reference motion to the observations.
Args:
original_observation: A numpy array containing the original observations.
Returns:
A numpy array contains the initial original concatenated with target
observations from the reference motion.
"""
target_observation = self._task.build_target_obs()
observation = np.concatenate([original_observation, target_observation], axis=-1)
return observation
def _build_observation_space(self):
"""Constructs the observation space, including target observations from
the reference motion.
Returns:
Observation space representing the concatenations of the original
observations and target observations.
"""
obs_space0 = self._gym_env.observation_space
low0 = obs_space0.low
high0 = obs_space0.high
task_low, task_high = self._task.get_target_obs_bounds()
low = np.concatenate([low0, task_low], axis=-1)
high = np.concatenate([high0, task_high], axis=-1)
obs_space = gym.spaces.Box(low, high)
return obs_space
def _enable_curriculum(self):
"""Check if curriculum is enabled."""
return self._curriculum_steps > 0
def _update_time_limit(self):
"""Updates the current episode length depending on the number of environment steps taken so far."""
t = float(self._total_step_count) / self._curriculum_steps
t = np.clip(t, 0.0, 1.0)
t = np.power(t, 3.0)
new_steps = int((1.0 - t) * self._episode_length_start +
t * self._episode_length_end)
self._max_episode_steps = new_steps
return
def _get_metrics(self):
x, y, _ = self._gym_env.last_base_position
(forward, sideways, _), (_, _, yaw) = self._gym_env.robot.RelativeTransformSinceReset()
yaw = np.rad2deg(yaw)
# First element is value; second is aggregator function.
return {
# Aggregator finds the farthest value from the origin.
"Position/Final_Robot_X": (x, lambda vec: max(vec, key=abs)),
"Position/Final_Robot_Y": (y, lambda vec: max(vec, key=abs)),
"Position/Robot_Travel_Forward": (forward, np.mean),
"Position/Robot_Travel_Sideways": (sideways, np.mean),
"Position/Robot_Travel_Yaw_Deg": (yaw, np.mean),
}
def set_task(self, new_task):
self._gym_env.set_task(new_task)
self.observation_space = self._build_observation_space()
| [
"numpy.clip",
"numpy.abs",
"numpy.ceil",
"numpy.power",
"gym.spaces.Box",
"numpy.concatenate",
"numpy.rad2deg"
] | [((3790, 3857), 'numpy.concatenate', 'np.concatenate', (['[original_observation, target_observation]'], {'axis': '(-1)'}), '([original_observation, target_observation], axis=-1)\n', (3804, 3857), True, 'import numpy as np\n'), ((4335, 4376), 'numpy.concatenate', 'np.concatenate', (['[low0, task_low]'], {'axis': '(-1)'}), '([low0, task_low], axis=-1)\n', (4349, 4376), True, 'import numpy as np\n'), ((4388, 4431), 'numpy.concatenate', 'np.concatenate', (['[high0, task_high]'], {'axis': '(-1)'}), '([high0, task_high], axis=-1)\n', (4402, 4431), True, 'import numpy as np\n'), ((4449, 4474), 'gym.spaces.Box', 'gym.spaces.Box', (['low', 'high'], {}), '(low, high)\n', (4463, 4474), False, 'import gym\n'), ((4818, 4838), 'numpy.clip', 'np.clip', (['t', '(0.0)', '(1.0)'], {}), '(t, 0.0, 1.0)\n', (4825, 4838), True, 'import numpy as np\n'), ((4847, 4863), 'numpy.power', 'np.power', (['t', '(3.0)'], {}), '(t, 3.0)\n', (4855, 4863), True, 'import numpy as np\n'), ((5205, 5220), 'numpy.rad2deg', 'np.rad2deg', (['yaw'], {}), '(yaw)\n', (5215, 5220), True, 'import numpy as np\n'), ((1384, 1429), 'numpy.ceil', 'np.ceil', (['(curriculum_steps / num_parallel_envs)'], {}), '(curriculum_steps / num_parallel_envs)\n', (1391, 1429), True, 'import numpy as np\n'), ((2379, 2431), 'numpy.abs', 'np.abs', (['self._gym_env._robot._observed_motor_torques'], {}), '(self._gym_env._robot._observed_motor_torques)\n', (2385, 2431), True, 'import numpy as np\n')] |
import torch
import random
import numpy as np
def set_seed(seed=0):
    """Seed the random, numpy and torch RNGs and force deterministic cuDNN.

    Args:
        seed: Integer seed shared by all three RNGs (default 0).

    Returns:
        True, once every RNG has been seeded.
    """
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(seed)
    # Deterministic cuDNN kernels; disable auto-tuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    return True
"torch.manual_seed",
"numpy.random.seed",
"random.seed"
] | [((73, 93), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (87, 93), True, 'import numpy as np\n'), ((98, 115), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (109, 115), False, 'import random\n'), ((120, 143), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (137, 143), False, 'import torch\n')] |
import numpy as np
from skipi.function import Function, Integral
from ..helper import assert_equal, randspace
def test_integration():
    """The antiderivative of 6x on [0, 10] is 3x**2."""
    grid = np.linspace(0, 10, 100)
    linear = Function(grid, lambda t: 6 * t)
    antiderivative = Integral.from_function(linear)
    assert_equal(antiderivative, Function(grid, lambda t: 3 * t ** 2))
def abstract_test_integration(domain):
    """Check the antiderivative of 6x is 3x**2 on an arbitrary domain."""
    linear = Function(domain, lambda t: 6 * t)
    antiderivative = Integral.from_function(linear)
    assert_equal(antiderivative, Function(domain, lambda t: 3 * t ** 2))
def test_linearity():
    """Integration is linear: Int(1 + 2x) == x + x**2."""
    grid = np.linspace(0, 10, 1000)
    constant = Function(grid, lambda t: 1)
    linear = Function(grid, lambda t: 2 * t)
    combined = constant + linear
    antiderivative = Integral.from_function(combined)
    assert_equal(antiderivative, Function(grid, lambda t: t + t ** 2))
def test_intergation_bounds():
    """Integration constants and explicit lower bounds are handled correctly."""
    grid = np.linspace(-0.7, 1, 1001)
    line = Function(grid, lambda t: 2 * t + 1)
    # Default lower bound is the first grid point, so the antiderivative of
    # 2t + 1 is t**2 + t shifted by its value at that point.
    offset = grid[0] ** 2 + grid[0]
    assert_equal(Integral.from_function(line),
                 Function(grid, lambda t: t ** 2 + t - offset))
    # Note here: since we're not starting the integration from exactly 0, but
    # from close to zero, we have to add this region to the integral via C ...
    clipped = line.vremesh((0, None))
    start = clipped.get_domain()[0]
    shifted = Integral.from_function(line.vremesh((0, None)), C=(start ** 2 + start))
    assert_equal(shifted, Function(shifted.get_domain(), lambda t: t ** 2 + t))
    # An explicit x0 anchors the antiderivative at zero instead.
    anchored = Integral.from_function(line, x0=0)
    assert_equal(anchored, Function(grid, lambda t: t ** 2 + t), TOL=1e-6)
def test_strechted_exponential():
    """Numerical integral of exp(-sqrt(x)) matches the closed form."""
    grid = np.linspace(0, 10, 50000)
    stretched = Function(grid, lambda t: np.exp(-np.sqrt(t)))
    numeric = Integral.from_function(stretched, 0)
    # Closed form: Int_0^x exp(-sqrt(u)) du = 2 - 2*exp(-sqrt(x))*(1 + sqrt(x))
    exact = Function(grid, lambda t: -2 * np.exp(-np.sqrt(t)) * (1 + np.sqrt(t)) + 2)
    assert_equal(numeric, exact, TOL=1e-6)
| [
"numpy.linspace",
"skipi.function.Function",
"numpy.sqrt",
"skipi.function.Integral.from_function"
] | [((152, 175), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (163, 175), True, 'import numpy as np\n'), ((184, 219), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 6 * x)'], {}), '(x_domain, lambda x: 6 * x)\n', (192, 219), False, 'from skipi.function import Function, Integral\n'), ((228, 253), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f'], {}), '(f)\n', (250, 253), False, 'from skipi.function import Function, Integral\n'), ((263, 303), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 3 * x ** 2)'], {}), '(x_domain, lambda x: 3 * x ** 2)\n', (271, 303), False, 'from skipi.function import Function, Integral\n'), ((400, 435), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 6 * x)'], {}), '(x_domain, lambda x: 6 * x)\n', (408, 435), False, 'from skipi.function import Function, Integral\n'), ((444, 469), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f'], {}), '(f)\n', (466, 469), False, 'from skipi.function import Function, Integral\n'), ((479, 519), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 3 * x ** 2)'], {}), '(x_domain, lambda x: 3 * x ** 2)\n', (487, 519), False, 'from skipi.function import Function, Integral\n'), ((584, 608), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (595, 608), True, 'import numpy as np\n'), ((618, 649), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 1)'], {}), '(x_domain, lambda x: 1)\n', (626, 649), False, 'from skipi.function import Function, Integral\n'), ((659, 694), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 2 * x)'], {}), '(x_domain, lambda x: 2 * x)\n', (667, 694), False, 'from skipi.function import Function, Integral\n'), ((719, 744), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f'], {}), '(f)\n', (741, 744), False, 'from skipi.function import Function, Integral\n'), 
((754, 794), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: x + x ** 2)'], {}), '(x_domain, lambda x: x + x ** 2)\n', (762, 794), False, 'from skipi.function import Function, Integral\n'), ((868, 894), 'numpy.linspace', 'np.linspace', (['(-0.7)', '(1)', '(1001)'], {}), '(-0.7, 1, 1001)\n', (879, 894), True, 'import numpy as np\n'), ((903, 942), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: 2 * x + 1)'], {}), '(x_domain, lambda x: 2 * x + 1)\n', (911, 942), False, 'from skipi.function import Function, Integral\n'), ((952, 977), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f'], {}), '(f)\n', (974, 977), False, 'from skipi.function import Function, Integral\n'), ((988, 1063), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: x ** 2 + x - (x_domain[0] ** 2 + x_domain[0]))'], {}), '(x_domain, lambda x: x ** 2 + x - (x_domain[0] ** 2 + x_domain[0]))\n', (996, 1063), False, 'from skipi.function import Function, Integral\n'), ((1460, 1491), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f'], {'x0': '(0)'}), '(f, x0=0)\n', (1482, 1491), False, 'from skipi.function import Function, Integral\n'), ((1501, 1541), 'skipi.function.Function', 'Function', (['x_domain', '(lambda x: x ** 2 + x)'], {}), '(x_domain, lambda x: x ** 2 + x)\n', (1509, 1541), False, 'from skipi.function import Function, Integral\n'), ((1628, 1653), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(50000)'], {}), '(0, 10, 50000)\n', (1639, 1653), True, 'import numpy as np\n'), ((1720, 1748), 'skipi.function.Integral.from_function', 'Integral.from_function', (['f', '(0)'], {}), '(f, 0)\n', (1742, 1748), False, 'from skipi.function import Function, Integral\n'), ((1699, 1709), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (1706, 1709), True, 'import numpy as np\n'), ((1825, 1835), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (1832, 1835), True, 'import numpy as np\n'), ((1806, 1816), 'numpy.sqrt', 
'np.sqrt', (['x'], {}), '(x)\n', (1813, 1816), True, 'import numpy as np\n')] |
from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal
import shannon.discrete as discrete
from numpy import array, mod, arange, histogram
from numpy.random import randint, randn
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pdb
def test_entropy():
    """Entropy of known distributions and of sampled uniform data."""
    # A certain outcome carries no information.
    prob = array([1])
    H = discrete.entropy(prob=prob)
    assert_equal(H, 0)
    # Uniform distribution over 2 symbols: 1 bit.
    prob = array([.5]*2)
    H = discrete.entropy(prob=prob)
    assert_equal(H, 1)
    # Uniform distribution over 4 symbols: 2 bits.
    prob = array([.25]*4)
    H = discrete.entropy(prob=prob)
    assert_equal(H, 2)
    # Entropy estimated from a large uniform sample converges to 2 bits.
    # `size` must be an integer: numpy raises TypeError for the float 1e5.
    x = randint(0, 4, size=100000)
    H = discrete.entropy(x)
    assert_almost_equal(H, 2, places=3)
def test_symbols_to_prob():
    """Probabilities estimated from symbol sequences match expectations."""
    # Two symbols, five occurrences each -> uniform distribution over 2.
    seq = [1]*5 + [2]*5
    observed = discrete.symbols_to_prob(seq).prob()
    assert_true((array((.5, .5)) == observed).all())
    # Combining three sequences pairwise exercises the chain rule for
    # symbol combination; the result is uniform over 4 outcomes.
    a = [1, 1, 1, 1]
    b = ['r', 'r', 'b', 'b']
    c = [1, 'r', 1, 'r']
    pair = discrete.combine_symbols(a, b)
    triple = discrete.combine_symbols(pair, c)
    observed = discrete.symbols_to_prob(triple).prob()
    assert_true((array([.25]*4) == observed).all())
def test_mi():
    """Mutual information sanity checks."""
    # Perfectly balanced, independent sequences: MI is exactly zero.
    left = [1, 1, 1, 1, 2, 2, 2, 2]
    right = [1, 2, 1, 2, 1, 2, 1, 2]
    assert_equal(discrete.mi(left, right), 0)
    # Large independent uniform samples: MI approaches zero.
    x = randint(0, 8, 100000)
    y = randint(0, 8, 100000)
    assert_almost_equal(discrete.mi(x, y), 0, places=3)
    # y = x mod 4 retains exactly 2 bits of information about x.
    y = mod(x, 4)
    assert_almost_equal(discrete.mi(x, y), 2, places=2)
def test_combine_symbols():
    """discrete.combine_symbols zips several 1D symbol streams into tuples."""
    # Two numeric sequences.
    expected = ((1, 1), (1, 2), (2, 1), (2, 2))
    assert_equal(expected, discrete.combine_symbols([1, 1, 2, 2], [1, 2, 1, 2]))
    # Categorical symbols mix freely with numeric ones.
    colours = ('r', 'r', 'g', 'g', 'b', 'b')
    mixed = ('r', 1, 'r', 1, 'r', 1)
    expected = (('r', 'r'), ('r', 1), ('g', 'r'), ('g', 1), ('b', 'r'), ('b', 1))
    assert_equal(expected, discrete.combine_symbols(colours, mixed))
    # Four sequences combined in a single call.
    expected = ((1, 1, 1, 2), (1, 1, 2, 2), (1, 2, 2, 1), (1, 2, 1, 1))
    assert_equal(expected,
                 discrete.combine_symbols((1, 1, 1, 1), (1, 1, 2, 2),
                                          (1, 2, 2, 1), (2, 2, 1, 1)))
    # Iterables of different lengths must raise.
    assert_raises(ValueError, discrete.combine_symbols, (1, 2, 3, 4), (1, 2, 3))
def test_bin():
    """Binning 1000 evenly spaced samples into 10 bins gives 100 per bin."""
    samples = arange(0, 1, 0.001)
    binned, _ = discrete.bin(samples, 10)
    counts, _ = histogram(binned, 10)
    assert_array_equal(counts, [100]*10)
| [
"shannon.discrete.bin",
"numpy.histogram",
"nose.tools.assert_almost_equal",
"numpy.testing.assert_array_equal",
"shannon.discrete.combine_symbols",
"numpy.array",
"numpy.random.randint",
"nose.tools.assert_raises",
"shannon.discrete.entropy",
"shannon.discrete.mi",
"nose.tools.assert_equal",
... | [((374, 384), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (379, 384), False, 'from numpy import array, mod, arange, histogram\n'), ((432, 459), 'shannon.discrete.entropy', 'discrete.entropy', ([], {'prob': 'prob'}), '(prob=prob)\n', (448, 459), True, 'import shannon.discrete as discrete\n'), ((464, 482), 'nose.tools.assert_equal', 'assert_equal', (['H', '(0)'], {}), '(H, 0)\n', (476, 482), False, 'from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal\n'), ((499, 515), 'numpy.array', 'array', (['([0.5] * 2)'], {}), '([0.5] * 2)\n', (504, 515), False, 'from numpy import array, mod, arange, histogram\n'), ((521, 548), 'shannon.discrete.entropy', 'discrete.entropy', ([], {'prob': 'prob'}), '(prob=prob)\n', (537, 548), True, 'import shannon.discrete as discrete\n'), ((553, 571), 'nose.tools.assert_equal', 'assert_equal', (['H', '(1)'], {}), '(H, 1)\n', (565, 571), False, 'from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal\n'), ((584, 601), 'numpy.array', 'array', (['([0.25] * 4)'], {}), '([0.25] * 4)\n', (589, 601), False, 'from numpy import array, mod, arange, histogram\n'), ((607, 634), 'shannon.discrete.entropy', 'discrete.entropy', ([], {'prob': 'prob'}), '(prob=prob)\n', (623, 634), True, 'import shannon.discrete as discrete\n'), ((639, 657), 'nose.tools.assert_equal', 'assert_equal', (['H', '(2)'], {}), '(H, 2)\n', (651, 657), False, 'from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal\n'), ((666, 694), 'numpy.random.randint', 'randint', (['(0)', '(4)'], {'size': '(100000.0)'}), '(0, 4, size=100000.0)\n', (673, 694), False, 'from numpy.random import randint, randn\n'), ((697, 716), 'shannon.discrete.entropy', 'discrete.entropy', (['x'], {}), '(x)\n', (713, 716), True, 'import shannon.discrete as discrete\n'), ((721, 756), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['H', '(2)'], {'places': '(3)'}), '(H, 2, places=3)\n', (740, 756), False, 
'from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal\n'), ((861, 878), 'numpy.array', 'array', (['(0.5, 0.5)'], {}), '((0.5, 0.5))\n', (866, 878), False, 'from numpy import array, mod, arange, histogram\n'), ((1137, 1167), 'shannon.discrete.combine_symbols', 'discrete.combine_symbols', (['x', 'y'], {}), '(x, y)\n', (1161, 1167), True, 'import shannon.discrete as discrete\n'), ((1238, 1269), 'shannon.discrete.combine_symbols', 'discrete.combine_symbols', (['s0', 'z'], {}), '(s0, z)\n', (1262, 1269), True, 'import shannon.discrete as discrete\n'), ((1422, 1439), 'numpy.array', 'array', (['([0.25] * 4)'], {}), '([0.25] * 4)\n', (1427, 1439), False, 'from numpy import array, mod, arange, histogram\n'), ((1699, 1720), 'numpy.random.randint', 'randint', (['(0)', '(8)', '(100000)'], {}), '(0, 8, 100000)\n', (1706, 1720), False, 'from numpy.random import randint, randn\n'), ((1729, 1750), 'numpy.random.randint', 'randint', (['(0)', '(8)', '(100000)'], {}), '(0, 8, 100000)\n', (1736, 1750), False, 'from numpy.random import randint, randn\n'), ((1837, 1846), 'numpy.mod', 'mod', (['x', '(4)'], {}), '(x, 4)\n', (1840, 1846), False, 'from numpy import array, mod, arange, histogram\n'), ((2761, 2818), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'discrete.combine_symbols', 'x', 'y'], {}), '(ValueError, discrete.combine_symbols, x, y)\n', (2774, 2818), False, 'from nose.tools import assert_equal, assert_true, assert_raises, assert_almost_equal\n'), ((2844, 2863), 'numpy.arange', 'arange', (['(0)', '(1)', '(0.001)'], {}), '(0, 1, 0.001)\n', (2850, 2863), False, 'from numpy import array, mod, arange, histogram\n'), ((2923, 2942), 'shannon.discrete.bin', 'discrete.bin', (['x', '(10)'], {}), '(x, 10)\n', (2935, 2942), True, 'import shannon.discrete as discrete\n'), ((2954, 2970), 'numpy.histogram', 'histogram', (['y', '(10)'], {}), '(y, 10)\n', (2963, 2970), False, 'from numpy import array, mod, arange, histogram\n'), ((2991, 3015), 
'numpy.testing.assert_array_equal', 'assert_array_equal', (['h', 'z'], {}), '(h, z)\n', (3009, 3015), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1632, 1649), 'shannon.discrete.mi', 'discrete.mi', (['x', 'y'], {}), '(x, y)\n', (1643, 1649), True, 'import shannon.discrete as discrete\n'), ((1775, 1792), 'shannon.discrete.mi', 'discrete.mi', (['x', 'y'], {}), '(x, y)\n', (1786, 1792), True, 'import shannon.discrete as discrete\n'), ((1870, 1887), 'shannon.discrete.mi', 'discrete.mi', (['x', 'y'], {}), '(x, y)\n', (1881, 1887), True, 'import shannon.discrete as discrete\n'), ((2138, 2168), 'shannon.discrete.combine_symbols', 'discrete.combine_symbols', (['x', 'y'], {}), '(x, y)\n', (2162, 2168), True, 'import shannon.discrete as discrete\n'), ((2371, 2401), 'shannon.discrete.combine_symbols', 'discrete.combine_symbols', (['x', 'y'], {}), '(x, y)\n', (2395, 2401), True, 'import shannon.discrete as discrete\n'), ((2599, 2639), 'shannon.discrete.combine_symbols', 'discrete.combine_symbols', (['x0', 'x1', 'x2', 'x3'], {}), '(x0, x1, x2, x3)\n', (2623, 2639), True, 'import shannon.discrete as discrete\n'), ((940, 967), 'shannon.discrete.symbols_to_prob', 'discrete.symbols_to_prob', (['x'], {}), '(x)\n', (964, 967), True, 'import shannon.discrete as discrete\n'), ((1449, 1477), 'shannon.discrete.symbols_to_prob', 'discrete.symbols_to_prob', (['s1'], {}), '(s1)\n', (1473, 1477), True, 'import shannon.discrete as discrete\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 <NAME>, <NAME>
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
"""This module implements a GUI using the wxPython toolkit."""
import logging
logger = logging.getLogger(__name__)
if __name__ == '__main__':
import sys
logging.basicConfig(level=logging.DEBUG)
logging.StreamHandler(stream=sys.stdout)
import wx
import wx.lib.plot as plot
import numpy
import os
import kk, data
try:
import scipy.optimize
SCIPY_FLAG = True
except ImportError:
SCIPY_FLAG = False
logger.info('Failed to import the scipy.optimize module - disabling the \'fix distortions\' checkbox.')
class MyFrame(wx.Frame):
	def __init__(self):
		"""Build the main window: menus, left control panel and right plot area."""
		wx.Frame.__init__(self, None, wx.ID_ANY, "Kramers-Kronig Calculator", size=(500, 800))
		# Initialise variables
		self.dirname = ''
		self.raw_file = None
		self.total_asf = None
		self.total_Im_coeffs = None
		self.merged_Im = None
		self.nexafs_CutOut = []
		self.MolecularMass = 1
		self.asf_bg = None
		#New set of variables to initialise. All those above might want to be removed.
		self.ChemicalFormula = None
		self.Stoichiometry = None
		self.Relativistic_Correction = None
		self.NearEdgeData = None
		self.splice_ind = None
		self.ASF_E = None
		self.ASF_Data = None
		self.Full_E = None
		self.Imaginary_Spectrum = None
		self.KK_Real_Spectrum = None
		# Setting up the menus.
		filemenu = wx.Menu()
		filemenu.Append(wx.ID_OPEN, "L&oad", " Load photoabsorption data from file")
		filemenu.AppendSeparator()
		filemenu.Append(wx.ID_SAVE, "&Save", " Export results to file")
		exportmenu = wx.Menu()
		# Plain integer IDs 201/202 are recognised by OnSave to pick the export format.
		exportmenu.Append(201,"Photoabsorption", " Export X-ray absorption data")
		exportmenu.Append(202,"Refractive Index", " Export beta and delta")
		filemenu.AppendMenu(200,"Export",exportmenu) # Adding the "exportmenu" to the filemenu
		filemenu.AppendSeparator()
		filemenu.Append(wx.ID_EXIT, "E&xit", " Terminate the program")
		helpmenu = wx.Menu()
		helpmenu.Append(wx.ID_HELP, "&Help", " How to use this program")
		helpmenu.AppendSeparator()
		helpmenu.Append(wx.ID_ABOUT, "&About", " Information about this program")
		# Creating the menubar.
		menuBar = wx.MenuBar()
		menuBar.Append(filemenu, "&File") # Adding the "filemenu" to the MenuBar
		menuBar.Append(helpmenu, "&Help") # Adding the "helpmenu" to the MenuBar
		self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
		# Route menu events to their handlers.
		wx.EVT_MENU(self, wx.ID_OPEN, self.OnOpen)
		wx.EVT_MENU(self, wx.ID_SAVE, self.OnSave)
		wx.EVT_MENU(self, 201, self.OnSave) # will set convert_to="photoabsorption" when ID is recognised
		wx.EVT_MENU(self, 202, self.OnSave) # will set convert_to="refractive_index" when ID is recognised
		wx.EVT_MENU(self, wx.ID_EXIT, self.OnExit)
		wx.EVT_MENU(self, wx.ID_ABOUT, self.OnAbout)
		wx.EVT_MENU(self, wx.ID_HELP, self.OnHelp)
		Sizer1 = wx.BoxSizer(wx.HORIZONTAL) # create outer sizer
		SizerL = wx.BoxSizer(wx.VERTICAL) # create left-hand sizer for controls
		SizerR = wx.BoxSizer(wx.VERTICAL) # create right-hand sizer for plots
		############################Data box
		DataBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Near-Edge Data"), wx.VERTICAL)
		self.FileText = wx.StaticText(self, -1, "File: (None)")
		DataBox.Add(self.FileText, 1, wx.GROW)
		DataTypeLabel = wx.StaticText(self, -1, "Data Type: ")
		self.DataTypeCombo = wx.ComboBox(self, -1, value='Photoabsorption', style=wx.CB_READONLY)
		self.DataTypeCombo.Append('Photoabsorption')
		self.DataTypeCombo.Append('Beta')
		self.DataTypeCombo.Append('Scattering Factor')
		self.DataTypeCombo.Bind(wx.EVT_COMBOBOX, self.MergeAdd_check)
		DataTypeSizer = wx.BoxSizer(wx.HORIZONTAL)
		DataTypeSizer.Add(DataTypeLabel)
		DataTypeSizer.Add(self.DataTypeCombo, 2, wx.GROW)
		DataBox.Add(DataTypeSizer, 1, wx.GROW)
		# Splice-point entry fields; "Start"/"End" placeholders mean "use data limits".
		SpliceSizer = wx.BoxSizer(wx.HORIZONTAL)
		self.SpliceText1 = wx.TextCtrl(self, -1, "Start", style=wx.TE_PROCESS_ENTER)
		self.SpliceText1.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
		self.SpliceText1.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
		SpliceSizer.Add(self.SpliceText1, 1)
		self.SpliceText2 = wx.TextCtrl(self, -1, "End", style=wx.TE_PROCESS_ENTER)
		self.SpliceText2.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
		self.SpliceText2.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
		SpliceSizer.Add(self.SpliceText2, 1)
		DataBox.Add(SpliceSizer, 1, wx.GROW)
		# Background_CloseSizer = wx.BoxSizer(wx.HORIZONTAL)
		# self.InvertDataCheckBox = wx.CheckBox(self, -1, "Invert Data")
		# self.InvertDataCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
		# DataBox.Add(self.InvertDataCheckBox, 0)
		self.AddBackgroundCheckBox = wx.CheckBox(self, -1, "Add background")
		self.AddBackgroundCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
		self.AddBackgroundCheckBox.Disable()
		self.AddBackgroundCheckBox.SetToolTip(wx.ToolTip("Not implemented"))
		DataBox.Add(self.AddBackgroundCheckBox, 0)
		self.FixDistortionsCheckBox = wx.CheckBox(self, -1, "Fix distortions")
		self.FixDistortionsCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
		# Distortion fixing needs scipy.optimize; grey the option out if missing.
		if not SCIPY_FLAG:
			self.FixDistortionsCheckBox.Disable()
			self.FixDistortionsCheckBox.SetToolTip(wx.ToolTip("Install the SciPy module to use this feature"))
		DataBox.Add(self.FixDistortionsCheckBox, 0)
		# Background_CloseSizer.Add(self.AddBackgroundCheckBox, 0)
		# self.AddBackgroundCheckBox.Bind(wx.EVT_CHECKBOX, self.MergeAdd_check)
		# Background_CloseSizer.AddStretchSpacer(1)
		# self.CloseFile = wx.Button(self, -1, "X", style= wx.BU_EXACTFIT)
		# Background_CloseSizer.Add(self.CloseFile, 0)
		# DataBox.Add(Background_CloseSizer, 1, wx.GROW)
		############################Material box
		self.MaterialBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Material"), wx.VERTICAL)
		DensitySizer = wx.BoxSizer(wx.HORIZONTAL)
		DensitySizer.Add(wx.StaticText(self, -1, "Density: "))
		self.DensityText = wx.TextCtrl(self, -1, "1", style=wx.TE_PROCESS_ENTER)
		self.DensityText.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
		self.DensityText.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
		DensitySizer.Add(self.DensityText, 1)
		DensitySizer.Add(wx.StaticText(self, -1, " g/ml"))
		self.MaterialBox.Add(DensitySizer, 0)
		StoichiometrySizer = wx.BoxSizer(wx.HORIZONTAL)
		StoichiometrySizer.Add(wx.StaticText(self, -1, "Stoichiometry: "))
		self.StoichiometryText = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER)
		self.StoichiometryText.Bind(wx.EVT_KILL_FOCUS, self.Stoichiometry_Text_check)
		self.StoichiometryText.Bind(wx.EVT_TEXT_ENTER, self.Stoichiometry_Text_check)
		StoichiometrySizer.Add(self.StoichiometryText, 1)
		self.MaterialBox.Add(StoichiometrySizer, 0)
		############################Calc box
		CalcBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Calculation"), wx.VERTICAL)
		CalcButton = wx.Button(self, -1, "Calculate")
		CalcBox.Add(CalcButton, 1, wx.GROW)
		CalcButton.Bind(wx.EVT_BUTTON, self.calculate)
		SizerL.Add(DataBox, 0, wx.GROW)
		SizerL.Add(self.MaterialBox, 1, wx.GROW)
		SizerL.AddStretchSpacer(1)
		SizerL.Add(CalcBox, 0, wx.GROW)
		self.PlotAxes = plot.PlotCanvas(self)
		SizerR.Add(self.PlotAxes, 1, wx.GROW)
		#SizerR.Add(self.Rplot, 1, wx.GROW)
		# enable the zoom feature (drag a box around area of interest)
		self.PlotAxes.SetEnableZoom(True)
		#self.Rplot.SetEnableZoom(True)
		Sizer1.Add(SizerL, 1, wx.GROW)
		Sizer1.Add(SizerR, 3, wx.GROW)
		self.SetAutoLayout(True)
		self.SetSizer(Sizer1) # add outer sizer to frame
		self.Fit()
		self.Show(True)
		self.plot_data()
		#self.Test()
	def Test(self):
		"""Convenience function for repetitive testing: loads a fixed data file
		and runs the merge/plot pipeline without going through the dialogs."""
		self.filename = "NC-Xy_norm_bgsub.txt"
		self.dirname = "data"
		self.FileText.SetLabel("File: "+self.filename)
		#self.raw_file = self.LoadData(os.path.join(self.dirname, self.filename))
		self.AddBackgroundCheckBox.SetValue(True)
		self.combine_data()
		self.PP_AlgorithmRadio.SetValue(True)
		self.plot_data()
	def OnAbout(self, e):
		"""Show the modal 'About' dialog (Help->About menu item)."""
		d = wx.MessageDialog(self, " A utility for calculating the real part of soft X-ray spectra.\nWritten by Dr. <NAME> at the Paul Scherrer Institut", "About KKcalc", wx.OK)
		# Create a message dialog box
		d.ShowModal() # Shows it
		d.Destroy() # finally destroy it when finished.
	def OnExit(self, e):
		"""Close the main frame, ending the application (File->Exit)."""
		self.Close(True) # Close the frame.
	def OnOpen(self, e):
		"""Load data from a file chosen in a dialog, then re-merge and re-plot."""
		success = False
		dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.FD_OPEN)
		if dlg.ShowModal() == wx.ID_OK:
			success = True
			self.dirname, self.filename = os.path.split(dlg.GetPath())
		dlg.Destroy()
		# Only touch application state once a file was actually chosen.
		if success:
			self.FileText.SetLabel("File: "+self.filename)
			self.raw_file = data.load_data(os.path.join(self.dirname, self.filename))
			self.combine_data()
			self.plot_data()
	def OnHelp(self, e):
		"""Open the README help file in the default web browser."""
		logger.info("Opening web browser for help files.")
		import webbrowser
		webbrowser.open("README.rst")
	def OnSave(self, e):
		"""Export the calculated spectrum to a file chosen in a save dialog.

		Menu IDs 201/202 (Export submenu) select conversion to photoabsorption
		or refractive index; the plain Save item exports without conversion.
		"""
		convert_to = None
		if e.Id == 201:
			convert_to = "photoabsorption"
		elif e.Id == 202:
			convert_to = "refractive_index"
		logger.info("Save")
		fd = wx.FileDialog(self, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
		if fd.ShowModal()==wx.ID_OK:
			# Header metadata comes from the current GUI state.
			metadata = {"Density": float(self.DensityText.GetValue()), "Molecular Formula":self.StoichiometryText.GetValue(),"Formula Mass":data.calculate_FormulaMass(self.Stoichiometry)}
			data.export_data(fd.GetPath(), numpy.transpose(numpy.vstack((self.Full_E,self.KK_Real_Spectrum,data.coeffs_to_ASF(self.Full_E,self.Imaginary_Spectrum)))), header_info=metadata, convert_to=convert_to)
def combine_data(self):
"""Combine users near-edge data with extended spectrum data."""
self.Full_E = None
self.Imaginary_Spectrum = None
if self.raw_file is not None:
logger.info("Convert to scattering factors")
self.NearEdgeData = data.convert_data(self.raw_file,self.DataTypeCombo.GetValue(),'ASF')
# if self.InvertDataCheckBox.GetValue():
# self.NearEdgeData[:,1] = numpy.abs(self.NearEdgeData[:,1] - 2*numpy.mean(self.NearEdgeData[:,1]))
logger.info("Combine Data")
# Get splice points
splice_eV = numpy.array([10.0, 30000.0]) # Henke limits
if self.SpliceText1.GetValue() == "Start":
if self.raw_file is not None:
splice_eV[0] = self.NearEdgeData[0, 0]
else:
splice_eV[0] = float(self.SpliceText1.GetValue())
if self.SpliceText2.GetValue() == "End":
if self.raw_file is not None:
splice_eV[1] = self.NearEdgeData[-1, 0]
else:
splice_eV[1] = float(self.SpliceText2.GetValue())
if self.raw_file is not None and self.ASF_Data is None:
self.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), plotting_extras=True)
elif self.raw_file is None and self.ASF_Data is not None:
self.Full_E = self.ASF_E
self.Imaginary_Spectrum = self.ASF_Data
elif self.raw_file is not None and self.ASF_Data is not None:
self.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), fix_distortions=self.FixDistortionsCheckBox.GetValue(), plotting_extras=True)
### get start and end Y values from nexafs and asf data
##splice_nexafs_Im = numpy.interp(splice_eV, raw_Im[:, 0], raw_Im[:, 1])
###splice_asf_Im = numpy.interp(splice_eV, self.total_asf[:, 0], self.total_asf[:, 2])
##splice_asf_Im = (data.coeffs_to_ASF(splice_eV[0],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[0])[0][-1]]),data.coeffs_to_ASF(splice_eV[1],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[1])[0][-1]]))
##cut_boolean = (splice_eV[0]<raw_Im[:, 0]) == (raw_Im[:, 0]<splice_eV[1])
### Merge Y values
##if not self.AddBackgroundCheckBox.GetValue():
##logger.info("Merge data sets")
##scale = (splice_asf_Im[1]-splice_asf_Im[0])/(splice_nexafs_Im[1]-splice_nexafs_Im[0])
##scaled_nexafs_Im = ((raw_Im[:, 1]-splice_nexafs_Im[0])*scale)+splice_asf_Im[0]
##self.asf_bg = None # We won't be using this variable this time
##else:
##logger.info("Add data sets (this will currently only work at energies below 30 keV)")
### Set up background function
### We trust this point to be just before the absorption edge
##trusted_ind = max(0, numpy.where(self.total_asf[:, 0]>splice_eV[0])[0][0]-1)
##Log_total_asf = numpy.log(self.total_asf[:, 2])
### Lets trust the 5 points before our trusted point and make an initial guess at the background function
##p = numpy.polyfit(self.total_asf[(trusted_ind-5):trusted_ind, 0], Log_total_asf[(trusted_ind-5):trusted_ind], 1)
### Now lets look for the points up util the absorption edge
##p_vals = numpy.exp(numpy.polyval(p, self.total_asf[(trusted_ind-5):-1, 0]))
##p_err = max(p_vals[0:5]-self.total_asf[(trusted_ind-5):trusted_ind, 2])
##edge_ind = numpy.where(self.total_asf[trusted_ind:-1, 2]-p_vals[4:-1]>p_err*10)
##if len(edge_ind[0])!=0:
##edge_ind = edge_ind[0][0]
##else:
##edge_ind = trusted_ind
### Redo background using the 5 points before the background point
##p = numpy.polyfit(self.total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind, 0], Log_total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind], 1)
##asf_bg = numpy.exp(numpy.polyval(p, raw_Im[:, 0]))
##logger.info("Background defined as: y=exp(%(p1)ex %(p0)+e)" % {"p1":p[1], "p0":p[0]})
### Apply background function
##scale = (splice_asf_Im[1]-numpy.exp(numpy.polyval(p, splice_eV[1])))/splice_nexafs_Im[1]
##scaled_nexafs_Im = raw_Im[:, 1]*scale+asf_bg
### store background data for plotting
##cut_boolean_wide = numpy.roll(cut_boolean, -1) + numpy.roll(cut_boolean, 1)
##self.asf_bg = [[trusted_ind+edge_ind-5, trusted_ind+edge_ind], numpy.vstack((raw_Im[cut_boolean_wide, 0], asf_bg[cut_boolean_wide])).T]
##nexafs_cut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T
####Merge point-wise data sets together
##asf_cut_high = self.total_asf[self.total_asf[:, 0]>splice_eV[1], :]
##asf_cut_low = self.total_asf[self.total_asf[:, 0]<splice_eV[0], :]
##self.merged_Im = numpy.vstack((asf_cut_low[:, [0, 2]], (splice_eV[0], splice_asf_Im[0]), nexafs_cut, (splice_eV[1], splice_asf_Im[1]), asf_cut_high[:, [0, 2]]))
####Merge coeff data together
##coeffs_cut_high = self.total_Im_coeffs[self.total_E[:-1]>splice_eV[1],:]
##coeffs_cut_low = self.total_Im_coeffs[self.total_E[:-1]<splice_eV[0],:]
###convert points to coeffs
##nexafs_coeffs_cut = numpy.zeros((len(nexafs_cut)+1,5))
##Y = numpy.append(numpy.insert(nexafs_cut[:,1],0,splice_asf_Im[0]),splice_asf_Im[1])
##nexafs_E = numpy.append(numpy.insert(nexafs_cut[:,0],0,splice_eV[0]),splice_eV[1])
##M = (Y[1:]-Y[:-1])/(nexafs_E[1:]-nexafs_E[:-1])
##nexafs_coeffs_cut[:,0] = M
##nexafs_coeffs_cut[:,1] = Y[:-1]-M*nexafs_E[:-1]
###assemble merged coeffs and energy values
##self.merged_Im_coeffs = numpy.vstack((coeffs_cut_low, nexafs_coeffs_cut, self.total_Im_coeffs[-coeffs_cut_high.shape[0]-2,:], coeffs_cut_high))
##self.merged_E = numpy.concatenate((self.total_E[self.total_E<splice_eV[0]], nexafs_E, self.total_E[self.total_E>splice_eV[1]]))
### Extras for plotting
##self.splice_ind = (len(asf_cut_low[:, 0]), -len(asf_cut_high[:, 0]))
##cut_boolean = (splice_eV[0]<=raw_Im[:, 0]) != (raw_Im[:, 0]<=splice_eV[1])
##self.nexafs_CutOut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T
### Previous calculation of f_1 is no longer matching displayed f_2 data
##self.KK_Real_Spectrum = None
    def plot_data(self):
        """Redraw the main plot canvas.

        Reads (does not modify) the spectra stored on the frame and draws
        them together with the user's near-edge data and splice markers.

        Parameters:
        -----------
        self.Full_E : vector of floats
            photon energies at which the real and imaginary scattering factor data will be plotted.
        self.Imaginary_Spectrum : Array of float
            polynomial coefficients that can be evaluated to give the values of the imaginary scattering factors.
        self.KK_Real_Spectrum : vector of float
            the values of the real scattering factors.
        Returns
        -------
        The GUI is updated, but nothing is returned.
        """
        logger.info("plotting data")
        # List of things to plot
        plotlist = []
        # get initial guess at X limits
        X_min = 0
        X_max = 30000
        Y_max = 1
        Y_min = 0
        # Narrow the X range to the user's data and any manual splice bounds.
        if self.NearEdgeData is not None:
            X_min = self.NearEdgeData[0, 0]
            X_max = self.NearEdgeData[-1, 0]
        if self.SpliceText1.GetValue() != "Start":
            X_min = float(self.SpliceText1.GetValue())
        if self.SpliceText2.GetValue() != "End":
            X_max = float(self.SpliceText2.GetValue())
        if self.Imaginary_Spectrum is not None:
            # Scale factor: sum of Z*count over the parsed stoichiometry,
            # or 1 when no formula has been entered.
            if self.Stoichiometry is not None:
                scale = sum([Z*count for Z, count in self.Stoichiometry])
            else:
                scale = 1.
            # NOTE(review): zip() returns a lazy iterator on Python 3 —
            # confirm wx.lib.plot accepts it, else wrap in list().
            Im_energies, Im_values = data.coeffs_to_linear(self.Full_E, self.Imaginary_Spectrum, 0.001*scale)
            plotlist.append(plot.PolyLine(zip(Im_energies,Im_values), colour='black', width=1))
            # get Y limits (restricted to the spliced region when defined)
            if self.splice_ind is None:
                Y_max = max(Im_values)
                Y_min = min(Im_values)
            else:
                Y_max = max(Im_values[self.splice_ind[0]:self.splice_ind[1]])
                Y_min = min(Im_values[self.splice_ind[0]:self.splice_ind[1]])
            if self.NearEdgeData is not None:
                Y_max = max(self.NearEdgeData[:,1])
                Y_min = min(self.NearEdgeData[:,1])
                plotlist.append(plot.PolyMarker(zip(self.NearEdgeData[:,0], self.NearEdgeData[:,1]), colour='blue', marker='plus', size=1))
            if self.splice_ind is not None:
                # Mark the two splice points; the second index is clamped to
                # the last row of the coefficient array.
                splice_values = data.coeffs_to_ASF(self.Full_E[self.splice_ind], self.Imaginary_Spectrum[[self.splice_ind[0],min(self.splice_ind[1],self.Imaginary_Spectrum.shape[0]-1)]])
                plotlist.append(plot.PolyMarker(zip(self.Full_E[self.splice_ind], splice_values), colour='red', marker='cross', size=1))
        if self.raw_file is not None and self.Imaginary_Spectrum is None:
            logger.info("plot raw data only")
            plotlist.append(plot.PolyLine(self.NearEdgeData, colour='blue', width=1))  # User data
        if self.asf_bg is not None:
            # Background points and the fitted background line.
            plotlist.append(plot.PolyMarker(self.total_asf[self.asf_bg[0][0]:self.asf_bg[0][1], [0, 2]], colour='red', marker='cross', size=1))
            plotlist.append(plot.PolyLine(self.asf_bg[1], colour='red', width=1))
        # Real part
        #plotlist.append(plot.PolyLine(self.total_asf[:, [0, 1]], colour='black', width=1))
        if self.KK_Real_Spectrum is not None:
            if self.splice_ind is None:
                Y_max = max(self.KK_Real_Spectrum)
                Y_min = min(self.KK_Real_Spectrum)
            else:
                Y_max = max(Y_max, max(self.KK_Real_Spectrum[self.splice_ind[0]:self.splice_ind[1]]))
                Y_min = min(Y_min, min(self.KK_Real_Spectrum[self.splice_ind[0]:self.splice_ind[1]]))
            plotlist.append(plot.PolyLine(zip(self.Full_E, self.KK_Real_Spectrum), colour='green', width=1))
        # Expand plotting limits for prettiness (10% margin applied twice on Y:
        # once for the imaginary window and once for the real window).
        window_width = X_max-X_min
        X_max = X_max+window_width*0.1
        X_min = max(X_min-window_width*0.1, 0)
        window_Im_height = Y_max-Y_min
        window_Re_height = Y_max-Y_min
        Y_max = Y_max+window_Im_height*0.1
        Y_min = Y_min-window_Im_height*0.1
        Y_max = Y_max+window_Re_height*0.1
        Y_min = Y_min-window_Re_height*0.1
        # set up text, axis and draw; note Y_min is computed but the Y axis
        # is drawn from 0.
        #print plotlist
        #print X_min, X_max, Y_min, Y_max
        self.PlotAxes.Draw(plot.PlotGraphics(plotlist, '', 'Energy (eV)', 'Magnitude'), xAxis=(X_min, X_max), yAxis=(0, Y_max))
        #print "Plotlist =", len(plotlist)
#print "Plotlist =", len(plotlist)
    def Splice_Text_check(self, evt):
        """Handler for edits to a splice-point text box: recombine the
        data sets and redraw the plot."""
        self.combine_data()
        self.plot_data()
    def MergeAdd_check(self, evt):
        """Handler for the merge/add checkbox: recombine the data sets and
        redraw the plot."""
        self.combine_data()
        self.plot_data()
    def Stoichiometry_Text_check(self, evt):
        """Handler for edits to the stoichiometry text box.

        An empty box clears all formula-derived quantities; otherwise the
        formula is parsed and the relativistic correction and atomic
        scattering factors are recomputed. Either way the data sets are
        recombined and the plot redrawn.
        """
        if len(self.StoichiometryText.GetValue()) == 0:
            # Empty formula: wipe every derived quantity.
            self.ChemicalFormula = None
            self.Stoichiometry = None
            self.Relativistic_Correction = None
            self.ASF_E = None
            self.ASF_Data = None
        else:
            self.ChemicalFormula = self.StoichiometryText.GetValue()
            self.Stoichiometry = data.ParseChemicalFormula(self.ChemicalFormula)
            self.Relativistic_Correction = kk.calc_relativistic_correction(self.Stoichiometry)
            self.ASF_E, self.ASF_Data = data.calculate_asf(self.Stoichiometry)
        self.combine_data()
        self.plot_data()
    def calculate(self, button):
        """Calculate Button handler: run the Kramers-Kronig transform
        (piecewise-polynomial method) on the imaginary spectrum, storing
        the result in self.KK_Real_Spectrum, then redraw the plot."""
        logger.debug("Calculate button")
        if self.Imaginary_Spectrum is not None:
            logger.info("Calculate Kramers-Kronig transform (PP)")
            self.KK_Real_Spectrum = kk.KK_PP(self.Full_E, self.Full_E, self.Imaginary_Spectrum, self.Relativistic_Correction)
            logger.info("Done!")
        self.plot_data()
def start_wx():
    """Start the GUI: create the wx application, show the main frame and
    enter the event loop (blocks until the window is closed)."""
    app = wx.App()
    f = MyFrame()
    app.SetTopWindow(f)
    app.MainLoop()
if __name__ == '__main__':
start_wx() | [
"logging.getLogger",
"data.coeffs_to_linear",
"logging.StreamHandler",
"wx.lib.plot.PolyLine",
"webbrowser.open",
"numpy.array",
"wx.lib.plot.PolyMarker",
"wx.StaticBox",
"wx.App",
"kk.KK_PP",
"kk.calc_relativistic_correction",
"wx.lib.plot.PlotGraphics",
"wx.CheckBox",
"data.calculate_For... | [((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((415, 455), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (434, 455), False, 'import logging\n'), ((457, 497), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (478, 497), False, 'import logging\n'), ((20713, 20721), 'wx.App', 'wx.App', ([], {}), '()\n', (20719, 20721), False, 'import wx\n'), ((819, 910), 'wx.Frame.__init__', 'wx.Frame.__init__', (['self', 'None', 'wx.ID_ANY', '"""Kramers-Kronig Calculator"""'], {'size': '(500, 800)'}), "(self, None, wx.ID_ANY, 'Kramers-Kronig Calculator', size=\n (500, 800))\n", (836, 910), False, 'import wx\n'), ((1523, 1532), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (1530, 1532), False, 'import wx\n'), ((1722, 1731), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (1729, 1731), False, 'import wx\n'), ((2076, 2085), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (2083, 2085), False, 'import wx\n'), ((2296, 2308), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (2306, 2308), False, 'import wx\n'), ((2534, 2576), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', 'wx.ID_OPEN', 'self.OnOpen'], {}), '(self, wx.ID_OPEN, self.OnOpen)\n', (2545, 2576), False, 'import wx\n'), ((2579, 2621), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', 'wx.ID_SAVE', 'self.OnSave'], {}), '(self, wx.ID_SAVE, self.OnSave)\n', (2590, 2621), False, 'import wx\n'), ((2624, 2659), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', '(201)', 'self.OnSave'], {}), '(self, 201, self.OnSave)\n', (2635, 2659), False, 'import wx\n'), ((2726, 2761), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', '(202)', 'self.OnSave'], {}), '(self, 202, self.OnSave)\n', (2737, 2761), False, 'import wx\n'), ((2829, 2871), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', 'wx.ID_EXIT', 'self.OnExit'], {}), '(self, wx.ID_EXIT, self.OnExit)\n', (2840, 2871), False, 'import 
wx\n'), ((2874, 2918), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', 'wx.ID_ABOUT', 'self.OnAbout'], {}), '(self, wx.ID_ABOUT, self.OnAbout)\n', (2885, 2918), False, 'import wx\n'), ((2921, 2963), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self', 'wx.ID_HELP', 'self.OnHelp'], {}), '(self, wx.ID_HELP, self.OnHelp)\n', (2932, 2963), False, 'import wx\n'), ((2977, 3003), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (2988, 3003), False, 'import wx\n'), ((3037, 3061), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (3048, 3061), False, 'import wx\n'), ((3112, 3136), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (3123, 3136), False, 'import wx\n'), ((3318, 3357), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '"""File: (None)"""'], {}), "(self, -1, 'File: (None)')\n", (3331, 3357), False, 'import wx\n'), ((3417, 3455), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '"""Data Type: """'], {}), "(self, -1, 'Data Type: ')\n", (3430, 3455), False, 'import wx\n'), ((3479, 3547), 'wx.ComboBox', 'wx.ComboBox', (['self', '(-1)'], {'value': '"""Photoabsorption"""', 'style': 'wx.CB_READONLY'}), "(self, -1, value='Photoabsorption', style=wx.CB_READONLY)\n", (3490, 3547), False, 'import wx\n'), ((3762, 3788), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (3773, 3788), False, 'import wx\n'), ((3933, 3959), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (3944, 3959), False, 'import wx\n'), ((3981, 4038), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)', '"""Start"""'], {'style': 'wx.TE_PROCESS_ENTER'}), "(self, -1, 'Start', style=wx.TE_PROCESS_ENTER)\n", (3992, 4038), False, 'import wx\n'), ((4233, 4288), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)', '"""End"""'], {'style': 'wx.TE_PROCESS_ENTER'}), "(self, -1, 'End', style=wx.TE_PROCESS_ENTER)\n", (4244, 4288), False, 'import wx\n'), ((4770, 4809), 'wx.CheckBox', 'wx.CheckBox', (['self', '(-1)', '"""Add 
background"""'], {}), "(self, -1, 'Add background')\n", (4781, 4809), False, 'import wx\n'), ((5072, 5112), 'wx.CheckBox', 'wx.CheckBox', (['self', '(-1)', '"""Fix distortions"""'], {}), "(self, -1, 'Fix distortions')\n", (5083, 5112), False, 'import wx\n'), ((5900, 5926), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (5911, 5926), False, 'import wx\n'), ((6005, 6058), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)', '"""1"""'], {'style': 'wx.TE_PROCESS_ENTER'}), "(self, -1, '1', style=wx.TE_PROCESS_ENTER)\n", (6016, 6058), False, 'import wx\n'), ((6352, 6378), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (6363, 6378), False, 'import wx\n'), ((6475, 6527), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)', '""""""'], {'style': 'wx.TE_PROCESS_ENTER'}), "(self, -1, '', style=wx.TE_PROCESS_ENTER)\n", (6486, 6527), False, 'import wx\n'), ((6925, 6957), 'wx.Button', 'wx.Button', (['self', '(-1)', '"""Calculate"""'], {}), "(self, -1, 'Calculate')\n", (6934, 6957), False, 'import wx\n'), ((7210, 7231), 'wx.lib.plot.PlotCanvas', 'plot.PlotCanvas', (['self'], {}), '(self)\n', (7225, 7231), True, 'import wx.lib.plot as plot\n'), ((8081, 8258), 'wx.MessageDialog', 'wx.MessageDialog', (['self', '""" A utility for calculating the real part of soft X-ray spectra.\nWritten by Dr. <NAME> at the Paul Scherrer Institut"""', '"""About KKcalc"""', 'wx.OK'], {}), '(self,\n """ A utility for calculating the real part of soft X-ray spectra.\nWritten by Dr. 
<NAME> at the Paul Scherrer Institut"""\n , \'About KKcalc\', wx.OK)\n', (8097, 8258), False, 'import wx\n'), ((8498, 8571), 'wx.FileDialog', 'wx.FileDialog', (['self', '"""Choose a file"""', 'self.dirname', '""""""', '"""*.*"""', 'wx.FD_OPEN'], {}), "(self, 'Choose a file', self.dirname, '', '*.*', wx.FD_OPEN)\n", (8511, 8571), False, 'import wx\n'), ((8984, 9013), 'webbrowser.open', 'webbrowser.open', (['"""README.rst"""'], {}), "('README.rst')\n", (8999, 9013), False, 'import webbrowser\n'), ((9221, 9283), 'wx.FileDialog', 'wx.FileDialog', (['self'], {'style': '(wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)'}), '(self, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n', (9234, 9283), False, 'import wx\n'), ((10226, 10254), 'numpy.array', 'numpy.array', (['[10.0, 30000.0]'], {}), '([10.0, 30000.0])\n', (10237, 10254), False, 'import numpy\n'), ((3243, 3285), 'wx.StaticBox', 'wx.StaticBox', (['self'], {'label': '"""Near-Edge Data"""'}), "(self, label='Near-Edge Data')\n", (3255, 3285), False, 'import wx\n'), ((4964, 4993), 'wx.ToolTip', 'wx.ToolTip', (['"""Not implemented"""'], {}), "('Not implemented')\n", (4974, 4993), False, 'import wx\n'), ((5832, 5868), 'wx.StaticBox', 'wx.StaticBox', (['self'], {'label': '"""Material"""'}), "(self, label='Material')\n", (5844, 5868), False, 'import wx\n'), ((5946, 5982), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '"""Density: """'], {}), "(self, -1, 'Density: ')\n", (5959, 5982), False, 'import wx\n'), ((6252, 6284), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '""" g/ml"""'], {}), "(self, -1, ' g/ml')\n", (6265, 6284), False, 'import wx\n'), ((6404, 6446), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '"""Stoichiometry: """'], {}), "(self, -1, 'Stoichiometry: ')\n", (6417, 6446), False, 'import wx\n'), ((6856, 6895), 'wx.StaticBox', 'wx.StaticBox', (['self'], {'label': '"""Calculation"""'}), "(self, label='Calculation')\n", (6868, 6895), False, 'import wx\n'), ((17041, 17115), 'data.coeffs_to_linear', 
'data.coeffs_to_linear', (['self.Full_E', 'self.Imaginary_Spectrum', '(0.001 * scale)'], {}), '(self.Full_E, self.Imaginary_Spectrum, 0.001 * scale)\n', (17062, 17115), False, 'import kk, data\n'), ((19471, 19530), 'wx.lib.plot.PlotGraphics', 'plot.PlotGraphics', (['plotlist', '""""""', '"""Energy (eV)"""', '"""Magnitude"""'], {}), "(plotlist, '', 'Energy (eV)', 'Magnitude')\n", (19488, 19530), True, 'import wx.lib.plot as plot\n'), ((20089, 20136), 'data.ParseChemicalFormula', 'data.ParseChemicalFormula', (['self.ChemicalFormula'], {}), '(self.ChemicalFormula)\n', (20114, 20136), False, 'import kk, data\n'), ((20171, 20222), 'kk.calc_relativistic_correction', 'kk.calc_relativistic_correction', (['self.Stoichiometry'], {}), '(self.Stoichiometry)\n', (20202, 20222), False, 'import kk, data\n'), ((20254, 20292), 'data.calculate_asf', 'data.calculate_asf', (['self.Stoichiometry'], {}), '(self.Stoichiometry)\n', (20272, 20292), False, 'import kk, data\n'), ((20555, 20649), 'kk.KK_PP', 'kk.KK_PP', (['self.Full_E', 'self.Full_E', 'self.Imaginary_Spectrum', 'self.Relativistic_Correction'], {}), '(self.Full_E, self.Full_E, self.Imaginary_Spectrum, self.\n Relativistic_Correction)\n', (20563, 20649), False, 'import kk, data\n'), ((5293, 5351), 'wx.ToolTip', 'wx.ToolTip', (['"""Install the SciPy module to use this feature"""'], {}), "('Install the SciPy module to use this feature')\n", (5303, 5351), False, 'import wx\n'), ((8800, 8841), 'os.path.join', 'os.path.join', (['self.dirname', 'self.filename'], {}), '(self.dirname, self.filename)\n', (8812, 8841), False, 'import os\n'), ((9444, 9490), 'data.calculate_FormulaMass', 'data.calculate_FormulaMass', (['self.Stoichiometry'], {}), '(self.Stoichiometry)\n', (9470, 9490), False, 'import kk, data\n'), ((18152, 18208), 'wx.lib.plot.PolyLine', 'plot.PolyLine', (['self.NearEdgeData'], {'colour': '"""blue"""', 'width': '(1)'}), "(self.NearEdgeData, colour='blue', width=1)\n", (18165, 18208), True, 'import wx.lib.plot as plot\n'), 
((18274, 18392), 'wx.lib.plot.PolyMarker', 'plot.PolyMarker', (['self.total_asf[self.asf_bg[0][0]:self.asf_bg[0][1], [0, 2]]'], {'colour': '"""red"""', 'marker': '"""cross"""', 'size': '(1)'}), "(self.total_asf[self.asf_bg[0][0]:self.asf_bg[0][1], [0, 2]],\n colour='red', marker='cross', size=1)\n", (18289, 18392), True, 'import wx.lib.plot as plot\n'), ((18410, 18462), 'wx.lib.plot.PolyLine', 'plot.PolyLine', (['self.asf_bg[1]'], {'colour': '"""red"""', 'width': '(1)'}), "(self.asf_bg[1], colour='red', width=1)\n", (18423, 18462), True, 'import wx.lib.plot as plot\n'), ((9590, 9646), 'data.coeffs_to_ASF', 'data.coeffs_to_ASF', (['self.Full_E', 'self.Imaginary_Spectrum'], {}), '(self.Full_E, self.Imaginary_Spectrum)\n', (9608, 9646), False, 'import kk, data\n')] |
# This file is a derivative of repeat_copy.py created by SiliconSloth.
# The license header of the original file is retained here.
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from collections import OrderedDict
from scipy.sparse import csr_matrix
"""
Generates multiplication task samples.
"""
class MultiplicationTask():
    """Generates multiplication task samples.

    Two random factors are encoded digit-by-digit (least-significant digit
    first, base ``feature_width``), separated by a delimiter token whose id
    is ``feature_width`` itself.  The target is the digit-encoded product,
    and a mask selects the answer positions so a loss can ignore the input
    part of the sequence.
    """

    def __init__(self, config):
        """Store the task configuration.

        :param config: dict with keys 'seed' (int), 'feature_width' (number
            base / one-hot width) and 'set_list' (per-set length/quantity).
        """
        self.rng = np.random.RandomState(seed=config['seed'])
        self.feature_width = config['feature_width']
        # self.tokens = {'rep': 'REP'}
        self.config = config
        self.samples = self.create_samples(config['set_list'], self.feature_width)

    def create_samples(self, set_list, feature_width):
        """Pre-generation is disabled; samples are drawn on demand by
        :meth:`get_sample`.  Returns an empty dict (kept for interface
        parity with the other task classes)."""
        print('### MultiplicationTask: create data')
        samples = {}
        return samples

    def create_sample(self, length, feature_width):
        """Create one sample.

        :param length: number of digits per operand (and per answer).
        :param feature_width: number base used for the digit encoding.
        :return: OrderedDict with keys 'x_word' (token ids), 'x' (one-hot
            input, width feature_width+1 for the separator), 'y' (one-hot
            target) and 'm' (answer mask).
        """
        # Random number between 0 and the max value, inclusive.
        result = self.rng.randint(feature_width**length)
        # Choose a factor between 1 and the result, or any valid number if the result is 0.
        factor1 = self.rng.randint(result) + 1 if result > 0 else self.rng.randint(feature_width**length)
        # Get the second factor by dividing by the first and rounding down.
        factor2 = int(result / factor1)
        # Compute the new result with the rounded factor, so that
        # factor1 * factor2 == result holds exactly.
        result = factor1 * factor2
        sequence1 = self.int_to_sequence(factor1, length, feature_width)
        sequence2 = self.int_to_sequence(factor2, length, feature_width)
        answer = self.int_to_sequence(result, length, feature_width)
        # Layout: factor1 | SEP | factor2 | SEP | zero padding for the answer.
        x_word = np.concatenate([sequence1, [feature_width], sequence2, [feature_width], [0 for _ in range(length)]])
        y_word = np.concatenate([[0 for _ in range(length*2 + 2)], answer])
        sample = OrderedDict()
        sample['x_word'] = x_word
        sample['x'] = self._numbers_to_onehot(x_word, feature_width + 1)
        sample['y'] = self._numbers_to_onehot(y_word, feature_width)
        # Mask is 0 over the input/separators and 1 over the answer slots.
        sample['m'] = np.concatenate([[0 for _ in range(length*2 + 2)], [1 for _ in range(length)]])
        return sample

    @staticmethod
    def int_to_sequence(value, length, feature_width):
        """Encode `value` as `length` base-`feature_width` digits, least
        significant digit first."""
        seq = np.ndarray(length, dtype=np.int32)
        for i in range(length):
            seq[i] = value % feature_width
            value = int(value / feature_width)
        return seq

    @staticmethod
    def _numbers_to_onehot(numbers, size):
        """One-hot encode a 1-D sequence of token ids into a dense
        (len(numbers), size) float array."""
        length = len(numbers)  # fixed: was numbers.__len__()
        row = np.arange(length)
        data = np.ones(length)
        # Sparse construction then densify: fast even for long sequences.
        matrix = csr_matrix((data, (row, numbers)), shape=(length, size)).toarray()
        return matrix

    @staticmethod
    def _zeros_matrix(length, width):
        """Return a (length, width) all-zero float padding matrix.

        The first parameter was renamed from `len`, which shadowed the
        builtin; the csr_matrix round-trip was replaced by the equivalent
        direct np.zeros call.
        """
        return np.zeros((length, width))

    def get_sample(self, set_name, number):
        """Draw a fresh random sample whose operand length lies in the
        configured [min_length, max_length) range for `set_name`."""
        conf = self.config['set_list'][set_name]
        return self.create_sample(self.rng.randint(conf["min_length"], conf["max_length"]), self.feature_width)

    @property
    def vocabulary_size(self):
        # Digits 0..feature_width-1 plus the separator token.
        return self.feature_width + 1

    @property
    def x_size(self):
        # Input one-hot width (includes the separator token).
        return self.feature_width + 1

    @property
    def y_size(self):
        # Target one-hot width (digits only, no separator).
        return self.feature_width

    def sample_amount(self, set_name):
        """Number of samples configured for the given set."""
        return self.config['set_list'][set_name]['quantity']

    def patch_batch(self, list_of_samples):
        """Zero-pad samples to a common length and stack them into a
        time-major batch: 'x'/'y' become (T, B, F), 'm' becomes (T, B)."""
        batch = {'x': [], 'y': [], 'm': [], 'x_word': []}
        lengths = []  # fixed: was named `len`, shadowing the builtin
        for sample in list_of_samples:
            lengths.append(sample['x'].shape[0])
            batch['x_word'].append(sample['x_word'])
        max_len = np.max(lengths)
        for sample in list_of_samples:
            cur_len = sample['x'].shape[0]
            if cur_len < max_len:
                # Pad shorter samples with zeros on the time axis.
                add_len = max_len - cur_len
                x_add = self._zeros_matrix(add_len, self.x_size)
                batch['x'].append(np.concatenate([sample['x'], x_add], axis=0))
                y_add = self._zeros_matrix(add_len, self.y_size)
                batch['y'].append(np.concatenate([sample['y'], y_add], axis=0))
                m_add = np.zeros([add_len])
                batch['m'].append(np.concatenate([sample['m'], m_add], axis=0))
            else:
                for key in ['x', 'y', 'm']:
                    batch[key].append(sample[key])
        for key in ['x', 'y', 'm']:
            batch[key] = np.stack(batch[key], axis=0)
        # Move the time axis in front of the batch axis.
        batch['x'] = np.transpose(batch['x'], axes=(1, 0, 2))
        batch['y'] = np.transpose(batch['y'], axes=(1, 0, 2))
        batch['m'] = np.transpose(batch['m'], axes=(1, 0))
        return batch

    @staticmethod
    def decode_output(sample, prediction):
        """Argmax-decode targets and predictions.

        Handles both batched (T, B, F) predictions and a single (T, F)
        sequence.  Note: an earlier instance-method stub of the same name
        (which just did `pass`) was removed — it was unconditionally
        shadowed by this later definition, so behaviour is unchanged.
        """
        if len(prediction.shape) == 3:
            prediction_decode_list = []
            target_decode_list = []
            for b in range(prediction.shape[1]):
                target_decode_list.append([np.argmax(sample['y'][i, b, :]) for i in range(sample['y'].shape[0])])
                prediction_decode_list.append([np.argmax(prediction[i, b, :]) for i in range(prediction.shape[0])])
            return target_decode_list, prediction_decode_list
        else:
            target_decode = [np.argmax(sample['y'][i, :]) for i in range(sample['y'].shape[0])]
            prediction_decode = [np.argmax(prediction[i, :]) for i in range(prediction.shape[0])]
            return target_decode, prediction_decode
if __name__ == '__main__':
    # Demo/smoke test: build one sample and visualise input vs. target.
    feature_width = 20
    set_list = {"train": {"quantity": 20, "min_length": 20, "max_length": 50},
                "valid": {"quantity": 20, "min_length": 50, "max_length": 70}}
    config = {'seed': 221, 'feature_width': feature_width, 'set_list': set_list}
    # Fixed: `AdditionTask` was an undefined name left over from the file
    # this demo was adapted from; this module defines MultiplicationTask.
    sd = MultiplicationTask(config)
    samples = sd.get_sample('train', 2)
    print("Sample Shape")
    print("Data: ", samples['x'].shape)
    print("Target: ", samples['y'].shape)
    print("Mask: ", samples['m'].shape)
    # PRINT COPY TASK SAMPLE
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=False, figsize=(13, 8))
    pad = 5
    length = int((samples['x'].shape[0] - 1) / 2)
    # Top panel: input token ids, with the separator highlighted in pink.
    plot_x = np.argmax(samples['x'], axis=1)
    plot_x[length + 1:] = -1
    ax1.plot(plot_x, 's', color='midnightblue')
    respons_flag = np.ones(plot_x.shape) * -1
    respons_flag[length] = feature_width
    ax1.plot(respons_flag, 's', color='deeppink')
    ax1.set_xticklabels([])
    ax1.set_yticks(np.arange(0, feature_width + 1, 1))
    ax1.set_ylim(-0.5, feature_width + 0.5)
    ax1.set_xlim(-0.5, length * 2 + 1.5)
    ax1.annotate('data', xy=(0, 0.5), xytext=(-ax1.yaxis.labelpad - pad, 0), xycoords=ax1.yaxis.label,
                 textcoords='offset points', size='24', ha='right', va='center')
    # Bottom panel: target token ids over the answer region only.
    plot_y = np.argmax(samples['y'], axis=1)
    plot_y[:length + 1] = -1
    ax2.plot(plot_y, 's', color='midnightblue')
    ax2.set_xticklabels([])
    ax2.set_yticks(np.arange(0, feature_width, 1))
    ax2.set_ylim(-0.5, feature_width - 0.5)
    ax2.set_xlim(-0.5, length * 2 + 1.5)
    ax2.annotate('target', xy=(0, 0.5), xytext=(-ax2.yaxis.labelpad - pad, 0), xycoords=ax2.yaxis.label,
                 textcoords='offset points', size='24', ha='right', va='center')
    fig.subplots_adjust(wspace=0, hspace=0)
    fig.tight_layout()
    fig.subplots_adjust(left=0.13)
    plt.show()
| [
"collections.OrderedDict",
"numpy.ones",
"numpy.arange",
"numpy.argmax",
"numpy.max",
"numpy.stack",
"numpy.zeros",
"numpy.ndarray",
"numpy.concatenate",
"scipy.sparse.csr_matrix",
"numpy.transpose",
"matplotlib.pyplot.subplots",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
] | [((7213, 7272), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)', 'sharey': '(False)', 'figsize': '(13, 8)'}), '(2, sharex=True, sharey=False, figsize=(13, 8))\n', (7225, 7272), True, 'import matplotlib.pyplot as plt\n'), ((7349, 7380), 'numpy.argmax', 'np.argmax', (["samples['x']"], {'axis': '(1)'}), "(samples['x'], axis=1)\n", (7358, 7380), True, 'import numpy as np\n'), ((7961, 7992), 'numpy.argmax', 'np.argmax', (["samples['y']"], {'axis': '(1)'}), "(samples['y'], axis=1)\n", (7970, 7992), True, 'import numpy as np\n'), ((8527, 8537), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8535, 8537), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1046), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': "config['seed']"}), "(seed=config['seed'])\n", (1025, 1046), True, 'import numpy as np\n'), ((2850, 2863), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2861, 2863), False, 'from collections import OrderedDict\n'), ((3251, 3285), 'numpy.ndarray', 'np.ndarray', (['length'], {'dtype': 'np.int32'}), '(length, dtype=np.int32)\n', (3261, 3285), True, 'import numpy as np\n'), ((3538, 3555), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (3547, 3555), True, 'import numpy as np\n'), ((3571, 3586), 'numpy.ones', 'np.ones', (['length'], {}), '(length)\n', (3578, 3586), True, 'import numpy as np\n'), ((3775, 3789), 'numpy.arange', 'np.arange', (['len'], {}), '(len)\n', (3784, 3789), True, 'import numpy as np\n'), ((3804, 3817), 'numpy.zeros', 'np.zeros', (['len'], {}), '(len)\n', (3812, 3817), True, 'import numpy as np\n'), ((3833, 3846), 'numpy.zeros', 'np.zeros', (['len'], {}), '(len)\n', (3841, 3846), True, 'import numpy as np\n'), ((4827, 4838), 'numpy.max', 'np.max', (['len'], {}), '(len)\n', (4833, 4838), True, 'import numpy as np\n'), ((5640, 5680), 'numpy.transpose', 'np.transpose', (["batch['x']"], {'axes': '(1, 0, 2)'}), "(batch['x'], axes=(1, 0, 2))\n", (5652, 5680), True, 
'import numpy as np\n'), ((5702, 5742), 'numpy.transpose', 'np.transpose', (["batch['y']"], {'axes': '(1, 0, 2)'}), "(batch['y'], axes=(1, 0, 2))\n", (5714, 5742), True, 'import numpy as np\n'), ((5764, 5801), 'numpy.transpose', 'np.transpose', (["batch['m']"], {'axes': '(1, 0)'}), "(batch['m'], axes=(1, 0))\n", (5776, 5801), True, 'import numpy as np\n'), ((7477, 7498), 'numpy.ones', 'np.ones', (['plot_x.shape'], {}), '(plot_x.shape)\n', (7484, 7498), True, 'import numpy as np\n'), ((7642, 7676), 'numpy.arange', 'np.arange', (['(0)', '(feature_width + 1)', '(1)'], {}), '(0, feature_width + 1, 1)\n', (7651, 7676), True, 'import numpy as np\n'), ((8117, 8147), 'numpy.arange', 'np.arange', (['(0)', 'feature_width', '(1)'], {}), '(0, feature_width, 1)\n', (8126, 8147), True, 'import numpy as np\n'), ((5589, 5617), 'numpy.stack', 'np.stack', (['batch[key]'], {'axis': '(0)'}), '(batch[key], axis=0)\n', (5597, 5617), True, 'import numpy as np\n'), ((3604, 3660), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, numbers))'], {'shape': '(length, size)'}), '((data, (row, numbers)), shape=(length, size))\n', (3614, 3660), False, 'from scipy.sparse import csr_matrix\n'), ((3865, 3915), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(len, width)'}), '((data, (row, col)), shape=(len, width))\n', (3875, 3915), False, 'from scipy.sparse import csr_matrix\n'), ((5314, 5333), 'numpy.zeros', 'np.zeros', (['[add_len]'], {}), '([add_len])\n', (5322, 5333), True, 'import numpy as np\n'), ((6390, 6418), 'numpy.argmax', 'np.argmax', (["sample['y'][i, :]"], {}), "(sample['y'][i, :])\n", (6399, 6418), True, 'import numpy as np\n'), ((6490, 6517), 'numpy.argmax', 'np.argmax', (['prediction[i, :]'], {}), '(prediction[i, :])\n', (6499, 6517), True, 'import numpy as np\n'), ((5099, 5143), 'numpy.concatenate', 'np.concatenate', (["[sample['x'], x_add]"], {'axis': '(0)'}), "([sample['x'], x_add], axis=0)\n", (5113, 5143), True, 'import numpy as np\n'), 
((5244, 5288), 'numpy.concatenate', 'np.concatenate', (["[sample['y'], y_add]"], {'axis': '(0)'}), "([sample['y'], y_add], axis=0)\n", (5258, 5288), True, 'import numpy as np\n'), ((5368, 5412), 'numpy.concatenate', 'np.concatenate', (["[sample['m'], m_add]"], {'axis': '(0)'}), "([sample['m'], m_add], axis=0)\n", (5382, 5412), True, 'import numpy as np\n'), ((6098, 6129), 'numpy.argmax', 'np.argmax', (["sample['y'][i, b, :]"], {}), "(sample['y'][i, b, :])\n", (6107, 6129), True, 'import numpy as np\n'), ((6216, 6246), 'numpy.argmax', 'np.argmax', (['prediction[i, b, :]'], {}), '(prediction[i, b, :])\n', (6225, 6246), True, 'import numpy as np\n')] |
import torch
from torchvision import transforms
import os
import cv2
import time
import numpy as np
from .pse import decode as pse_decode
def Singleton(cls):
    """Class decorator caching a single shared instance per decorated class.

    The first call constructs the instance with whatever arguments were
    given; every later call returns that same object, ignoring new
    arguments.
    """
    _instances = {}

    def get_instance(*args, **kwargs):
        # EAFP: the cached instance exists on every call but the first.
        try:
            return _instances[cls]
        except KeyError:
            instance = cls(*args, **kwargs)
            _instances[cls] = instance
            return instance
    return get_instance
class SingletonType(type):
    # NOTE(review): despite its name, this metaclass does NOT cache
    # instances — __call__ allocates and initialises a fresh object on
    # every instantiation, which matches type's default behaviour.
    def __init__(cls, *args, **kwargs):
        # Delegate class creation bookkeeping to the default metaclass.
        super(SingletonType, cls).__init__(*args, **kwargs)

    def __call__(cls, *args, **kwargs):
        # Manual allocation + initialisation; no instance reuse here.
        obj = cls.__new__(cls, *args, **kwargs)
        cls.__init__(obj, *args, **kwargs)
        return obj
class PSENetHandel(metaclass=SingletonType):
    # PSENet text-detection inference handler: loads a checkpoint onto
    # CPU/GPU and runs the PSE decode on resized input images.
    def __init__(self, model_path, net, scale, gpu_id=None):
        """
        Initialise the PyTorch model.
        :param model_path: path to the saved model (either bare parameters
            or parameters saved together with the computation graph)
        :param net: the network computation graph; required when model_path
            only stores a parameter state_dict
        :param scale: PSE decode scale, also assigned onto the network
        :param gpu_id: index of the GPU to run on (falls back to CPU when
            None, non-int, or CUDA is unavailable)
        """
        self.scale = scale
        if gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:{}".format(gpu_id))
        else:
            self.device = torch.device("cpu")
        self.net = torch.load(model_path, map_location=self.device)['state_dict']
        print('device:', self.device)
        # for k in net.state_dict():
        #     print(k)
        if net is not None:
            # The graph and the parameters were saved separately, so load
            # the state_dict into the supplied network.
            net = net.to(self.device)
            net.scale = scale
            try:
                # Strip the 7-char 'module.' prefix that DataParallel
                # checkpoints prepend to every parameter name.
                sk = {}
                for k in self.net:
                    sk[k[7:]] = self.net[k]
                net.load_state_dict(sk)
            except:
                net.load_state_dict(self.net)
            self.net = net
            print('load model')
        self.net.eval()
    #
    def predict(self, img: np.ndarray, long_size: int = 640):
        """
        Run detection on a single image (ndarray, assumed BGR as from
        cv2.imread).
        :param long_size: target length for the longer image side; the
            resized dimensions are rounded down to multiples of 32
        :param img: input image as a numpy ndarray
        :return: (preds, boxes_list, rects_re, t) — prediction map, boxes
            rescaled to the original image, [angle, w, h, cx, cy] rects,
            and the inference time in seconds
        """
        # assert os.path.exists(img), 'file is not exists'
        # img = cv2.imread(img)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w = img.shape[:2]
        # Resize so the longer side is ~long_size, with both target sides
        # floored to a multiple of 32 (network stride) and at least 32.
        if h > w:
            scale_h = long_size / h
            tar_w = w * scale_h
            tar_w = tar_w - tar_w % 32
            tar_w = max(32, tar_w)
            scale_w = tar_w / w
        else:
            scale_w = long_size / w
            tar_h = h * scale_w
            tar_h = tar_h - tar_h % 32
            tar_h = max(32, tar_h)
            scale_h = tar_h / h
        # scale = long_size / max(h, w)
        img = cv2.resize(img, None, fx=scale_w, fy=scale_h)
        # Convert the image from (h, w, c) to a (1, c, h, w) tensor below.
        img = img.astype(np.float32)
        # img /= 255.0
        # tensor = transforms.ToTensor()(img)
        # tensor = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(tensor)
        #
        img = img.astype(np.float32)
        # ImageNet mean/std normalisation, done manually on the ndarray.
        img /= 255.0
        img -= np.array((0.485, 0.456, 0.406))
        img /= np.array((0.229, 0.224, 0.225))
        tensor = transforms.ToTensor()(img)
        tensor = tensor.unsqueeze_(0)
        tensor = tensor.to(self.device)
        with torch.no_grad():
            # torch.cuda.synchronize()
            start = time.time()
            preds = self.net(tensor)
            preds, boxes_list, rects = pse_decode(preds[0], self.scale)
            # Ratio between the prediction map and the ORIGINAL image size,
            # used to map detections back to input coordinates.
            scale = (preds.shape[1] / w, preds.shape[0] / h)
            # print(scale)
            # preds, boxes_list = decode(preds,num_pred=-1)
            rects_re = []  # degree, w, h, cx, cy
            if len(boxes_list):
                boxes_list = boxes_list / scale
            for rect in rects:
                # Unpack cv2-style ((cx, cy), (w, h), angle) rotated rects
                # into flat [angle, w, h, cx, cy], rescaled to the input.
                temp_rec = []
                temp_rec.append(rect[-1])
                temp_rec.append(rect[1][1] / scale[0])
                temp_rec.append(rect[1][0] / scale[1])
                temp_rec.append(rect[0][0] / scale[0])
                temp_rec.append(rect[0][1] / scale[1])
                rects_re.append(temp_rec)
            # torch.cuda.synchronize()
            t = time.time() - start
        return preds, boxes_list, rects_re, t
| [
"torch.load",
"numpy.array",
"torch.cuda.is_available",
"cv2.cvtColor",
"time.time",
"torch.no_grad",
"cv2.resize",
"torchvision.transforms.ToTensor",
"torch.device"
] | [((2214, 2250), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2226, 2250), False, 'import cv2\n'), ((2750, 2795), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': 'scale_w', 'fy': 'scale_h'}), '(img, None, fx=scale_w, fy=scale_h)\n', (2760, 2795), False, 'import cv2\n'), ((3138, 3169), 'numpy.array', 'np.array', (['(0.485, 0.456, 0.406)'], {}), '((0.485, 0.456, 0.406))\n', (3146, 3169), True, 'import numpy as np\n'), ((3185, 3216), 'numpy.array', 'np.array', (['(0.229, 0.224, 0.225)'], {}), '((0.229, 0.224, 0.225))\n', (3193, 3216), True, 'import numpy as np\n'), ((1081, 1106), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1104, 1106), False, 'import torch\n'), ((1213, 1232), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1225, 1232), False, 'import torch\n'), ((1252, 1300), 'torch.load', 'torch.load', (['model_path'], {'map_location': 'self.device'}), '(model_path, map_location=self.device)\n', (1262, 1300), False, 'import torch\n'), ((3234, 3255), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3253, 3255), False, 'from torchvision import transforms\n'), ((3352, 3367), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3365, 3367), False, 'import torch\n'), ((3428, 3439), 'time.time', 'time.time', ([], {}), '()\n', (3437, 3439), False, 'import time\n'), ((4305, 4316), 'time.time', 'time.time', ([], {}), '()\n', (4314, 4316), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python libraries
import pandas as pd
import numpy as np
import random
import copy
import logging
import ipdb
from ETL2.DBIndicadores import DBIndicadores
# Local imports
from rdigraphs.supergraph.snode import DataGraph
class DataGraph_sql(DataGraph):
    """
    Generic class defining a graph of data whose nodes and node attributes
    are loaded from a SQL database (through a DBIndicadores manager).
    """

    def __init__(self, db_info, fields, max_num_nodes=None, REF='Id',
                 topics_field=None, out_path=None, label="dg"):
        """
        Stores the main attributes of a datagraph object and loads the graph
        data as a list of node attributes from a database

        Args:
            db_info: Dict with db access parameters ('server', 'user',
                'password', 'name' and, optionally, 'filterOptions' and
                'orderOptions'). If None, no data are loaded.
            fields: Fields to read as node attributes. Plain names are read
                from the main table; tuples refer to auxiliary tables.
            max_num_nodes: Maximum number of nodes to load.
            REF: Name of the reference (id) field.
            topics_field: Field containing the topic vectors, if any.
            out_path: Output folder for saved graphs.
            label: Name of the datagraph.
        """

        # BUGFIX: the original call was super(DataGraph, self).__init__(...),
        # which skips DataGraph.__init__ and dispatches to DataGraph's own
        # parent class instead. The first argument to super() must be the
        # subclass being initialized.
        super(DataGraph_sql, self).__init__(
            fields, max_num_nodes, REF, topics_field, out_path, label)

        # ##################################
        # Variables for the SQL data sources

        # Parameters for the db containing the data
        self.db_info = db_info
        if db_info is not None:
            if 'filterOptions' in db_info:
                self.db_info['filterOptions'] = db_info['filterOptions']
            else:
                self.db_info['filterOptions'] = None
            if 'orderOptions' in db_info:
                self.db_info['orderOptions'] = db_info['orderOptions']
            else:
                self.db_info['orderOptions'] = None

        # Selected fields in the db that will be node attributes.
        self.fields = fields
        self.topics_field = topics_field
        self.base_fields = None    # Fields from the main DB table
        self.sub_fields = None     # Fields to read from other tables
        self.db = None             # Database manager

        # Load data from the database
        if db_info is not None:
            self.importData()

        return

    def importData(self):
        """
        Reads the pandas dataframe for the datagraph from a database.
        """

        # #############
        # Read database

        # Open database
        self.db = DBIndicadores(self.db_info['server'], self.db_info['user'],
                               self.db_info['password'], self.db_info['name'])

        # Separate fields from the main table and fields to read from other
        # tables in the db.
        self.base_fields = [x for x in self.fields if type(x) is not tuple]
        # Each sub-field is a tuple whose first component is the name of the
        # table containing the data and the second component is the list of
        # attributes to load.
        self.sub_fields = [x for x in self.fields if type(x) is tuple]

        # Read nodes and node attributes from database
        idREF = self.base_fields.index(self.REF)
        self.nodes, self.df_nodes = self.db2df(
            self.base_fields, self.db_info, self.max_num_nodes, idREF,
            refCol=self.REF)
        self.n_nodes = len(self.nodes)

        # If topic features have been taken from the database, compute
        # the topic matrix
        if (self.topics_field is not None and self.topics_field in
                self.base_fields):
            self.T = self.getTopics()
            self.df_nodes.drop(self.topics_field, axis=1, inplace=True)
            logging.info("-- -- -- Data loaded with {0} topics".format(
                self.T.shape[1]))

        # Read some node attributes from auxiliary tables.
        # Filter/order options only make sense for the main-table query, so
        # they are cleared in the copy used for the auxiliary tables.
        db_info = copy.deepcopy(self.db_info)
        db_info['filterOptions'] = None
        db_info['orderOptions'] = None

        for f in self.sub_fields:
            db_info['tablename'] = f[0]
            if len(f) == 3:
                # A 3-tuple carries explicit join columns:
                # (table, left_on, fields) with fields[0] as the right key.
                left_on = f[1]
                right_on = f[2][0]
                f_nodes, df_fnodes = self.db2df(f[2], db_info, refCol=f[2][0])
            else:   # if len(f) == 2
                # Note that we are assuming here that the first field in f[1]
                # is used as the reference field (i.e. idREF=0)
                left_on = f[1][0]
                right_on = f[1][0]
                f_nodes, df_fnodes = self.db2df(f[1], db_info, refCol=f[1][0])

            # self.df_nodes = self.df_nodes.merge(df_fnodes, how='left',
            #                                     on=f[1][0])
            self.df_nodes = self.df_nodes.merge(
                df_fnodes, how='left', left_on=left_on, right_on=right_on)

    def db2df(self, fields, db_info, max_num_nodes=None, idREF=0, refCol='Id'):
        """
        Constructs a data frame from the fields contained in the database
        specified in db_info.

        Args:
            :fields: List of fields to read from the database
            :db_info: Dictionary specifying the database access parameters
            :max_num_nodes: Number of entries to read from the database. If
                            this number is smaller than the db size, a subset
                            is taken by random sampling.
            :idREF:    Index value corresponding to the field in 'fields'
                       that will be used as reference value in the output
                       dataframe.
            :refCol:   Name to give to the reference column (the original
                       field name in the db can be used. This option is
                       provided to manage some particular needs)

        Returns:
            :nodes:    List of components of the reference column in the
                       output dataframe.
            :df_nodes: Output dataframe

        The number of entries taken from the db is at most max_num_nodes.
        """

        # #############
        # Read database

        # Load raw data from the database.
        selectOptions = ", ".join(('`' + x + '`' for x in fields))
        rawData = self.db.queryData(
            db_info['tablename'], selectOptions=selectOptions,
            filterOptions=db_info['filterOptions'],
            orderOptions=db_info['orderOptions'])

        # Since we need random sampling, we download the whole dataset, and
        # subsample:
        if (max_num_nodes is not None and max_num_nodes < len(rawData)):
            # Fixed seed so subsampling is reproducible across runs.
            random.seed(1)
            rawData = random.sample(rawData, max_num_nodes)
        if len(rawData) == 0:
            logging.warning("There are no elements in the database")

        # #########################
        # Create dataframe of nodes

        # Extract nodes
        # idREF = self.base_fields.index(refCol)
        nodes = map(lambda x: x[idREF], rawData)

        # Create pandas structures for nodes.
        # Each node is indexed by field REFERENCIA.
        df_nodes = pd.DataFrame(nodes, columns=[refCol])
        for n, t in enumerate(fields):
            if t != fields[idREF]:    # and t != self.topics_field:
                df_nodes[t] = map(lambda x: x[n], rawData)
                # The following is necessary to save the graph data in a csv
                # file to be read by gephi (not sure why)
                # BUGFIX: guard against an empty result set; indexing
                # rawData[0] would raise an IndexError.
                # NOTE: `unicode` makes this module Python-2 only.
                if rawData and type(rawData[0][n]) == unicode:
                    df_nodes[t] = df_nodes[t].str.encode('utf8')

        return nodes, df_nodes

    def getTopics(self):
        """
        Extracts numerical arrays of data from a list of strings.

        Each entry of the topics field is expected to be a string of
        comma-separated floats; entries that are '' or None abort the run.

        Returns:
            topics: 2-D numpy array with one topic vector per node.
        """

        # Get the position of the topic string in rawdata
        topics_str = self.df_nodes[self.topics_field].tolist()

        # Check if all entries have a topic vector
        is_data = map(lambda d: d == '' or d is None, topics_str)
        if np.count_nonzero(is_data) > 0:
            ipdb.set_trace()
            exit("There are nodes without topic vectors")

        # Get the topic vectors
        StrData = map(lambda d: d.split(','), topics_str)
        topics = np.array([[float(c) for c in d] for d in StrData])

        return topics

    def saveModel(self, Id, tipo, nc):
        """
        Save some parameters and results related to the clustering
        algorithm.

        This method is a specific feature for the graphs constructed for the
        FECYT project.

        Args:
            :Id:   Row id in table 'modelos'.
            :tipo: Model type string (e.g. ClusterGeneral, ClusterTIC, ...).
            :nc:   Number of clusters.
        """

        table = 'modelos'
        keyname = 'id'

        # TIPO: ClusterGeneral, ClusterTIC, ClusterBIO, ClusterENE
        valuename = 'TIPO'
        values = [(tipo, str(Id))]
        self.db.setGenericField(table, keyname, valuename, values)

        # NTPCGRP: Number of clusters
        valuename = 'NTPCGRP'
        values = [(nc, str(Id))]
        self.db.setGenericField(table, keyname, valuename, values)

        # NAMES
        valuename = 'NAMES'
        # Provisionally, we use trivial names for the clusters.
        # Names must be a string in the form
        #    'name1//name2//name3//name4...'
        names = '//'.join(['Cluster' + str(i) for i in range(nc)])
        values = [(names, str(Id))]
        self.db.setGenericField(table, keyname, valuename, values)

        # DESCRIPCION and CONTENIDO: not used

    def exportClusters(self, field, label):
        """Write the cluster index of every node into db column `field`."""

        REF = 'Id'
        data = [tuple(x) for x in self.df_nodes[[label, REF]].values]
        logging.info("-- -- Exporting cluster indices to field " + field)
        self.db.setFields(field, data)

    def saveGraph(self, extralabel="", newREF=True, mode='gnodes'):
        """Save the node (and edge) dataframes to Gephi-readable csv files."""

        # Change the name of the self.REF column to 'Id'
        # This is a requirement to visualize the graph using Gephi.
        if newREF is True:
            self.df_nodes.rename(columns={self.REF: 'Id'}, inplace=True)

        if mode == 'gnodes':
            # Select only the nodes in the subgraph.
            # NOTE(review): .ix is removed in modern pandas; this is
            # consistent with the Python-2 era of this module.
            df2_nodes = self.df_nodes.ix[self.i_to_n]
        else:
            df2_nodes = self.df_nodes

        # Save nodes
        fpath = self.out_path + self.label + extralabel + '_nodes.csv'
        df2_nodes.to_csv(fpath, index=False, columns=self.df_nodes.columns,
                         sep=',', encoding='utf-8')

        # Save edges
        if hasattr(self, 'df_edges'):
            fpath = self.out_path + self.label + extralabel + '_edges.csv'
            self.df_edges.to_csv(fpath, index=False,
                                 columns=self.df_edges.columns,
                                 sep=',', encoding='utf-8')
| [
"random.sample",
"pandas.DataFrame",
"ipdb.set_trace",
"logging.warning",
"random.seed",
"numpy.count_nonzero",
"copy.deepcopy",
"ETL2.DBIndicadores.DBIndicadores",
"logging.info"
] | [((2004, 2116), 'ETL2.DBIndicadores.DBIndicadores', 'DBIndicadores', (["self.db_info['server']", "self.db_info['user']", "self.db_info['password']", "self.db_info['name']"], {}), "(self.db_info['server'], self.db_info['user'], self.db_info[\n 'password'], self.db_info['name'])\n", (2017, 2116), False, 'from ETL2.DBIndicadores import DBIndicadores\n'), ((3366, 3393), 'copy.deepcopy', 'copy.deepcopy', (['self.db_info'], {}), '(self.db_info)\n', (3379, 3393), False, 'import copy\n'), ((6693, 6730), 'pandas.DataFrame', 'pd.DataFrame', (['nodes'], {'columns': '[refCol]'}), '(nodes, columns=[refCol])\n', (6705, 6730), True, 'import pandas as pd\n'), ((9264, 9329), 'logging.info', 'logging.info', (["('-- -- Exporting cluster indices to field ' + field)"], {}), "('-- -- Exporting cluster indices to field ' + field)\n", (9276, 9329), False, 'import logging\n'), ((6205, 6219), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (6216, 6219), False, 'import random\n'), ((6242, 6279), 'random.sample', 'random.sample', (['rawData', 'max_num_nodes'], {}), '(rawData, max_num_nodes)\n', (6255, 6279), False, 'import random\n'), ((6322, 6378), 'logging.warning', 'logging.warning', (['"""There are no elements in the database"""'], {}), "('There are no elements in the database')\n", (6337, 6378), False, 'import logging\n'), ((7741, 7766), 'numpy.count_nonzero', 'np.count_nonzero', (['is_data'], {}), '(is_data)\n', (7757, 7766), True, 'import numpy as np\n'), ((7784, 7800), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7798, 7800), False, 'import ipdb\n')] |
import cv2
import numpy as np
import os
########## KNN CODE ############
def distance(v1, v2):
    """Return the Euclidean distance between vectors v1 and v2."""
    diff = v1 - v2
    return np.sqrt(np.sum(diff * diff))
def knn(train, test, k=5):
    """Classify `test` by majority vote among its k nearest training rows.

    Args:
        train: 2-D array where each row is a feature vector followed by its
            label in the last column.
        test: 1-D feature vector to classify.
        k: Number of neighbours to vote (default 5).

    Returns:
        The most frequent label among the k closest rows. Ties between
        equally frequent labels resolve to the smallest label, because
        np.unique returns labels in sorted order and np.argmax takes the
        first maximum -- same tie rule as the original implementation.
    """
    features = train[:, :-1]
    labels = train[:, -1]
    # Vectorized Euclidean distance from the query to every training row
    # (replaces the original per-row Python loop; same values, O(n) in C).
    dists = np.sqrt(((features - test) ** 2).sum(axis=1))
    # Stable sort preserves row order among equal distances, matching the
    # stable `sorted` used by the original code.
    nearest_labels = labels[np.argsort(dists, kind="stable")[:k]]
    values, counts = np.unique(nearest_labels, return_counts=True)
    return values[np.argmax(counts)]
################################
#Init Camera
cap = cv2.VideoCapture(0)
# Face Detection (Haar cascade file expected next to this script)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip = 0
dataset_path = './data/'
face_data = []
labels = []
class_id = 0 # Labels for the given file
names = {} #Mapping btw id - name
# Data Preparation: each .npy file in dataset_path holds the flattened face
# samples of one person; the file name (minus extension) is the person's name.
for fx in os.listdir(dataset_path):
    if fx.endswith('.npy'):
        #Create a mapping btw class_id and name
        names[class_id] = fx[:-4]
        print("Loaded "+fx)
        data_item = np.load(dataset_path+fx)
        face_data.append(data_item)
        #Create Labels for the class (one label per sample row in the file)
        target = class_id*np.ones((data_item.shape[0],))
        class_id += 1
        labels.append(target)
# Stack all samples / labels; the label becomes the last column of trainset,
# which is the row layout knn() expects.
face_dataset = np.concatenate(face_data,axis=0)
face_labels = np.concatenate(labels,axis=0).reshape((-1,1))
print(face_dataset.shape)
print(face_labels.shape)
trainset = np.concatenate((face_dataset,face_labels),axis=1)
print(trainset.shape)
# Testing: live loop -- detect faces, classify each with knn, draw the result.
while True:
    ret,frame = cap.read()
    if ret == False:
        continue
    faces = face_cascade.detectMultiScale(frame,1.3,5)
    if(len(faces)==0):
        continue
    for face in faces:
        x,y,w,h = face
        #Get the face ROI, padded by `offset` pixels on every side
        offset = 10
        face_section = frame[y-offset:y+h+offset,x-offset:x+w+offset]
        face_section = cv2.resize(face_section,(150,150))
        #Predicted Label (out)
        # NOTE(review): assumes the stored .npy samples were flattened from
        # the same 150x150 crop -- mismatched lengths would break knn.
        out = knn(trainset,face_section.flatten())
        #Display on the screen the name and rectangle around it
        pred_name = names[int(out)]
        cv2.putText(frame,pred_name,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
    cv2.imshow("Faces",frame)
    # Quit on 'q'
    key = cv2.waitKey(1) & 0xFF
    if key==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"os.listdir",
"numpy.unique",
"numpy.ones",
"numpy.argmax",
"cv2.imshow",
"cv2.putText",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.concatenate",
"cv2.CascadeClassifier",
"cv2.resize",
"numpy.load",
"cv2.waitKey"
] | [((762, 781), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (778, 781), False, 'import cv2\n'), ((815, 871), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt.xml"""'], {}), "('haarcascade_frontalface_alt.xml')\n", (836, 871), False, 'import cv2\n'), ((1043, 1067), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (1053, 1067), False, 'import os\n'), ((1394, 1427), 'numpy.concatenate', 'np.concatenate', (['face_data'], {'axis': '(0)'}), '(face_data, axis=0)\n', (1408, 1427), True, 'import numpy as np\n'), ((1551, 1602), 'numpy.concatenate', 'np.concatenate', (['(face_dataset, face_labels)'], {'axis': '(1)'}), '((face_dataset, face_labels), axis=1)\n', (1565, 1602), True, 'import numpy as np\n'), ((2378, 2401), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2399, 2401), False, 'import cv2\n'), ((569, 606), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (578, 606), True, 'import numpy as np\n'), ((662, 682), 'numpy.argmax', 'np.argmax', (['output[1]'], {}), '(output[1])\n', (671, 682), True, 'import numpy as np\n'), ((2280, 2306), 'cv2.imshow', 'cv2.imshow', (['"""Faces"""', 'frame'], {}), "('Faces', frame)\n", (2290, 2306), False, 'import cv2\n'), ((504, 516), 'numpy.array', 'np.array', (['dk'], {}), '(dk)\n', (512, 516), True, 'import numpy as np\n'), ((1200, 1226), 'numpy.load', 'np.load', (['(dataset_path + fx)'], {}), '(dataset_path + fx)\n', (1207, 1226), True, 'import numpy as np\n'), ((1441, 1471), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (1455, 1471), True, 'import numpy as np\n'), ((1939, 1975), 'cv2.resize', 'cv2.resize', (['face_section', '(150, 150)'], {}), '(face_section, (150, 150))\n', (1949, 1975), False, 'import cv2\n'), ((2136, 2241), 'cv2.putText', 'cv2.putText', (['frame', 'pred_name', '(x, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', 
'(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(frame, pred_name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 0, 0), 2, cv2.LINE_AA)\n', (2147, 2241), False, 'import cv2\n'), ((2227, 2289), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n', (2240, 2289), False, 'import cv2\n'), ((2314, 2328), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2325, 2328), False, 'import cv2\n'), ((1307, 1337), 'numpy.ones', 'np.ones', (['(data_item.shape[0],)'], {}), '((data_item.shape[0],))\n', (1314, 1337), True, 'import numpy as np\n')] |
"""IMDB Dataset module for sentiment analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from data.util import OOV_CHAR
from data.util import START_CHAR
from data.util import pad_sentence
NUM_CLASS = 2
def load(vocabulary_size, sentence_length):
  """Returns training and evaluation input for imdb dataset.

  Args:
    vocabulary_size: The number of the most frequent tokens
      to be used from the corpus.
    sentence_length: The number of words in each sentence.
      Longer sentences get cut, shorter ones padded.

  Raises:
    ValueError: if the dataset value is not valid.

  Returns:
    A tuple of length 4, for training and evaluation data,
    each being an numpy array.
  """

  (x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(
      path="imdb.npz",
      num_words=vocabulary_size,
      skip_top=0,
      maxlen=None,
      seed=113,
      start_char=START_CHAR,
      oov_char=OOV_CHAR,
      index_from=OOV_CHAR+1)

  # The identical pad-and-stack logic was previously duplicated for the
  # train and test splits; it is factored into _pad_and_stack below.
  x_train_processed = _pad_and_stack(x_train, sentence_length)
  x_test_processed = _pad_and_stack(x_test, sentence_length)

  # One-hot encode the binary labels via rows of the identity matrix.
  return x_train_processed, np.eye(NUM_CLASS)[y_train], \
      x_test_processed, np.eye(NUM_CLASS)[y_test]


def _pad_and_stack(sentences, sentence_length):
  """Pads/cuts every sentence to sentence_length and stacks into one array."""
  processed = []
  for sen in sentences:
    sen = pad_sentence(sen, sentence_length)
    processed.append(np.array(sen))
  return np.array(processed)
| [
"numpy.array",
"numpy.eye",
"tensorflow.keras.datasets.imdb.load_data",
"data.util.pad_sentence"
] | [((849, 1036), 'tensorflow.keras.datasets.imdb.load_data', 'tf.keras.datasets.imdb.load_data', ([], {'path': '"""imdb.npz"""', 'num_words': 'vocabulary_size', 'skip_top': '(0)', 'maxlen': 'None', 'seed': '(113)', 'start_char': 'START_CHAR', 'oov_char': 'OOV_CHAR', 'index_from': '(OOV_CHAR + 1)'}), "(path='imdb.npz', num_words=vocabulary_size,\n skip_top=0, maxlen=None, seed=113, start_char=START_CHAR, oov_char=\n OOV_CHAR, index_from=OOV_CHAR + 1)\n", (881, 1036), True, 'import tensorflow as tf\n'), ((1234, 1261), 'numpy.array', 'np.array', (['x_train_processed'], {}), '(x_train_processed)\n', (1242, 1261), True, 'import numpy as np\n'), ((1417, 1443), 'numpy.array', 'np.array', (['x_test_processed'], {}), '(x_test_processed)\n', (1425, 1443), True, 'import numpy as np\n'), ((1133, 1167), 'data.util.pad_sentence', 'pad_sentence', (['sen', 'sentence_length'], {}), '(sen, sentence_length)\n', (1145, 1167), False, 'from data.util import pad_sentence\n'), ((1318, 1352), 'data.util.pad_sentence', 'pad_sentence', (['sen', 'sentence_length'], {}), '(sen, sentence_length)\n', (1330, 1352), False, 'from data.util import pad_sentence\n'), ((1197, 1210), 'numpy.array', 'np.array', (['sen'], {}), '(sen)\n', (1205, 1210), True, 'import numpy as np\n'), ((1381, 1394), 'numpy.array', 'np.array', (['sen'], {}), '(sen)\n', (1389, 1394), True, 'import numpy as np\n'), ((1473, 1490), 'numpy.eye', 'np.eye', (['NUM_CLASS'], {}), '(NUM_CLASS)\n', (1479, 1490), True, 'import numpy as np\n'), ((1530, 1547), 'numpy.eye', 'np.eye', (['NUM_CLASS'], {}), '(NUM_CLASS)\n', (1536, 1547), True, 'import numpy as np\n')] |
import os
import re
import textwrap
from pathlib import Path
import moderngl
import numpy as np
from .. import config
from ..utils import opengl
from ..utils.simple_functions import get_parameters
SHADER_FOLDER = Path(__file__).parent / "shaders"
shader_program_cache: dict = {}
file_path_to_code_map: dict = {}
__all__ = [
"Object3D",
"Mesh",
"Shader",
"FullScreenQuad",
]
def get_shader_code_from_file(file_path):
    """Read a shader file, recursively inlining ``#include <name>.glsl`` lines.

    Results are memoized in the module-level ``file_path_to_code_map`` cache,
    keyed by the Path of the file.
    """
    if file_path in file_path_to_code_map:
        return file_path_to_code_map[file_path]

    with open(file_path) as f:
        source = f.read()

    include_lines = re.finditer(
        r"^#include (?P<include_path>.*\.glsl)$",
        source,
        flags=re.MULTILINE,
    )
    for match in include_lines:
        include_path = match.group("include_path")
        # BUGFIX: pass a Path to the recursive call. The original wrapped the
        # path in os.path.join(), which returns a str; a nested include (an
        # included file that itself has an #include) would then fail on
        # `file_path.parent`, since str has no .parent attribute. Using Path
        # throughout also keeps the cache keys of one consistent type.
        included_code = get_shader_code_from_file(
            file_path.parent / include_path,
        )
        source = source.replace(match.group(0), included_code)

    file_path_to_code_map[file_path] = source
    return source
def filter_attributes(unfiltered_attributes, attributes):
    """Return a structured array keeping only the fields named in `attributes`.

    The kept fields retain their original order, element dtype and subarray
    shape; the row count matches the first field of the input array, and the
    data of every kept field is copied over.
    """
    src_dtype = unfiltered_attributes.dtype
    kept_names = [name for name in src_dtype.names if name in attributes]

    # Rebuild the (name, scalar-dtype, shape) triples for the kept fields.
    filtered_dtype = [
        (name, src_dtype[name].subdtype[0].str, src_dtype[name].shape)
        for name in kept_names
    ]

    row_count = unfiltered_attributes[src_dtype.names[0]].shape[0]
    filtered = np.zeros(row_count, dtype=filtered_dtype)
    for name in kept_names:
        filtered[name] = unfiltered_attributes[name]
    return filtered
class Object3D:
    """Base node of the renderer's scene graph.

    Holds a 4x4 model matrix and normal matrix, a parent/children tree, and
    a list of updater callbacks that are run once per frame via update().
    """

    def __init__(self, *children):
        self.model_matrix = np.eye(4)
        self.normal_matrix = np.eye(4)
        self.children = []
        self.parent = None
        self.add(*children)
        self.init_updaters()

    # TODO: Use path_func.
    def interpolate(self, start, end, alpha, _):
        """Linearly blend this object's matrices between start and end."""
        self.model_matrix = (1 - alpha) * start.model_matrix + alpha * end.model_matrix
        self.normal_matrix = (
            1 - alpha
        ) * start.normal_matrix + alpha * end.normal_matrix

    def single_copy(self):
        """Copy this node's matrices only (children are not copied)."""
        copy = Object3D()
        copy.model_matrix = self.model_matrix.copy()
        copy.normal_matrix = self.normal_matrix.copy()
        return copy

    def copy(self):
        """Deep-copy this node and its whole subtree (breadth-first)."""
        node_to_copy = {}
        bfs = [self]
        while bfs:
            node = bfs.pop(0)
            bfs.extend(node.children)
            node_copy = node.single_copy()
            node_to_copy[node] = node_copy

            # Add the copy to the copy of the parent.
            if node.parent is not None and node is not self:
                node_to_copy[node.parent].add(node_copy)
        return node_to_copy[self]

    def add(self, *children):
        """Attach children to this node; each child may have only one parent."""
        for child in children:
            if child.parent is not None:
                raise Exception(
                    "Attempt to add child that's already added to another Object3D",
                )
        self.remove(*children, current_children_only=False)
        self.children.extend(children)
        for child in children:
            child.parent = self

    def remove(self, *children, current_children_only=True):
        """Detach children; with current_children_only, unknown children raise."""
        if current_children_only:
            for child in children:
                if child.parent != self:
                    raise Exception(
                        "Attempt to remove child that isn't added to this Object3D",
                    )
        self.children = list(filter(lambda child: child not in children, self.children))
        for child in children:
            child.parent = None

    def get_position(self):
        """Return the translation column of the model matrix (xyz)."""
        return self.model_matrix[:, 3][:3]

    def set_position(self, position):
        """Write xyz into the translation column of the model matrix."""
        self.model_matrix[:, 3][:3] = position
        return self

    def get_meshes(self):
        """Yield every Mesh in this subtree (depth-first, including self)."""
        dfs = [self]
        while dfs:
            parent = dfs.pop()
            if isinstance(parent, Mesh):
                yield parent
            dfs.extend(parent.children)

    def get_family(self):
        """Yield every node in this subtree (depth-first, including self)."""
        dfs = [self]
        while dfs:
            parent = dfs.pop()
            yield parent
            dfs.extend(parent.children)

    def align_data_and_family(self, _):
        # No-op here; presumably overridden where data alignment is needed.
        pass

    def hierarchical_model_matrix(self):
        """Compose this node's model matrix with all its ancestors' (root first)."""
        if self.parent is None:
            return self.model_matrix

        model_matrices = [self.model_matrix]
        current_object = self
        while current_object.parent is not None:
            model_matrices.append(current_object.parent.model_matrix)
            current_object = current_object.parent
        return np.linalg.multi_dot(list(reversed(model_matrices)))

    def hierarchical_normal_matrix(self):
        """Compose the normal matrix with ancestor model matrices; return 3x3."""
        if self.parent is None:
            return self.normal_matrix[:3, :3]

        normal_matrices = [self.normal_matrix]
        current_object = self
        while current_object.parent is not None:
            normal_matrices.append(current_object.parent.model_matrix)
            current_object = current_object.parent
        return np.linalg.multi_dot(list(reversed(normal_matrices)))[:3, :3]

    def init_updaters(self):
        # Updaters taking (self, dt) vs. (self); see add_updater().
        self.time_based_updaters = []
        self.non_time_updaters = []
        self.has_updaters = False
        self.updating_suspended = False

    def update(self, dt=0):
        """Run all updaters unless updating is suspended; returns self."""
        if not self.has_updaters or self.updating_suspended:
            return self
        for updater in self.time_based_updaters:
            updater(self, dt)
        for updater in self.non_time_updaters:
            updater(self)
        return self

    def get_time_based_updaters(self):
        return self.time_based_updaters

    def has_time_based_updater(self):
        return len(self.time_based_updaters) > 0

    def get_updaters(self):
        return self.time_based_updaters + self.non_time_updaters

    def add_updater(self, update_function, index=None, call_updater=True):
        """Register an updater; functions with a 'dt' parameter get frame time."""
        if "dt" in get_parameters(update_function):
            updater_list = self.time_based_updaters
        else:
            updater_list = self.non_time_updaters

        if index is None:
            updater_list.append(update_function)
        else:
            updater_list.insert(index, update_function)

        self.refresh_has_updater_status()
        if call_updater:
            self.update()
        return self

    def remove_updater(self, update_function):
        """Remove every registration of update_function; returns self."""
        for updater_list in [self.time_based_updaters, self.non_time_updaters]:
            while update_function in updater_list:
                updater_list.remove(update_function)
        self.refresh_has_updater_status()
        return self

    def clear_updaters(self):
        """Drop all updaters; returns self."""
        self.time_based_updaters = []
        self.non_time_updaters = []
        self.refresh_has_updater_status()
        return self

    def match_updaters(self, mobject):
        """Replace this node's updaters with copies of mobject's; returns self."""
        self.clear_updaters()
        for updater in mobject.get_updaters():
            self.add_updater(updater)
        return self

    def suspend_updating(self):
        self.updating_suspended = True
        return self

    def resume_updating(self, call_updater=True):
        self.updating_suspended = False
        if call_updater:
            self.update(dt=0)
        return self

    def refresh_has_updater_status(self):
        # Cached flag so update() can early-exit without list work.
        self.has_updaters = len(self.get_updaters()) > 0
        return self
class Mesh(Object3D):
    """A renderable scene-graph node: vertex attributes plus a Shader.

    Can be built either from (shader, attributes) directly or from a
    (geometry, material) pair, from which attributes/indices are taken.
    """

    def __init__(
        self,
        shader=None,
        attributes=None,
        geometry=None,
        material=None,
        indices=None,
        use_depth_test=True,
        primitive=moderngl.TRIANGLES,
    ):
        super().__init__()
        if shader is not None and attributes is not None:
            self.shader = shader
            self.attributes = attributes
            self.indices = indices
        elif geometry is not None and material is not None:
            # Material acts as the shader; geometry supplies vertex data.
            self.shader = material
            self.attributes = geometry.attributes
            self.indices = geometry.index
        else:
            raise Exception(
                "Mesh requires either attributes and a Shader or a Geometry and a "
                "Material",
            )
        self.use_depth_test = use_depth_test
        self.primitive = primitive
        self.skip_render = False
        self.init_updaters()

    def single_copy(self):
        """Copy this mesh's data and render settings (children not copied)."""
        copy = Mesh(
            attributes=self.attributes.copy(),
            shader=self.shader,
            indices=self.indices.copy() if self.indices is not None else None,
            use_depth_test=self.use_depth_test,
            primitive=self.primitive,
        )
        copy.skip_render = self.skip_render
        copy.model_matrix = self.model_matrix.copy()
        copy.normal_matrix = self.normal_matrix.copy()
        # TODO: Copy updaters?
        return copy

    def set_uniforms(self, renderer):
        """Upload model/view/projection matrices to the shader uniforms."""
        self.shader.set_uniform(
            "u_model_matrix",
            opengl.matrix_to_shader_input(self.model_matrix),
        )
        self.shader.set_uniform("u_view_matrix", renderer.camera.get_view_matrix())
        self.shader.set_uniform(
            "u_projection_matrix",
            renderer.camera.projection_matrix,
        )

    def render(self):
        """Draw the mesh: build GL buffers, issue the draw call, release them."""
        if self.skip_render:
            return

        if self.use_depth_test:
            self.shader.context.enable(moderngl.DEPTH_TEST)
        else:
            self.shader.context.disable(moderngl.DEPTH_TEST)

        from moderngl.program_members.attribute import Attribute

        # Only upload the attributes the shader program actually declares.
        shader_attributes = []
        for k, v in self.shader.shader_program._members.items():
            if isinstance(v, Attribute):
                shader_attributes.append(k)
        shader_attributes = filter_attributes(self.attributes, shader_attributes)

        vertex_buffer_object = self.shader.context.buffer(shader_attributes.tobytes())
        if self.indices is None:
            index_buffer_object = None
        else:
            vert_index_data = self.indices.astype("i4").tobytes()
            if vert_index_data:
                index_buffer_object = self.shader.context.buffer(vert_index_data)
            else:
                # An empty index array means nothing to draw indexed.
                index_buffer_object = None
        vertex_array_object = self.shader.context.simple_vertex_array(
            self.shader.shader_program,
            vertex_buffer_object,
            *shader_attributes.dtype.names,
            index_buffer=index_buffer_object,
        )
        vertex_array_object.render(self.primitive)
        # Release the per-draw GL objects immediately after the draw call.
        vertex_buffer_object.release()
        vertex_array_object.release()
        if index_buffer_object is not None:
            index_buffer_object.release()
class Shader:
    """Wraps a moderngl shader program, compiled from inline source or from
    the .vert/.frag/.geom files in SHADER_FOLDER/<name>, with a module-level
    cache keyed by name.
    """

    def __init__(
        self,
        context,
        name=None,
        source=None,
    ):
        global shader_program_cache
        self.context = context
        self.name = name

        # See if the program is cached (only reused for the same GL context).
        if (
            self.name in shader_program_cache
            and shader_program_cache[self.name].ctx == self.context
        ):
            self.shader_program = shader_program_cache[self.name]
        elif source is not None:
            # Generate the shader from inline code if it was passed.
            self.shader_program = context.program(**source)
        else:
            # Search for a file containing the shader.
            source_dict = {}
            source_dict_key = {
                "vert": "vertex_shader",
                "frag": "fragment_shader",
                "geom": "geometry_shader",
            }
            shader_folder = SHADER_FOLDER / name
            for shader_file in shader_folder.iterdir():
                shader_file_path = shader_folder / shader_file
                shader_source = get_shader_code_from_file(shader_file_path)
                source_dict[source_dict_key[shader_file_path.stem]] = shader_source
            self.shader_program = context.program(**source_dict)

        # Cache the shader.
        if name is not None and name not in shader_program_cache:
            shader_program_cache[self.name] = self.shader_program

    def set_uniform(self, name, value):
        """Set a uniform; silently ignore names the program doesn't declare."""
        try:
            self.shader_program[name] = value
        except KeyError:
            pass
class FullScreenQuad(Mesh):
    """A frame-filling quad (two triangles) drawn with a given fragment
    shader -- useful for post-processing / full-screen effects.
    """

    def __init__(
        self,
        context,
        fragment_shader_source=None,
        fragment_shader_name=None,
    ):
        if fragment_shader_source is None and fragment_shader_name is None:
            raise Exception("Must either pass shader name or shader source.")

        if fragment_shader_name is not None:
            # Use the name.
            shader_file_path = SHADER_FOLDER / f"{fragment_shader_name}.frag"
            fragment_shader_source = get_shader_code_from_file(shader_file_path)
        elif fragment_shader_source is not None:
            # Normalize indentation of inline GLSL source.
            fragment_shader_source = textwrap.dedent(fragment_shader_source.lstrip())

        shader = Shader(
            context,
            source={
                "vertex_shader": """
                #version 330

                in vec4 in_vert;
                uniform mat4 u_model_view_matrix;
                uniform mat4 u_projection_matrix;

                void main() {{
                    vec4 camera_space_vertex = u_model_view_matrix * in_vert;
                    vec4 clip_space_vertex = u_projection_matrix * camera_space_vertex;
                    gl_Position = clip_space_vertex;
                }}
                """,
                "fragment_shader": fragment_shader_source,
            },
        )
        # Two triangles covering the whole frame rectangle.
        attributes = np.zeros(6, dtype=[("in_vert", np.float32, (4,))])
        attributes["in_vert"] = np.array(
            [
                [-config["frame_x_radius"], -config["frame_y_radius"], 0, 1],
                [-config["frame_x_radius"], config["frame_y_radius"], 0, 1],
                [config["frame_x_radius"], config["frame_y_radius"], 0, 1],
                [-config["frame_x_radius"], -config["frame_y_radius"], 0, 1],
                [config["frame_x_radius"], -config["frame_y_radius"], 0, 1],
                [config["frame_x_radius"], config["frame_y_radius"], 0, 1],
            ],
        )
        shader.set_uniform("u_model_view_matrix", opengl.view_matrix())
        shader.set_uniform(
            "u_projection_matrix",
            opengl.orthographic_projection_matrix(),
        )
        super().__init__(shader, attributes)

    def render(self):
        # Delegates to Mesh.render; kept as an explicit override point.
        super().render()
| [
"numpy.eye",
"pathlib.Path",
"os.path.join",
"numpy.array",
"numpy.zeros",
"re.finditer"
] | [((1650, 1766), 'numpy.zeros', 'np.zeros', (['unfiltered_attributes[unfiltered_attributes.dtype.names[0]].shape[0]'], {'dtype': 'filtered_attributes_dtype'}), '(unfiltered_attributes[unfiltered_attributes.dtype.names[0]].shape[\n 0], dtype=filtered_attributes_dtype)\n', (1658, 1766), True, 'import numpy as np\n'), ((216, 230), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'from pathlib import Path\n'), ((610, 696), 're.finditer', 're.finditer', (['"""^#include (?P<include_path>.*\\\\.glsl)$"""', 'source'], {'flags': 're.MULTILINE'}), "('^#include (?P<include_path>.*\\\\.glsl)$', source, flags=re.\n MULTILINE)\n", (621, 696), False, 'import re\n'), ((2073, 2082), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2079, 2082), True, 'import numpy as np\n'), ((2112, 2121), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2118, 2121), True, 'import numpy as np\n'), ((13917, 13967), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': "[('in_vert', np.float32, (4,))]"}), "(6, dtype=[('in_vert', np.float32, (4,))])\n", (13925, 13967), True, 'import numpy as np\n'), ((14000, 14401), 'numpy.array', 'np.array', (["[[-config['frame_x_radius'], -config['frame_y_radius'], 0, 1], [-config[\n 'frame_x_radius'], config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], config['frame_y_radius'], 0, 1], [-config[\n 'frame_x_radius'], -config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], -config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], config['frame_y_radius'], 0, 1]]"], {}), "([[-config['frame_x_radius'], -config['frame_y_radius'], 0, 1], [-\n config['frame_x_radius'], config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], config['frame_y_radius'], 0, 1], [-config[\n 'frame_x_radius'], -config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], -config['frame_y_radius'], 0, 1], [config[\n 'frame_x_radius'], config['frame_y_radius'], 0, 1]])\n", (14008, 14401), True, 'import numpy as np\n'), ((901, 946), 
'os.path.join', 'os.path.join', (['(file_path.parent / include_path)'], {}), '(file_path.parent / include_path)\n', (913, 946), False, 'import os\n')] |
"""Density plot from a distribution of points in 3D"""
import numpy as np
from vedo import *
n = 3000
p = np.random.normal(7, 0.3, (n,3))
p[:int(n*1/3) ] += [1,0,0] # shift 1/3 of the points along x by 1
p[ int(n*2/3):] += [1.7,0.4,0.2]
pts = Points(p, alpha=0.5)
vol = pts.density().c('Dark2').alpha([0.1,1]) # density() returns a Volume
r = precision(vol.info['radius'], 2) # retrieve automatic radius value
vol.addScalarBar3D(title='Density (counts in r_s ='+r+')', c='k', italic=1)
show([(pts,__doc__), vol], N=2, axes=True).close()
| [
"numpy.random.normal"
] | [((107, 139), 'numpy.random.normal', 'np.random.normal', (['(7)', '(0.3)', '(n, 3)'], {}), '(7, 0.3, (n, 3))\n', (123, 139), True, 'import numpy as np\n')] |
"""
Running the threelink arm with the pygame display. The arm will
move the end-effector to the target, which can be moved by
clicking on the background.
"""
import numpy as np
from abr_control.arms import threejoint as arm
# from abr_control.arms import twojoint as arm
from abr_control.interfaces import PyGame
from abr_control.controllers import OSC, Damping, RestingConfig
# initialize our robot config
robot_config = arm.Config(use_cython=True)
# create our arm simulation
arm_sim = arm.ArmSim(robot_config)
# damp the movements of the arm
damping = Damping(robot_config, kv=10)
# keep the arm near a default configuration
resting_config = RestingConfig(
robot_config, kp=50, kv=np.sqrt(50),
rest_angles=[np.pi/4, np.pi/4, None])
# create an operational space controller
ctrlr = OSC(robot_config, kp=20, use_C=True, null_controllers=[damping])
def on_click(self, mouse_x, mouse_y):
self.target[0] = self.mouse_x
self.target[1] = self.mouse_y
# create our interface
interface = PyGame(robot_config, arm_sim, dt=.001, on_click=on_click)
interface.connect()
# create a target
feedback = interface.get_feedback()
target_xyz = robot_config.Tx('EE', feedback['q'])
interface.set_target(target_xyz)
# set up lists for tracking data
ee_path = []
target_path = []
try:
# run ctrl.generate once to load all functions
zeros = np.zeros(robot_config.N_JOINTS)
ctrlr.generate(q=zeros, dq=zeros, target_pos=np.zeros(3))
robot_config.R('EE', q=zeros)
print('\nSimulation starting...\n')
print('\nClick to move the target.\n')
count = 0
while 1:
# get arm feedback
feedback = interface.get_feedback()
hand_xyz = robot_config.Tx('EE', feedback['q'])
# generate an operational space control signal
u = ctrlr.generate(
q=feedback['q'],
dq=feedback['dq'],
target_pos=target_xyz,
target_vel=np.zeros(3))
new_target = interface.get_mousexy()
if new_target is not None:
target_xyz[0:2] = new_target
interface.set_target(target_xyz)
# apply the control signal, step the sim forward
interface.send_forces(
u, update_display=True if count % 50 == 0 else False)
# track data
ee_path.append(np.copy(hand_xyz))
target_path.append(np.copy(target_xyz))
count += 1
finally:
# stop and reset the simulation
interface.disconnect()
print('Simulation terminated...')
| [
"numpy.copy",
"numpy.sqrt",
"abr_control.controllers.OSC",
"abr_control.interfaces.PyGame",
"numpy.zeros",
"abr_control.arms.threejoint.Config",
"abr_control.controllers.Damping",
"abr_control.arms.threejoint.ArmSim"
] | [((426, 453), 'abr_control.arms.threejoint.Config', 'arm.Config', ([], {'use_cython': '(True)'}), '(use_cython=True)\n', (436, 453), True, 'from abr_control.arms import threejoint as arm\n'), ((492, 516), 'abr_control.arms.threejoint.ArmSim', 'arm.ArmSim', (['robot_config'], {}), '(robot_config)\n', (502, 516), True, 'from abr_control.arms import threejoint as arm\n'), ((560, 588), 'abr_control.controllers.Damping', 'Damping', (['robot_config'], {'kv': '(10)'}), '(robot_config, kv=10)\n', (567, 588), False, 'from abr_control.controllers import OSC, Damping, RestingConfig\n'), ((798, 862), 'abr_control.controllers.OSC', 'OSC', (['robot_config'], {'kp': '(20)', 'use_C': '(True)', 'null_controllers': '[damping]'}), '(robot_config, kp=20, use_C=True, null_controllers=[damping])\n', (801, 862), False, 'from abr_control.controllers import OSC, Damping, RestingConfig\n'), ((1008, 1066), 'abr_control.interfaces.PyGame', 'PyGame', (['robot_config', 'arm_sim'], {'dt': '(0.001)', 'on_click': 'on_click'}), '(robot_config, arm_sim, dt=0.001, on_click=on_click)\n', (1014, 1066), False, 'from abr_control.interfaces import PyGame\n'), ((1358, 1389), 'numpy.zeros', 'np.zeros', (['robot_config.N_JOINTS'], {}), '(robot_config.N_JOINTS)\n', (1366, 1389), True, 'import numpy as np\n'), ((693, 704), 'numpy.sqrt', 'np.sqrt', (['(50)'], {}), '(50)\n', (700, 704), True, 'import numpy as np\n'), ((1439, 1450), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1447, 1450), True, 'import numpy as np\n'), ((2303, 2320), 'numpy.copy', 'np.copy', (['hand_xyz'], {}), '(hand_xyz)\n', (2310, 2320), True, 'import numpy as np\n'), ((2349, 2368), 'numpy.copy', 'np.copy', (['target_xyz'], {}), '(target_xyz)\n', (2356, 2368), True, 'import numpy as np\n'), ((1927, 1938), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1935, 1938), True, 'import numpy as np\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import math
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_DATA_TYPES = [dtypes.half, dtypes.float32]
_TEST_PARAM_VALUES = [
# learning_rate, rho, momentum, epsilon, centered
[0.05, 0.9, 0.0, 1e-3, True],
[0.05, 0.9, 0.0, 1e-3, False],
[0.1, 0.9, 0.0, 1e-3, True],
[0.01, 0.9, 0.0, 1e-5, True],
[0.01, 0.9, 0.9, 1e-5, True],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSpropOptimizerTest(test.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
epsilon, centered):
rms_t = rms * rho + (1 - rho) * g * g
if centered:
mg_t = mg * rho + (1 - rho) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
if momentum > 0.:
mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
var_t = var - mom_t
else:
mom_t = mom
var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, rho, momentum, epsilon, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
if centered:
mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
else:
denom_t = rms_t[gindex]
if momentum > 0.:
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +
epsilon)
var_t[gindex] = var[gindex] - mom_t[gindex]
else:
mom_t[gindex] = mom[gindex]
var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
@test_util.run_deprecated_v1
def testDense(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(var1_np, dtype=dtype)
grads0 = constant_op.constant(grads0_np, dtype=dtype)
grads1 = constant_op.constant(grads1_np, dtype=dtype)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
mg1 = opt.get_slot(var1, "mg")
else:
mg0 = None
mg1 = None
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testDenseWithLearningRateDecay(self):
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 4 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0,
rho=0.0,
momentum=0.0,
epsilon=0.0,
centered=False).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariableCentered(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
# loss = lambda: pred * pred # pylint: disable=cell-var-from-loop
sgd_op = rmsprop.RMSprop(
learning_rate=1.0,
rho=0.0,
momentum=0.0,
epsilon=1.0,
centered=True).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testSparse(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([1]))
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
else:
mg0 = None
mg1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, rho, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, rho, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testCallableParams(self):
with context.eager_mode():
for dtype in [dtypes.half, dtypes.float32]:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
rho = lambda: 0.9
momentum = lambda: 0.0
epsilon = lambda: 1.0
opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the rms accumulators where 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
def testConstructRMSpropWithLR(self):
opt = rmsprop.RMSprop(lr=1.0)
opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
opt_3 = rmsprop.RMSprop(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
opt = rmsprop.RMSprop(1., momentum=0., centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and one unique slot variable for v1 and v2.
self.assertEqual(3, len(set(opt.variables())))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertEqual(5, len(set(opt.variables())))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and three unique slot variables for v1 and v2
self.assertEqual(7, len(set(opt.variables())))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.framework.test_util.use_gpu",
"tensorflow.python.eager.context.eager_mode",
"numpy.sqrt",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.global_variables_initializer",
"itertools.product",
"math.sqrt",
"tensorflow.python.framework.constant_op.constant",
"... | [((19603, 19614), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (19612, 19614), False, 'from tensorflow.python.platform import test\n'), ((1782, 1832), 'itertools.product', 'itertools.product', (['_DATA_TYPES', '_TEST_PARAM_VALUES'], {}), '(_DATA_TYPES, _TEST_PARAM_VALUES)\n', (1799, 1832), False, 'import itertools\n'), ((2589, 2606), 'copy.deepcopy', 'copy.deepcopy', (['mg'], {}), '(mg)\n', (2602, 2606), False, 'import copy\n'), ((2619, 2637), 'copy.deepcopy', 'copy.deepcopy', (['rms'], {}), '(rms)\n', (2632, 2637), False, 'import copy\n'), ((2650, 2668), 'copy.deepcopy', 'copy.deepcopy', (['mom'], {}), '(mom)\n', (2663, 2668), False, 'import copy\n'), ((2681, 2699), 'copy.deepcopy', 'copy.deepcopy', (['var'], {}), '(var)\n', (2694, 2699), False, 'import copy\n'), ((6900, 6920), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (6908, 6920), True, 'import numpy as np\n'), ((6937, 6957), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {}), '([0.1, 0.2])\n', (6945, 6957), True, 'import numpy as np\n'), ((6972, 6992), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (6980, 6992), True, 'import numpy as np\n'), ((7009, 7030), 'numpy.array', 'np.array', (['[0.01, 0.2]'], {}), '([0.01, 0.2])\n', (7017, 7030), True, 'import numpy as np\n'), ((7043, 7090), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['var0_np'], {}), '(var0_np)\n', (7081, 7090), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((7102, 7149), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['var1_np'], {}), '(var1_np)\n', (7140, 7149), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((7163, 7194), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads0_np'], {}), '(grads0_np)\n', (7183, 7194), False, 'from tensorflow.python.framework import 
constant_op\n'), ((7208, 7239), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads1_np'], {}), '(grads1_np)\n', (7228, 7239), False, 'from tensorflow.python.framework import constant_op\n'), ((7364, 7489), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': 'learning_rate', 'rho': 'rho', 'momentum': 'momentum', 'epsilon': 'epsilon', 'centered': 'centered', 'decay': 'decay'}), '(learning_rate=learning_rate, rho=rho, momentum=momentum,\n epsilon=epsilon, centered=centered, decay=decay)\n', (7379, 7489), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((7987, 8007), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (7995, 8007), True, 'import numpy as np\n'), ((8021, 8041), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8029, 8041), True, 'import numpy as np\n'), ((8056, 8076), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8064, 8076), True, 'import numpy as np\n'), ((8091, 8111), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8099, 8111), True, 'import numpy as np\n'), ((8126, 8146), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8134, 8146), True, 'import numpy as np\n'), ((8161, 8181), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8169, 8181), True, 'import numpy as np\n'), ((17840, 17863), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'lr': '(1.0)'}), '(lr=1.0)\n', (17855, 17863), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((17876, 17918), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': '(0.1)', 'lr': '(1.0)'}), '(learning_rate=0.1, lr=1.0)\n', (17891, 17918), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((17931, 17965), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], 
{'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (17946, 17965), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((7624, 7664), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (7662, 7664), False, 'from tensorflow.python.ops import variables\n'), ((15488, 15508), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (15506, 15508), False, 'from tensorflow.python.eager import context\n'), ((18151, 18191), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (18189, 18191), False, 'from tensorflow.python.ops import variables\n'), ((18403, 18423), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (18421, 18423), False, 'from tensorflow.python.eager import context\n'), ((18436, 18459), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(1.0)'], {}), '(1.0)\n', (18454, 18459), False, 'from tensorflow.python.ops import variables\n'), ((18470, 18493), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(1.0)'], {}), '(1.0)\n', (18488, 18493), False, 'from tensorflow.python.ops import variables\n'), ((18506, 18556), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', (['(1.0)'], {'momentum': '(0.0)', 'centered': '(False)'}), '(1.0, momentum=0.0, centered=False)\n', (18521, 18556), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((18855, 18919), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': '(1.0)', 'momentum': '(0.2)', 'centered': '(False)'}), '(learning_rate=1.0, momentum=0.2, centered=False)\n', (18870, 18919), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((19220, 19283), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': '(1.0)', 
'momentum': '(0.2)', 'centered': '(True)'}), '(learning_rate=1.0, momentum=0.2, centered=True)\n', (19235, 19283), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((3594, 3613), 'tensorflow.python.framework.test_util.use_gpu', 'test_util.use_gpu', ([], {}), '()\n', (3611, 3613), False, 'from tensorflow.python.framework import test_util\n'), ((3690, 3738), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n', (3698, 3738), True, 'import numpy as np\n'), ((3759, 3807), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.1, 0.2], dtype=dtype.as_numpy_dtype)\n', (3767, 3807), True, 'import numpy as np\n'), ((3826, 3874), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n', (3834, 3874), True, 'import numpy as np\n'), ((3895, 3944), 'numpy.array', 'np.array', (['[0.01, 0.2]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.01, 0.2], dtype=dtype.as_numpy_dtype)\n', (3903, 3944), True, 'import numpy as np\n'), ((3961, 4021), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['var0_np'], {'dtype': 'dtype'}), '(var0_np, dtype=dtype)\n', (3999, 4021), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((4037, 4097), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['var1_np'], {'dtype': 'dtype'}), '(var1_np, dtype=dtype)\n', (4075, 4097), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((4115, 4159), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads0_np'], {'dtype': 'dtype'}), '(grads0_np, dtype=dtype)\n', (4135, 4159), False, 'from tensorflow.python.framework import constant_op\n'), ((4177, 4221), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads1_np'], {'dtype': 
'dtype'}), '(grads1_np, dtype=dtype)\n', (4197, 4221), False, 'from tensorflow.python.framework import constant_op\n'), ((4236, 4348), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': 'learning_rate', 'rho': 'rho', 'momentum': 'momentum', 'epsilon': 'epsilon', 'centered': 'centered'}), '(learning_rate=learning_rate, rho=rho, momentum=momentum,\n epsilon=epsilon, centered=centered)\n', (4251, 4348), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((5071, 5119), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5079, 5119), True, 'import numpy as np\n'), ((5137, 5185), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5145, 5185), True, 'import numpy as np\n'), ((5204, 5252), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5212, 5252), True, 'import numpy as np\n'), ((5271, 5319), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5279, 5319), True, 'import numpy as np\n'), ((5338, 5386), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5346, 5386), True, 'import numpy as np\n'), ((5405, 5453), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (5413, 5453), True, 'import numpy as np\n'), ((9483, 9548), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['[[1.0, 2.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0]], dtype=dtype)\n', (9521, 9548), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((9561, 9610), 'tensorflow.python.framework.constant_op.constant', 
'constant_op.constant', (['[[4.0], [5.0]]'], {'dtype': 'dtype'}), '([[4.0], [5.0]], dtype=dtype)\n', (9581, 9610), False, 'from tensorflow.python.framework import constant_op\n'), ((10640, 10705), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['[[1.0, 2.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0]], dtype=dtype)\n', (10678, 10705), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((10718, 10767), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[4.0], [5.0]]'], {'dtype': 'dtype'}), '([[4.0], [5.0]], dtype=dtype)\n', (10738, 10767), False, 'from tensorflow.python.framework import constant_op\n'), ((11835, 11854), 'tensorflow.python.framework.test_util.use_gpu', 'test_util.use_gpu', ([], {}), '()\n', (11852, 11854), False, 'from tensorflow.python.framework import test_util\n'), ((11931, 11979), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n', (11939, 11979), True, 'import numpy as np\n'), ((12000, 12043), 'numpy.array', 'np.array', (['[0.1]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.1], dtype=dtype.as_numpy_dtype)\n', (12008, 12043), True, 'import numpy as np\n'), ((12062, 12110), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n', (12070, 12110), True, 'import numpy as np\n'), ((12131, 12175), 'numpy.array', 'np.array', (['[0.01]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.01], dtype=dtype.as_numpy_dtype)\n', (12139, 12175), True, 'import numpy as np\n'), ((12192, 12219), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['var0_np'], {}), '(var0_np)\n', (12210, 12219), False, 'from tensorflow.python.ops import variables\n'), ((12235, 12262), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['var1_np'], {}), '(var1_np)\n', (12253, 12262), False, 'from tensorflow.python.ops 
import variables\n'), ((12291, 12320), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (12299, 12320), True, 'import numpy as np\n'), ((12510, 12539), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (12518, 12539), True, 'import numpy as np\n'), ((12715, 12827), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': 'learning_rate', 'rho': 'rho', 'momentum': 'momentum', 'epsilon': 'epsilon', 'centered': 'centered'}), '(learning_rate=learning_rate, rho=rho, momentum=momentum,\n epsilon=epsilon, centered=centered)\n', (12730, 12827), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((13655, 13703), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13663, 13703), True, 'import numpy as np\n'), ((13721, 13769), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13729, 13769), True, 'import numpy as np\n'), ((13788, 13836), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13796, 13836), True, 'import numpy as np\n'), ((13855, 13903), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13863, 13903), True, 'import numpy as np\n'), ((13922, 13970), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13930, 13970), True, 'import numpy as np\n'), ((13989, 14037), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype.as_numpy_dtype'}), '([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n', (13997, 14037), True, 'import numpy as np\n'), ((15575, 15638), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', 
(['[1.0, 2.0]'], {'dtype': 'dtype'}), '([1.0, 2.0], dtype=dtype)\n', (15613, 15638), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((15654, 15717), 'tensorflow.python.ops.resource_variable_ops.ResourceVariable', 'resource_variable_ops.ResourceVariable', (['[3.0, 4.0]'], {'dtype': 'dtype'}), '([3.0, 4.0], dtype=dtype)\n', (15692, 15717), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((15735, 15780), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[0.1, 0.1]'], {'dtype': 'dtype'}), '([0.1, 0.1], dtype=dtype)\n', (15755, 15780), False, 'from tensorflow.python.framework import constant_op\n'), ((15798, 15845), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[0.01, 0.01]'], {'dtype': 'dtype'}), '([0.01, 0.01], dtype=dtype)\n', (15818, 15845), False, 'from tensorflow.python.framework import constant_op\n'), ((15984, 16038), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', (['learning_rate', 'rho', 'momentum', 'epsilon'], {}), '(learning_rate, rho, momentum, epsilon)\n', (15999, 16038), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((2248, 2274), 'numpy.sqrt', 'np.sqrt', (['(denom_t + epsilon)'], {}), '(denom_t + epsilon)\n', (2255, 2274), True, 'import numpy as np\n'), ((4503, 4543), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (4541, 4543), False, 'from tensorflow.python.ops import variables\n'), ((10017, 10057), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (10055, 10057), False, 'from tensorflow.python.ops import variables\n'), ((11248, 11288), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (11286, 11288), False, 'from tensorflow.python.ops import variables\n'), ((12369, 12400), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads0_np'], {}), '(grads0_np)\n', (12389, 12400), False, 'from tensorflow.python.framework import constant_op\n'), ((12414, 12453), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads0_np_indices'], {}), '(grads0_np_indices)\n', (12434, 12453), False, 'from tensorflow.python.framework import constant_op\n'), ((12455, 12480), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1]'], {}), '([1])\n', (12475, 12480), False, 'from tensorflow.python.framework import constant_op\n'), ((12588, 12619), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads1_np'], {}), '(grads1_np)\n', (12608, 12619), False, 'from tensorflow.python.framework import constant_op\n'), ((12633, 12672), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['grads1_np_indices'], {}), '(grads1_np_indices)\n', (12653, 12672), False, 'from tensorflow.python.framework import constant_op\n'), ((12674, 12699), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1]'], {}), '([1])\n', (12694, 12699), False, 'from tensorflow.python.framework import constant_op\n'), ((12981, 13021), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (13019, 13021), False, 'from tensorflow.python.ops import variables\n'), ((2360, 2376), 'numpy.sqrt', 'np.sqrt', (['denom_t'], {}), '(denom_t)\n', (2367, 2376), True, 'import numpy as np\n'), ((3131, 3157), 'numpy.sqrt', 'np.sqrt', (['(denom_t + epsilon)'], {}), '(denom_t + epsilon)\n', (3138, 3157), True, 'import numpy as np\n'), ((9665, 9708), 'tensorflow.python.ops.embedding_ops.embedding_lookup', 'embedding_ops.embedding_lookup', (['[var0]', '[0]'], {}), '([var0], [0])\n', (9695, 9708), False, 'from tensorflow.python.ops import embedding_ops\n'), ((9798, 9888), 
'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': '(1.0)', 'rho': '(0.0)', 'momentum': '(0.0)', 'epsilon': '(0.0)', 'centered': '(False)'}), '(learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,\n centered=False)\n', (9813, 9888), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((10822, 10865), 'tensorflow.python.ops.embedding_ops.embedding_lookup', 'embedding_ops.embedding_lookup', (['[var0]', '[0]'], {}), '([var0], [0])\n', (10852, 10865), False, 'from tensorflow.python.ops import embedding_ops\n'), ((11030, 11119), 'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop', 'rmsprop.RMSprop', ([], {'learning_rate': '(1.0)', 'rho': '(0.0)', 'momentum': '(0.0)', 'epsilon': '(1.0)', 'centered': '(True)'}), '(learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,\n centered=True)\n', (11045, 11119), False, 'from tensorflow.python.keras.optimizer_v2 import rmsprop\n'), ((3382, 3398), 'numpy.sqrt', 'np.sqrt', (['denom_t'], {}), '(denom_t)\n', (3389, 3398), True, 'import numpy as np\n'), ((16529, 16551), 'math.sqrt', 'math.sqrt', (['(0.001 + 1.0)'], {}), '(0.001 + 1.0)\n', (16538, 16551), False, 'import math\n'), ((16589, 16611), 'math.sqrt', 'math.sqrt', (['(0.001 + 1.0)'], {}), '(0.001 + 1.0)\n', (16598, 16611), False, 'import math\n'), ((16753, 16775), 'math.sqrt', 'math.sqrt', (['(1e-05 + 1.0)'], {}), '(1e-05 + 1.0)\n', (16762, 16775), False, 'import math\n'), ((16816, 16838), 'math.sqrt', 'math.sqrt', (['(1e-05 + 1.0)'], {}), '(1e-05 + 1.0)\n', (16825, 16838), False, 'import math\n'), ((17214, 17250), 'math.sqrt', 'math.sqrt', (['(0.001 * 0.9 + 0.001 + 1.0)'], {}), '(0.001 * 0.9 + 0.001 + 1.0)\n', (17223, 17250), False, 'import math\n'), ((17343, 17379), 'math.sqrt', 'math.sqrt', (['(0.001 * 0.9 + 0.001 + 1.0)'], {}), '(0.001 * 0.9 + 0.001 + 1.0)\n', (17352, 17379), False, 'import math\n'), ((17579, 17615), 'math.sqrt', 'math.sqrt', (['(1e-05 * 0.9 + 1e-05 + 1.0)'], {}), '(1e-05 * 0.9 + 1e-05 + 
1.0)\n', (17588, 17615), False, 'import math\n'), ((17713, 17749), 'math.sqrt', 'math.sqrt', (['(1e-05 * 0.9 + 1e-05 + 1.0)'], {}), '(1e-05 * 0.9 + 1e-05 + 1.0)\n', (17722, 17749), False, 'import math\n'), ((17159, 17181), 'math.sqrt', 'math.sqrt', (['(0.001 + 1.0)'], {}), '(0.001 + 1.0)\n', (17168, 17181), False, 'import math\n'), ((17288, 17310), 'math.sqrt', 'math.sqrt', (['(0.001 + 1.0)'], {}), '(0.001 + 1.0)\n', (17297, 17310), False, 'import math\n'), ((17521, 17543), 'math.sqrt', 'math.sqrt', (['(1e-05 + 1.0)'], {}), '(1e-05 + 1.0)\n', (17530, 17543), False, 'import math\n'), ((17655, 17677), 'math.sqrt', 'math.sqrt', (['(1e-05 + 1.0)'], {}), '(1e-05 + 1.0)\n', (17664, 17677), False, 'import math\n')] |
"""
A couple of mesh objects for GPU rendering.
"""
from OpenGL.GL import *
from OpenGL.arrays import vbo
import numpy as np
class Cube:
    """A cube spanning [-1, 1] on every axis, uploaded to GPU buffers."""

    def __init__(self):
        lo, hi = -1.0, 1.0
        # The eight corners, ordered by the (x, y, z) bit pattern with z
        # varying fastest -- the index list below relies on this ordering.
        corners = np.array(
            [c for x in (lo, hi) for y in (lo, hi) for z in (lo, hi)
             for c in (x, y, z)],
            dtype="f")
        # Two triangles per face, six faces.
        faces = np.array([
            7, 3, 1, 1, 5, 7,
            0, 2, 6, 6, 4, 0,
            6, 2, 3, 3, 7, 6,
            1, 0, 4, 4, 5, 1,
            3, 2, 0, 0, 1, 3,
            4, 6, 7, 7, 5, 4,
        ], dtype=np.int32)
        # Vertex position buffer.
        self.vertex_vbo = vbo.VBO(data=corners, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)
        # Triangle index buffer.
        self.index_vbo = vbo.VBO(data=faces, usage=GL_STATIC_DRAW, target=GL_ELEMENT_ARRAY_BUFFER)
class UnitCube:
    """A cube of side 1 centered at the origin, with edge connectivity tables."""

    def __init__(self):
        h = 0.5
        # Corner order: bottom face (z = -h) first, then the top face,
        # each traversed in the same (x, y) quad order.
        quad = [(-h, -h), (h, -h), (h, h), (-h, h)]
        self.vertices = np.array(
            [(x, y, z) for z in (-h, h) for (x, y) in quad], dtype="f")
        # 8 rows of 12 edge indices -- presumably one ordering per corner,
        # consumed by the renderer; kept verbatim (TODO confirm semantics).
        self.edge_list = np.array([[0, 1, 5, 6, 4, 8, 11, 9, 3, 7, 2, 10],
                                   [0, 4, 3, 11, 1, 2, 6, 7, 5, 9, 8, 10],
                                   [1, 5, 0, 8, 2, 3, 7, 4, 6, 10, 9, 11],
                                   [7, 11, 10, 8, 2, 6, 1, 9, 3, 0, 4, 5],
                                   [8, 5, 9, 1, 11, 10, 7, 6, 4, 3, 0, 2],
                                   [9, 6, 10, 2, 8, 11, 4, 7, 5, 0, 1, 3],
                                   [9, 8, 5, 4, 6, 1, 2, 0, 10, 7, 11, 3],
                                   [10, 9, 6, 5, 7, 2, 3, 1, 11, 4, 8, 0]
                                   ], dtype=np.int32)
        # The 12 cube edges as pairs of vertex indices.
        self.edges = np.asarray(
            [[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 5], [2, 6], [3, 7],
             [4, 5], [5, 6], [6, 7], [7, 4]], dtype=np.int32)
class Quad:
    """A 2-D quad spanning [-1, 1] on both axes, stored as a GPU vertex buffer."""

    def __init__(self):
        corners = np.array([
            [-1, -1],
            [-1, 1],
            [1, -1],
            [1, 1],
        ], dtype="f")
        self.vertex_vbo = vbo.VBO(data=corners, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)
class Texture:
    """A 2-D quad spanning [0, 1] on both axes, stored as a GPU vertex buffer."""

    def __init__(self):
        corners = np.array([
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ], dtype="f")
        self.vertex_vbo = vbo.VBO(data=corners, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)
class Surface:
    """A unit surface patch in the z = 0 plane, stored as a GPU vertex buffer."""

    def __init__(self):
        corners = np.array([
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0],
            [1.0, 1.0, 0.0],
            [1.0, 0.0, 0.0],
        ], dtype="f")
        # Only positions are uploaded; no index buffer is created here.
        self.vertex_vbo = vbo.VBO(data=corners, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)
"numpy.array",
"numpy.asarray",
"OpenGL.arrays.vbo.VBO"
] | [((217, 314), 'numpy.array', 'np.array', (['[O, O, O, O, O, X, O, X, O, O, X, X, X, O, O, X, O, X, X, X, O, X, X, X]'], {'dtype': '"""f"""'}), "([O, O, O, O, O, X, O, X, O, O, X, X, X, O, O, X, O, X, X, X, O, X,\n X, X], dtype='f')\n", (225, 314), True, 'import numpy as np\n'), ((329, 467), 'numpy.array', 'np.array', (['[7, 3, 1, 1, 5, 7, 0, 2, 6, 6, 4, 0, 6, 2, 3, 3, 7, 6, 1, 0, 4, 4, 5, 1, 3,\n 2, 0, 0, 1, 3, 4, 6, 7, 7, 5, 4]'], {'dtype': 'np.int32'}), '([7, 3, 1, 1, 5, 7, 0, 2, 6, 6, 4, 0, 6, 2, 3, 3, 7, 6, 1, 0, 4, 4,\n 5, 1, 3, 2, 0, 0, 1, 3, 4, 6, 7, 7, 5, 4], dtype=np.int32)\n', (337, 467), True, 'import numpy as np\n'), ((588, 657), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'positions', 'usage': 'GL_STATIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=positions, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)\n', (595, 657), False, 'from OpenGL.arrays import vbo\n'), ((720, 795), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'indices', 'usage': 'GL_STATIC_DRAW', 'target': 'GL_ELEMENT_ARRAY_BUFFER'}), '(data=indices, usage=GL_STATIC_DRAW, target=GL_ELEMENT_ARRAY_BUFFER)\n', (727, 795), False, 'from OpenGL.arrays import vbo\n'), ((863, 1041), 'numpy.array', 'np.array', (['[[-0.5, -0.5, -0.5], [0.5, -0.5, -0.5], [0.5, 0.5, -0.5], [-0.5, 0.5, -0.5],\n [-0.5, -0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, 0.5], [-0.5, 0.5, 0.5]]'], {'dtype': '"""f"""'}), "([[-0.5, -0.5, -0.5], [0.5, -0.5, -0.5], [0.5, 0.5, -0.5], [-0.5, \n 0.5, -0.5], [-0.5, -0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, 0.5], [-0.5,\n 0.5, 0.5]], dtype='f')\n", (871, 1041), True, 'import numpy as np\n'), ((1252, 1615), 'numpy.array', 'np.array', (['[[0, 1, 5, 6, 4, 8, 11, 9, 3, 7, 2, 10], [0, 4, 3, 11, 1, 2, 6, 7, 5, 9, 8,\n 10], [1, 5, 0, 8, 2, 3, 7, 4, 6, 10, 9, 11], [7, 11, 10, 8, 2, 6, 1, 9,\n 3, 0, 4, 5], [8, 5, 9, 1, 11, 10, 7, 6, 4, 3, 0, 2], [9, 6, 10, 2, 8, \n 11, 4, 7, 5, 0, 1, 3], [9, 8, 5, 4, 6, 1, 2, 0, 10, 7, 11, 3], [10, 9, \n 6, 5, 7, 2, 3, 1, 11, 4, 8, 0]]'], {'dtype': 
'np.int32'}), '([[0, 1, 5, 6, 4, 8, 11, 9, 3, 7, 2, 10], [0, 4, 3, 11, 1, 2, 6, 7,\n 5, 9, 8, 10], [1, 5, 0, 8, 2, 3, 7, 4, 6, 10, 9, 11], [7, 11, 10, 8, 2,\n 6, 1, 9, 3, 0, 4, 5], [8, 5, 9, 1, 11, 10, 7, 6, 4, 3, 0, 2], [9, 6, 10,\n 2, 8, 11, 4, 7, 5, 0, 1, 3], [9, 8, 5, 4, 6, 1, 2, 0, 10, 7, 11, 3], [\n 10, 9, 6, 5, 7, 2, 3, 1, 11, 4, 8, 0]], dtype=np.int32)\n', (1260, 1615), True, 'import numpy as np\n'), ((1752, 1880), 'numpy.asarray', 'np.asarray', (['[[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 5], [2, 6], [3, 7], [4, 5], [5,\n 6], [6, 7], [7, 4]]'], {'dtype': 'np.int32'}), '([[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 5], [2, 6], [3, 7],\n [4, 5], [5, 6], [6, 7], [7, 4]], dtype=np.int32)\n', (1762, 1880), True, 'import numpy as np\n'), ((1913, 1970), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1], [1, -1], [1, 1]]'], {'dtype': '"""f"""'}), "([[-1, -1], [-1, 1], [1, -1], [1, 1]], dtype='f')\n", (1921, 1970), True, 'import numpy as np\n'), ((2044, 2113), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'positions', 'usage': 'GL_STATIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=positions, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)\n', (2051, 2113), False, 'from OpenGL.arrays import vbo\n'), ((2176, 2229), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {'dtype': '"""f"""'}), "([[0, 0], [0, 1], [1, 0], [1, 1]], dtype='f')\n", (2184, 2229), True, 'import numpy as np\n'), ((2319, 2388), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'positions', 'usage': 'GL_STATIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=positions, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)\n', (2326, 2388), False, 'from OpenGL.arrays import vbo\n'), ((2451, 2545), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0]]'], {'dtype': '"""f"""'}), "([[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0\n ]], dtype='f')\n", (2459, 2545), True, 'import numpy as np\n'), ((2769, 2838), 'OpenGL.arrays.vbo.VBO', 
'vbo.VBO', ([], {'data': 'positions', 'usage': 'GL_STATIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=positions, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)\n', (2776, 2838), False, 'from OpenGL.arrays import vbo\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from empiricaldist import Pmf
from scipy.stats import gaussian_kde
from scipy.stats import binom
from scipy.stats import gamma
from scipy.stats import poisson
def values(series):
    """Tabulate the distinct values in a series and their frequencies.

    Returns a DataFrame because they get rendered better in Jupyter.

    series: Pandas Series
    returns: Pandas DataFrame with index 'values' and column 'counts'
    """
    counts = (series.value_counts(dropna=False)
                    .sort_index()
                    .rename('counts')
                    .rename_axis('values'))
    return counts.to_frame()
def write_table(table, label, **options):
    """Write a table in LaTeX format to tables/<label>.tex.

    table: DataFrame
    label: string used to build the output filename
    options: passed to DataFrame.to_latex
    """
    filename = f'tables/{label}.tex'
    # with-block guarantees the handle is closed even if to_latex raises;
    # the original left the file open on any exception
    with open(filename, 'w') as fp:
        fp.write(table.to_latex(**options))
def write_pmf(pmf, label):
    """Write a Pmf object as a LaTeX table.

    pmf: Pmf
    label: string filename label
    """
    table = pd.DataFrame({'qs': pmf.index, 'ps': pmf.values})
    write_table(table, label, index=False)
def underride(d, **options):
    """Fill in missing keys of d with the given defaults, in place.

    d: dictionary
    options: keyword args added only where the key is absent
    returns: d, for convenience
    """
    for key, value in options.items():
        if key not in d:
            d[key] = value
    return d
def decorate(**options):
    """Decorate the current axes.

    Call decorate with keyword arguments like

    decorate(title='Title',
             xlabel='x',
             ylabel='y')

    The keyword arguments can be any of the axis properties

    https://matplotlib.org/api/axes_api.html
    """
    axes = plt.gca()
    axes.set(**options)
    # Only draw a legend when at least one artist carries a label.
    legend_handles, legend_labels = axes.get_legend_handles_labels()
    if legend_handles:
        axes.legend(legend_handles, legend_labels)
    plt.tight_layout()
def savefig(root, **options):
    """Save the current figure under figs/<root>.<format>.

    root: string filename root
    options: passed to plt.savefig; an optional 'format' entry selects
             a single output format instead of the pdf+png default
    """
    fmt = options.pop('format', None)
    formats = [fmt] if fmt else ['pdf', 'png']
    for fmt in formats:
        plt.savefig(f'figs/{root}.{fmt}', **options)
def make_die(sides):
    """Pmf for a fair die with the given number of sides.

    sides: int
    returns: Pmf assigning probability 1/sides to each face 1..sides
    """
    faces = np.arange(1, sides + 1)
    return Pmf(1 / sides, faces)
def add_dist_seq(seq):
    """Distribution of the sum of quantities drawn from several PMFs.

    seq: non-empty sequence of Pmf objects
    returns: Pmf
    """
    result, rest = seq[0], seq[1:]
    for pmf in rest:
        result = result.add_dist(pmf)
    return result
def make_mixture(pmf, pmf_seq):
    """Make a mixture of distributions.

    pmf: mapping from each hypothesis to its probability
         (or it can be a sequence of probabilities)
    pmf_seq: sequence of Pmfs, each representing
             a conditional distribution for one hypothesis

    returns: Pmf representing the mixture
    """
    # Stack the conditionals as columns; quantities missing from a
    # conditional get probability 0.
    table = pd.DataFrame(pmf_seq).fillna(0).transpose()
    # Weight each column by its hypothesis probability, then total across.
    weighted = table * np.array(pmf)
    return Pmf(weighted.sum(axis=1))
def summarize(posterior, digits=3, prob=0.9):
    """Print the mean and credible interval of a distribution.

    posterior: Pmf
    digits: number of digits to round the mean to
    prob: probability mass inside the credible interval
    """
    # Original hard-coded 3 here, silently ignoring the digits parameter.
    mean = np.round(posterior.mean(), digits)
    ci = posterior.credible_interval(prob)
    print(mean, ci)
def outer_product(s1, s2):
    """Compute the outer product of two Series.

    First Series goes down the rows;
    second goes across the columns.

    s1: Series
    s2: Series

    return: DataFrame
    """
    matrix = np.outer(s1.to_numpy(), s2.to_numpy())
    return pd.DataFrame(matrix, index=s1.index, columns=s2.index)
def make_uniform(qs, name=None, **options):
    """Make a Pmf that represents a uniform distribution.

    qs: quantities
    name: string name for the quantity index
    options: passed to Pmf

    returns: normalized Pmf
    """
    uniform = Pmf(1.0, qs, **options)
    uniform.normalize()
    if name:
        uniform.index.name = name
    return uniform
def make_joint(s1, s2):
    """Compute the outer product of two Series.

    First Series goes across the columns;
    second goes down the rows.

    s1: Series
    s2: Series

    return: DataFrame
    """
    product = np.outer(s2.to_numpy(), s1.to_numpy())
    return pd.DataFrame(product, columns=s1.index, index=s2.index)
def make_mesh(joint):
"""Make a mesh grid from the quantities in a joint distribution.
joint: DataFrame representing a joint distribution
returns: a mesh grid (X, Y) where X contains the column names and
Y contains the row labels
"""
x = joint.columns
y = joint.index
return np.meshgrid(x, y)
def normalize(joint):
    """Normalize a joint distribution in place.

    joint: DataFrame; scaled so its probabilities sum to 1
    returns: the total probability before normalization
    """
    total = joint.to_numpy().sum()
    joint /= total
    return total
def marginal(joint, axis):
    """Compute a marginal distribution.

    axis=0 returns the marginal distribution of the first variable
    axis=1 returns the marginal distribution of the second variable

    joint: DataFrame representing a joint distribution
    axis: int axis to sum along

    returns: Pmf
    """
    summed = joint.sum(axis=axis)
    return Pmf(summed)
def pmf_marginal(joint_pmf, level):
    """Compute a marginal distribution from a joint Pmf.

    joint_pmf: Pmf with a MultiIndex, representing a joint distribution
    level: int, index level to marginalize onto

    returns: Pmf
    """
    # Series.sum(level=...) was deprecated in pandas 1.3 and removed in
    # 2.0; groupby(level=...).sum() is the documented replacement.
    return Pmf(joint_pmf.groupby(level=level).sum())
def plot_contour(joint, **options):
    """Plot a joint distribution as contours.

    joint: DataFrame representing a joint PMF
    options: passed to plt.contour
    returns: the matplotlib ContourSet
    """
    probs = joint.to_numpy()
    # Six evenly spaced levels between min and max, dropping the lowest
    # so the outermost contour is not at the floor of the distribution.
    contour_levels = np.linspace(probs.min(), probs.max(), 6)[1:]
    underride(options, levels=contour_levels, linewidths=1)
    cs = plt.contour(joint.columns, joint.index, joint, **options)
    decorate(xlabel=joint.columns.name,
             ylabel=joint.index.name)
    return cs
def make_binomial(n, p):
    """Make a binomial distribution.

    n: number of trials
    p: probability of success

    returns: Pmf representing the distribution of k
    """
    quantities = np.arange(n + 1)
    probs = binom.pmf(quantities, n, p)
    return Pmf(probs, quantities)
def make_gamma_dist(alpha, beta):
    """Make a frozen gamma distribution object.

    alpha: shape parameter
    beta: rate parameter (scale is 1/beta)

    returns: scipy frozen gamma distribution, with the parameters
             stashed on it as .alpha and .beta for later retrieval
    """
    dist = gamma(alpha, scale=1 / beta)
    dist.alpha, dist.beta = alpha, beta
    return dist
def make_poisson_pmf(lam, qs):
    """Make a PMF of a Poisson distribution.

    lam: event rate
    qs: sequence of values for `k`

    returns: normalized Pmf
    """
    probs = poisson.pmf(qs, lam)
    result = Pmf(probs, qs)
    result.normalize()
    return result
def pmf_from_dist(dist, qs):
    """Discretize a continuous distribution onto the given quantities.

    dist: SciPy distribution object (must expose .pdf)
    qs: quantities at which to evaluate the density

    returns: normalized Pmf
    """
    result = Pmf(dist.pdf(qs), qs)
    result.normalize()
    return result
def kde_from_sample(sample, qs, **options):
    """Estimate a density from a sample via Gaussian KDE.

    sample: sequence of values
    qs: quantities where the KDE is evaluated
    options: passed to Pmf

    returns: normalized Pmf over qs
    """
    density = gaussian_kde(sample)
    result = Pmf(density(qs), qs, **options)
    result.normalize()
    return result
def kde_from_pmf(pmf, n=101, **options):
    """Smooth a Pmf with a Gaussian KDE over its support.

    pmf: Pmf object
    n: number of evaluation points spanning [min(qs), max(qs)]
    options: passed to Pmf

    returns: normalized Pmf
    """
    # TODO: should this take qs rather than use min-max?
    density = gaussian_kde(pmf.qs, weights=pmf.ps)
    grid = np.linspace(pmf.qs.min(), pmf.qs.max(), n)
    smoothed = Pmf(density.evaluate(grid), grid, **options)
    smoothed.normalize()
    return smoothed
from statsmodels.nonparametric.smoothers_lowess import lowess
def make_lowess(series):
    """Use LOWESS to compute a smooth line through a series.

    series: pd.Series

    returns: pd.Series mapping smoothed x to smoothed y
    """
    smoothed = lowess(series.values, series.index.values)
    xs, ys = np.transpose(smoothed)
    return pd.Series(ys, index=xs)
def plot_series_lowess(series, color):
    """Scatter-plot a series and overlay its LOWESS smoothing.

    series: pd.Series
    color: string or tuple
    """
    series.plot(lw=0, marker='o', color=color, alpha=0.5)
    # the '_' label keeps the smooth line out of the legend
    make_lowess(series).plot(label='_', color=color)
from seaborn import JointGrid
def joint_plot(joint, **options):
    """Show a joint distribution with its marginals.

    joint: DataFrame that represents a joint distribution
    options: passed to JointGrid
    """
    # Variable names come from the axis labels, with generic fallbacks.
    xname = joint.columns.name
    if xname is None:
        xname = 'x'
    yname = joint.index.name
    if yname is None:
        yname = 'y'

    # JointGrid wants a DataFrame, so hand it a single dummy row.
    placeholder = pd.DataFrame({xname: [0], yname: [0]})
    grid = JointGrid(xname, yname, placeholder, **options)

    # Draw the joint distribution as contours in the center panel.
    grid.ax_joint.contour(joint.columns,
                          joint.index,
                          joint,
                          cmap='viridis')

    # Replace the marginal panels with the true marginal distributions.
    marginal_x = marginal(joint, 0)
    grid.ax_marg_x.plot(marginal_x.qs, marginal_x.ps)
    marginal_y = marginal(joint, 1)
    grid.ax_marg_y.plot(marginal_y.ps, marginal_y.qs)
Gray20 = (0.162, 0.162, 0.162, 0.7)
Gray30 = (0.262, 0.262, 0.262, 0.7)
Gray40 = (0.355, 0.355, 0.355, 0.7)
Gray50 = (0.44, 0.44, 0.44, 0.7)
Gray60 = (0.539, 0.539, 0.539, 0.7)
Gray70 = (0.643, 0.643, 0.643, 0.7)
Gray80 = (0.757, 0.757, 0.757, 0.7)
Pu20 = (0.247, 0.0, 0.49, 0.7)
Pu30 = (0.327, 0.149, 0.559, 0.7)
Pu40 = (0.395, 0.278, 0.62, 0.7)
Pu50 = (0.46, 0.406, 0.685, 0.7)
Pu60 = (0.529, 0.517, 0.742, 0.7)
Pu70 = (0.636, 0.623, 0.795, 0.7)
Pu80 = (0.743, 0.747, 0.866, 0.7)
Bl20 = (0.031, 0.188, 0.42, 0.7)
Bl30 = (0.031, 0.265, 0.534, 0.7)
Bl40 = (0.069, 0.365, 0.649, 0.7)
Bl50 = (0.159, 0.473, 0.725, 0.7)
Bl60 = (0.271, 0.581, 0.781, 0.7)
Bl70 = (0.417, 0.681, 0.838, 0.7)
Bl80 = (0.617, 0.791, 0.882, 0.7)
Gr20 = (0.0, 0.267, 0.106, 0.7)
Gr30 = (0.0, 0.312, 0.125, 0.7)
Gr40 = (0.001, 0.428, 0.173, 0.7)
Gr50 = (0.112, 0.524, 0.253, 0.7)
Gr60 = (0.219, 0.633, 0.336, 0.7)
Gr70 = (0.376, 0.73, 0.424, 0.7)
Gr80 = (0.574, 0.824, 0.561, 0.7)
Or20 = (0.498, 0.153, 0.016, 0.7)
Or30 = (0.498, 0.153, 0.016, 0.7)
Or40 = (0.599, 0.192, 0.013, 0.7)
Or50 = (0.746, 0.245, 0.008, 0.7)
Or60 = (0.887, 0.332, 0.031, 0.7)
Or70 = (0.966, 0.475, 0.147, 0.7)
Or80 = (0.992, 0.661, 0.389, 0.7)
Re20 = (0.404, 0.0, 0.051, 0.7)
Re30 = (0.495, 0.022, 0.063, 0.7)
Re40 = (0.662, 0.062, 0.085, 0.7)
Re50 = (0.806, 0.104, 0.118, 0.7)
Re60 = (0.939, 0.239, 0.178, 0.7)
Re70 = (0.985, 0.448, 0.322, 0.7)
Re80 = (0.988, 0.646, 0.532, 0.7)
from cycler import cycler
color_list = [Bl30, Or70, Gr50, Re60, Pu20, Gray70, Re80, Gray50,
Gr70, Bl50, Re40, Pu70, Or50, Gr30, Bl70, Pu50, Gray30]
color_cycle = cycler(color=color_list)
def set_pyplot_params():
    """Apply the shared Matplotlib defaults (color cycle, line width, DPI)."""
    plt.rcParams.update({
        'axes.prop_cycle': color_cycle,
        'lines.linewidth': 3,
        # high resolution for publication-ready output files
        'figure.dpi': 300,
    })
| [
"numpy.array",
"numpy.arange",
"scipy.stats.gaussian_kde",
"statsmodels.nonparametric.smoothers_lowess.lowess",
"matplotlib.pyplot.contour",
"numpy.linspace",
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.savefig",
"seaborn.JointGrid",
"matplotlib.pyplot.gca",
"cycler.cycler",
"nu... | [((10858, 10882), 'cycler.cycler', 'cycler', ([], {'color': 'color_list'}), '(color=color_list)\n', (10864, 10882), False, 'from cycler import cycler\n'), ((589, 609), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (601, 609), True, 'import pandas as pd\n'), ((1029, 1043), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1041, 1043), True, 'import pandas as pd\n'), ((1679, 1688), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1686, 1688), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1839), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1837, 1839), True, 'import matplotlib.pyplot as plt\n'), ((2356, 2379), 'numpy.arange', 'np.arange', (['(1)', '(sides + 1)'], {}), '(1, sides + 1)\n', (2365, 2379), True, 'import numpy as np\n'), ((2388, 2412), 'empiricaldist.Pmf', 'Pmf', (['(1 / sides)', 'outcomes'], {}), '(1 / sides, outcomes)\n', (2391, 2412), False, 'from empiricaldist import Pmf\n'), ((3069, 3082), 'numpy.array', 'np.array', (['pmf'], {}), '(pmf)\n', (3077, 3082), True, 'import numpy as np\n'), ((3121, 3131), 'empiricaldist.Pmf', 'Pmf', (['total'], {}), '(total)\n', (3124, 3131), False, 'from empiricaldist import Pmf\n'), ((3714, 3763), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {'index': 's1.index', 'columns': 's2.index'}), '(a, index=s1.index, columns=s2.index)\n', (3726, 3763), True, 'import pandas as pd\n'), ((3992, 4015), 'empiricaldist.Pmf', 'Pmf', (['(1.0)', 'qs'], {}), '(1.0, qs, **options)\n', (3995, 4015), False, 'from empiricaldist import Pmf\n'), ((4315, 4334), 'numpy.meshgrid', 'np.meshgrid', (['s1', 's2'], {}), '(s1, s2)\n', (4326, 4334), True, 'import numpy as np\n'), ((4346, 4399), 'pandas.DataFrame', 'pd.DataFrame', (['(X * Y)'], {'columns': 's1.index', 'index': 's2.index'}), '(X * Y, columns=s1.index, index=s2.index)\n', (4358, 4399), True, 'import pandas as pd\n'), ((4743, 4760), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4754, 4760), True, 
'import numpy as np\n'), ((5736, 5761), 'numpy.linspace', 'np.linspace', (['low', 'high', '(6)'], {}), '(low, high, 6)\n', (5747, 5761), True, 'import numpy as np\n'), ((5848, 5905), 'matplotlib.pyplot.contour', 'plt.contour', (['joint.columns', 'joint.index', 'joint'], {}), '(joint.columns, joint.index, joint, **options)\n', (5859, 5905), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6203), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (6196, 6203), True, 'import numpy as np\n'), ((6211, 6230), 'scipy.stats.binom.pmf', 'binom.pmf', (['ks', 'n', 'p'], {}), '(ks, n, p)\n', (6220, 6230), False, 'from scipy.stats import binom\n'), ((6242, 6253), 'empiricaldist.Pmf', 'Pmf', (['ps', 'ks'], {}), '(ps, ks)\n', (6245, 6253), False, 'from empiricaldist import Pmf\n'), ((6419, 6447), 'scipy.stats.gamma', 'gamma', (['alpha'], {'scale': '(1 / beta)'}), '(alpha, scale=1 / beta)\n', (6424, 6447), False, 'from scipy.stats import gamma\n'), ((6706, 6717), 'empiricaldist.Pmf', 'Pmf', (['ps', 'qs'], {}), '(ps, qs)\n', (6709, 6717), False, 'from empiricaldist import Pmf\n'), ((6936, 6947), 'empiricaldist.Pmf', 'Pmf', (['ps', 'qs'], {}), '(ps, qs)\n', (6939, 6947), False, 'from empiricaldist import Pmf\n'), ((7212, 7232), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['sample'], {}), '(sample)\n', (7224, 7232), False, 'from scipy.stats import gaussian_kde\n'), ((7260, 7282), 'empiricaldist.Pmf', 'Pmf', (['ps', 'qs'], {}), '(ps, qs, **options)\n', (7263, 7282), False, 'from empiricaldist import Pmf\n'), ((7556, 7592), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['pmf.qs'], {'weights': 'pmf.ps'}), '(pmf.qs, weights=pmf.ps)\n', (7568, 7592), False, 'from scipy.stats import gaussian_kde\n'), ((7681, 7703), 'empiricaldist.Pmf', 'Pmf', (['ps', 'qs'], {}), '(ps, qs, **options)\n', (7684, 7703), False, 'from empiricaldist import Pmf\n'), ((7998, 8017), 'statsmodels.nonparametric.smoothers_lowess.lowess', 'lowess', (['endog', 'exog'], {}), '(endog, exog)\n', (8004, 8017), 
False, 'from statsmodels.nonparametric.smoothers_lowess import lowess\n'), ((8036, 8056), 'numpy.transpose', 'np.transpose', (['smooth'], {}), '(smooth)\n', (8048, 8056), True, 'import numpy as np\n'), ((8069, 8097), 'pandas.Series', 'pd.Series', (['data'], {'index': 'index'}), '(data, index=index)\n', (8078, 8097), True, 'import pandas as pd\n'), ((8803, 8833), 'pandas.DataFrame', 'pd.DataFrame', (['{x: [0], y: [0]}'], {}), '({x: [0], y: [0]})\n', (8815, 8833), True, 'import pandas as pd\n'), ((8840, 8872), 'seaborn.JointGrid', 'JointGrid', (['x', 'y', 'data'], {}), '(x, y, data, **options)\n', (8849, 8872), False, 'from seaborn import JointGrid\n'), ((2181, 2210), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname, **options)\n', (2192, 2210), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6687), 'scipy.stats.poisson', 'poisson', (['lam'], {}), '(lam)\n', (6682, 6687), False, 'from scipy.stats import poisson\n'), ((3015, 3036), 'pandas.DataFrame', 'pd.DataFrame', (['pmf_seq'], {}), '(pmf_seq)\n', (3027, 3036), True, 'import pandas as pd\n')] |
from itertools import combinations
import os
from re import T
import cv2
import numpy as np
from numpy.lib.function_base import append, select
from tensorflow import keras
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from scipy.spatial import distance
#this is for getting testing data
def preprocessing(face):
    """Standardize a face image for embedding extraction.

    face: image array; 3-D images that are not 160x160x3 are resized
    returns: zero-mean array scaled by an epsilon-guarded std
    raises: ValueError if the array is not 3-D or 4-D
    """
    if not (face.ndim == 4 or face.ndim == 3):
        # original read face.dim, which does not exist, so the error path
        # raised AttributeError instead of the intended ValueError
        raise ValueError('Dimension error ' + str(face.ndim))
    if not face.shape == (160, 160, 3):
        face = cv2.resize(face, (160, 160), interpolation=cv2.INTER_AREA)
    mean = np.mean(face, axis=(0, 1, 2), keepdims=True)
    std = np.std(face, axis=(0, 1, 2), keepdims=True)
    # guard against division by ~0 for near-constant images
    std_adj = np.maximum(std, 1.0 / np.sqrt(face.size))
    face = (face - mean) / std_adj
    return face
def l2_normalize(x):
    """Scale x to unit L2 norm along the last axis (epsilon-guarded)."""
    squared_norm = np.sum(np.square(x), axis=-1, keepdims=True)
    return x / np.sqrt(np.maximum(squared_norm, 1e-10))
def train():
    """Embed every cropped face and record intra-class embedding distances.

    For each identity directory under data_cropped/, computes FaceNet
    embeddings for all images, then the Euclidean distance between every
    pair of embeddings.  Writes the counts and distances to emb.txt.

    returns: dict mapping identity name -> list of intra-class distances
    """
    from itertools import combinations
    model = keras.models.load_model('facenet_keras.h5', compile=False)
    path = "data_cropped/"
    names = os.listdir(path)
    print(names)
    dic = {}
    # with-block guarantees the output file is closed on any error
    with open("emb.txt", "w") as f:
        for name in names:
            print("processing " + name)
            base_path = path + name + '/'
            face_paths = [os.path.join(base_path, fn) for fn in os.listdir(base_path)]
            embeddings = []
            for face_path in face_paths:
                face = cv2.imread(face_path)
                embeddings.append(
                    l2_normalize(model.predict(np.expand_dims(preprocessing(face), axis=0))))
            # All unordered pairs of this identity's embeddings.
            # (The original referenced an undefined name `you` here --
            # the line defining it was commented out -- causing NameError.)
            distances = [distance.euclidean(x, y)
                         for x, y in combinations(embeddings, 2)]
            f.write(str(len(distances)))
            f.write("\n")
            for d in distances:
                # original passed the float straight to write(), a TypeError
                f.write(str(d))
                f.write('\n')
            # original left dic empty (assignment was commented out) while
            # still returning it; populate it as the docstring promises
            dic[name] = distances
    return dic
def compare():
    """Sample five faces per known identity and print their pairwise distances.

    For every identity directory under ``data_cropped/`` this samples five
    face images, embeds them with the pre-trained FaceNet model, and prints
    the intra-identity Euclidean distances between the sampled embeddings.

    NOTE(review): the triple-quoted block below is dead code kept from an
    earlier inter-identity comparison experiment; ``test_path``/``in_path``
    are currently only printed, never used.
    """
    from random import sample
    model = keras.models.load_model('facenet_keras.h5', compile = False)
    t_path = "data_test/"
    c_path = "data_cropped/"
    i_path = "inpic/"
    base_path = [os.path.join(c_path, f) for f in os.listdir(c_path)]
    test_path = [os.path.join(t_path, g) for g in os.listdir(t_path)]
    in_path = [os.path.join(i_path, k) for k in os.listdir(i_path)]
    print(base_path)
    print(test_path)
    print(in_path)
    '''
    base_embs = []
    for i in base_path:
        chosen = sample(os.listdir(i), 1)
        chosen = str(i + '/' + chosen[0])
        face = cv2.imread(chosen)
        base_embs.append(l2_normalize(model.predict(np.expand_dims(preprocessing(face), axis=0))))
    test_embs = []
    for k in test_path:
        face = cv2.imread(k)
        test_embs.append(l2_normalize(model.predict(np.expand_dims(preprocessing(face), axis=0))))
    distance_inter = []
    for m in base_embs:
        for l in test_embs:
            distance_inter.append(distance.euclidean(l, m))
    '''
    #print(len(distance_inter))
    #print(distance_inter)
    output = []
    for i in base_path:
        # Pick five random face crops for this identity.
        the = [os.path.join(i, j) for j in os.listdir(i)]
        the = sample(the, 5)
        emb_test = []
        for k in the:
            f = cv2.imread(k)
            # One 128-dim FaceNet embedding per preprocessed face.
            emb_test.append(l2_normalize(model.predict(np.expand_dims(preprocessing(f), axis=0))))
        # Intra-identity distances over all unordered embedding pairs.
        comb = list(combinations(emb_test, 2))
        dis = []
        for (x, y) in comb:
            dis.append(distance.euclidean(x, y))
        output.append(dis)
    print(output)
print(len(output)) | [
"numpy.mean",
"random.sample",
"os.listdir",
"numpy.sqrt",
"os.path.join",
"numpy.square",
"itertools.combinations",
"scipy.spatial.distance.euclidean",
"tensorflow.keras.models.load_model",
"numpy.std",
"cv2.resize",
"cv2.imread"
] | [((622, 666), 'numpy.mean', 'np.mean', (['face'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(face, axis=(0, 1, 2), keepdims=True)\n', (629, 666), True, 'import numpy as np\n'), ((675, 718), 'numpy.std', 'np.std', (['face'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(face, axis=(0, 1, 2), keepdims=True)\n', (681, 718), True, 'import numpy as np\n'), ((965, 1023), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""facenet_keras.h5"""'], {'compile': '(False)'}), "('facenet_keras.h5', compile=False)\n", (988, 1023), False, 'from tensorflow import keras\n'), ((2177, 2235), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""facenet_keras.h5"""'], {'compile': '(False)'}), "('facenet_keras.h5', compile=False)\n", (2200, 2235), False, 'from tensorflow import keras\n'), ((548, 606), 'cv2.resize', 'cv2.resize', (['face', '(160, 160)'], {'interpolation': 'cv2.INTER_AREA'}), '(face, (160, 160), interpolation=cv2.INTER_AREA)\n', (558, 606), False, 'import cv2\n'), ((2333, 2356), 'os.path.join', 'os.path.join', (['c_path', 'f'], {}), '(c_path, f)\n', (2345, 2356), False, 'import os\n'), ((2403, 2426), 'os.path.join', 'os.path.join', (['t_path', 'g'], {}), '(t_path, g)\n', (2415, 2426), False, 'import os\n'), ((2471, 2494), 'os.path.join', 'os.path.join', (['i_path', 'k'], {}), '(i_path, k)\n', (2483, 2494), False, 'import os\n'), ((3343, 3357), 'random.sample', 'sample', (['the', '(5)'], {}), '(the, 5)\n', (3349, 3357), False, 'from random import sample\n'), ((751, 769), 'numpy.sqrt', 'np.sqrt', (['face.size'], {}), '(face.size)\n', (758, 769), True, 'import numpy as np\n'), ((1078, 1094), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1088, 1094), False, 'import os\n'), ((1295, 1321), 'os.path.join', 'os.path.join', (['base_path', 'f'], {}), '(base_path, f)\n', (1307, 1321), False, 'import os\n'), ((1459, 1472), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (1469, 1472), False, 'import cv2\n'), ((1740, 1760), 
'itertools.combinations', 'combinations', (['you', '(2)'], {}), '(you, 2)\n', (1752, 1760), False, 'from itertools import combinations\n'), ((2366, 2384), 'os.listdir', 'os.listdir', (['c_path'], {}), '(c_path)\n', (2376, 2384), False, 'import os\n'), ((2436, 2454), 'os.listdir', 'os.listdir', (['t_path'], {}), '(t_path)\n', (2446, 2454), False, 'import os\n'), ((2504, 2522), 'os.listdir', 'os.listdir', (['i_path'], {}), '(i_path)\n', (2514, 2522), False, 'import os\n'), ((3286, 3304), 'os.path.join', 'os.path.join', (['i', 'j'], {}), '(i, j)\n', (3298, 3304), False, 'import os\n'), ((3428, 3441), 'cv2.imread', 'cv2.imread', (['k'], {}), '(k)\n', (3438, 3441), False, 'import cv2\n'), ((3562, 3587), 'itertools.combinations', 'combinations', (['emb_test', '(2)'], {}), '(emb_test, 2)\n', (3574, 3587), False, 'from itertools import combinations\n'), ((1331, 1352), 'os.listdir', 'os.listdir', (['base_path'], {}), '(base_path)\n', (1341, 1352), False, 'import os\n'), ((1851, 1875), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['x', 'y'], {}), '(x, y)\n', (1869, 1875), False, 'from scipy.spatial import distance\n'), ((3314, 3327), 'os.listdir', 'os.listdir', (['i'], {}), '(i)\n', (3324, 3327), False, 'import os\n'), ((3658, 3682), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['x', 'y'], {}), '(x, y)\n', (3676, 3682), False, 'from scipy.spatial import distance\n'), ((891, 903), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (900, 903), True, 'import numpy as np\n')] |
import pytest
import numpy.testing as npt
@pytest.fixture
def graphs_and_features():
    """Build a random graph, a node-permuted copy, and matching feature tensors."""
    import numpy as np
    import torch

    perm = np.random.permutation(5)
    perm_mat = np.zeros((5, 5), dtype=np.float32)
    perm_mat[np.arange(5), perm] = 1
    perm_mat = torch.tensor(perm_mat, dtype=torch.float32)

    import dgl
    graph = dgl.rand_graph(5, 20)
    permuted = dgl.reorder_graph(
        graph,
        "custom",
        permute_config={"nodes_perm": perm},
    )

    import hpno
    graph = hpno.heterograph(graph)
    permuted = hpno.heterograph(permuted)

    feats = torch.randn(5, 3)
    permuted_feats = perm_mat @ feats
    return graph, permuted, feats, permuted_feats, perm_mat
def test_layer_equivariance(graphs_and_features):
    """A single HPNO layer's output must permute the same way as its node inputs."""
    g0, g1, h0, h1, perm = graphs_and_features

    import hpno
    layer = hpno.HierarchicalPathNetworkLayer(3, 4, 5, max_level=4)
    out0 = layer(g0, h0)
    out1 = layer(g1, h1)
    npt.assert_almost_equal(
        (perm @ out0).detach().numpy(),
        out1.detach().numpy(),
        decimal=5,
    )
def test_model_equivariance(graphs_and_features):
    """The full HPNO model's output must permute the same way as its node inputs."""
    g0, g1, h0, h1, perm = graphs_and_features

    import hpno
    model = hpno.HierarchicalPathNetwork(3, 4, 5, 2, max_level=4)
    out0 = model(g0, h0)
    out1 = model(g1, h1)
    npt.assert_almost_equal(
        (perm @ out0).detach().numpy(),
        out1.detach().numpy(),
        decimal=5,
    )
def test_readout_invariance(graphs_and_features):
    """The graph readout must be invariant under node permutation."""
    g0, g1, h0, h1, _ = graphs_and_features

    import hpno
    readout = hpno.GraphReadout(3, 4, 5, max_level=4)
    out0 = readout(g0, h0)
    out1 = readout(g1, h1)
    npt.assert_almost_equal(
        out0.detach().numpy(),
        out1.detach().numpy(),
        decimal=5,
    )
def test_model_and_readout_invariance(graphs_and_features):
    """Model followed by readout must be invariant under node permutation."""
    g0, g1, h0, h1, _ = graphs_and_features

    import hpno
    model = hpno.HierarchicalPathNetwork(
        3, 5, 5, 2,
        max_level=4,
        readout=hpno.GraphReadout(5, 5, 6),
    )
    out0 = model(g0, h0)
    out1 = model(g1, h1)
    npt.assert_almost_equal(
        out0.detach().numpy(),
        out1.detach().numpy(),
        decimal=5,
    )
| [
"dgl.reorder_graph",
"hpno.HierarchicalPathNetworkLayer",
"hpno.heterograph",
"torch.randn",
"torch.tensor",
"numpy.zeros",
"hpno.GraphReadout",
"hpno.HierarchicalPathNetwork",
"dgl.rand_graph",
"numpy.arange",
"numpy.random.permutation"
] | [((148, 172), 'numpy.random.permutation', 'np.random.permutation', (['(5)'], {}), '(5)\n', (169, 172), True, 'import numpy as np\n'), ((198, 232), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {'dtype': 'np.float32'}), '((5, 5), dtype=np.float32)\n', (206, 232), True, 'import numpy as np\n'), ((339, 392), 'torch.tensor', 'torch.tensor', (['permutation_matrix'], {'dtype': 'torch.float32'}), '(permutation_matrix, dtype=torch.float32)\n', (351, 392), False, 'import torch\n'), ((418, 439), 'dgl.rand_graph', 'dgl.rand_graph', (['(5)', '(20)'], {}), '(5, 20)\n', (432, 439), False, 'import dgl\n'), ((449, 528), 'dgl.reorder_graph', 'dgl.reorder_graph', (['g0', '"""custom"""'], {'permute_config': "{'nodes_perm': permutation_idx}"}), "(g0, 'custom', permute_config={'nodes_perm': permutation_idx})\n", (466, 528), False, 'import dgl\n'), ((585, 605), 'hpno.heterograph', 'hpno.heterograph', (['g0'], {}), '(g0)\n', (601, 605), False, 'import hpno\n'), ((615, 635), 'hpno.heterograph', 'hpno.heterograph', (['g1'], {}), '(g1)\n', (631, 635), False, 'import hpno\n'), ((646, 663), 'torch.randn', 'torch.randn', (['(5)', '(3)'], {}), '(5, 3)\n', (657, 663), False, 'import torch\n'), ((885, 940), 'hpno.HierarchicalPathNetworkLayer', 'hpno.HierarchicalPathNetworkLayer', (['(3)', '(4)', '(5)'], {'max_level': '(4)'}), '(3, 4, 5, max_level=4)\n', (918, 940), False, 'import hpno\n'), ((1263, 1316), 'hpno.HierarchicalPathNetwork', 'hpno.HierarchicalPathNetwork', (['(3)', '(4)', '(5)', '(2)'], {'max_level': '(4)'}), '(3, 4, 5, 2, max_level=4)\n', (1291, 1316), False, 'import hpno\n'), ((1641, 1680), 'hpno.GraphReadout', 'hpno.GraphReadout', (['(3)', '(4)', '(5)'], {'max_level': '(4)'}), '(3, 4, 5, max_level=4)\n', (1658, 1680), False, 'import hpno\n'), ((265, 277), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (274, 277), True, 'import numpy as np\n'), ((2083, 2109), 'hpno.GraphReadout', 'hpno.GraphReadout', (['(5)', '(5)', '(6)'], {}), '(5, 5, 6)\n', (2100, 2109), False, 'import 
hpno\n')] |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The neutronics physics package in the ARMI framework.
Neutronics encompasses the modeling of nuclear chain reactions and their associated transmutation and decay.
The ARMI Framework comes with a neutronics plugin that introduces two
independent interfaces:
:py:mod:`~armi.physics.neutronics.fissionProductModel`
Handles fission product modeling
:py:mod:`~armi.physics.neutronics.crossSectionGroupManager`
Handles the management of different cross section "groups"
.. warning:: There is also some legacy and question-raising code in this module that
is here temporarily while we finish untangling some of the neutronics
plugins outside of ARMI.
"""
import os
import yamlize
import numpy
import tabulate
from armi import plugins
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.utils import directoryChangers
from armi import runLog
class NeutronicsPlugin(plugins.ArmiPlugin):
    """
    The built-in neutronics plugin with a few capabilities and a lot of state parameter definitions.

    Registers the cross-section group manager and fission product model
    interfaces, their settings/parameters, and an ISOTXS-diffing entry point.
    """

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """
        Collect and expose all of the interfaces that live under the built-in neutronics package
        """
        from armi.physics.neutronics import crossSectionGroupManager
        from armi.physics.neutronics.fissionProductModel import fissionProductModel

        interfaceInfo = []
        # Gather interface descriptions from each neutronics sub-module.
        for mod in (crossSectionGroupManager, fissionProductModel):
            interfaceInfo += plugins.collectInterfaceDescriptions(mod, cs)

        return interfaceInfo

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameters():
        """Return the neutronics state-parameter definitions for the framework."""
        from . import parameters as neutronicsParameters

        return neutronicsParameters.getNeutronicsParameterDefinitions()

    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        """Expose the ISOTXS library comparison command-line entry point."""
        from armi.physics.neutronics import diffIsotxs

        entryPoints = [diffIsotxs.CompareIsotxsLibraries]

        return entryPoints

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Return the neutronics-related settings (cross sections, fission products)."""
        from . import settings as neutronicsSettings
        from armi.physics.neutronics import crossSectionSettings
        from armi.physics.neutronics.fissionProductModel import (
            fissionProductModelSettings,
        )

        settings = [
            crossSectionSettings.XSSettingDef(
                CONF_CROSS_SECTION,
            )
        ]
        settings += neutronicsSettings.defineSettings()
        settings += fissionProductModelSettings.defineSettings()
        return settings

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettingsValidators(inspector):
        """
        Check neutronics settings.

        Currently detects legacy cross-section input embedded in the
        blueprints file and offers to migrate it into the settings file.
        """
        from armi.operators import settingsValidation  # avoid cyclic import
        from armi.scripts.migration.crossSectionBlueprintsToSettings import (
            migrateCrossSectionsFromBlueprints,
        )

        queries = []

        def blueprintsHasOldXSInput(path):
            # Scan the blueprints file for a legacy "cross sections:" section.
            with directoryChangers.DirectoryChanger(inspector.cs.inputDirectory):
                with open(os.path.expandvars(path)) as f:
                    for line in f:
                        if line.startswith("cross sections:"):
                            return True
            return False

        queries.append(
            settingsValidation.Query(
                lambda: inspector.cs["loadingFile"]
                and blueprintsHasOldXSInput(inspector.cs["loadingFile"]),
                "The specified blueprints input file '{0}' contains compound cross section settings. "
                "".format(inspector.cs["loadingFile"]),
                "Automatically move them to the settings file, {}? WARNING: if multiple settings files point "
                "to this blueprints input you must manually update the others.".format(
                    inspector.cs.path
                ),
                lambda: migrateCrossSectionsFromBlueprints(inspector.cs),
            )
        )

        return queries

    @staticmethod
    @plugins.HOOKIMPL
    def onProcessCoreLoading(core, cs):
        """Apply user-supplied delayed neutron data to the core after loading."""
        applyEffectiveDelayedNeutronFractionToCore(core, cs)
from .const import (
GAMMA,
NEUTRON,
NEUTRONGAMMA,
ALL,
RESTARTFILES,
INPUTOUTPUT,
FLUXFILES,
)
# ARC and CCCC cross section file format names
COMPXS = "COMPXS"
PMATRX = "PMATRX"
GAMISO = "GAMISO"
ISOTXS = "ISOTXS"
# Constants for neutronics calculation types (values of the ``neutronicsType`` setting)
ADJOINT_CALC = "adjoint"
REAL_CALC = "real"
ADJREAL_CALC = "both"
# Constants for boundary conditions
# All external boundary conditions are set to zero outward current
INFINITE = "Infinite"
# "Planar" external boundaries conditions are set to zero outward current
REFLECTIVE = "Reflective"
# Generalized boundary conditions D * PHI PRIME + A * PHI = 0 where A is user-specified constant,
# D is the diffusion coefficient, PHI PRIME and PHI are the outward current and flux at the
# external boundaries.
GENERAL_BC = "Generalized"
# The following boundary conditions are three approximations of the vacuum boundary condition
# in diffusion theory.
#    'Extrapolated': sets A to 0.4692 (in generalized BC) to have the flux vanishing at
#        0.7104*transport mean free path through linear extrapolation. Derived for plane
#        geometries - should be valid for complex geometries unless radius of curvature is
#        comparable to the mean free path.
#    'ZeroSurfaceFlux': flux vanishes at the external boundary.
#    'ZeroInwardCurrent': set A to 0.5 (in generalized BC) to have Jminus = 0 at the external boundaries.
EXTRAPOLATED = "Extrapolated"
ZEROFLUX = "ZeroSurfaceFlux"
ZERO_INWARD_CURRENT = "ZeroInwardCurrent"
# Common settings checks
def gammaTransportIsRequested(cs):
    """
    Determine whether the user enabled gamma transport.

    Arguments
    ---------
    cs : ARMI settings object
        Object containing the default and user-specified ARMI settings controlling the simulation

    Returns
    -------
    bool
        True when the ``globalFluxActive`` setting includes gamma transport.
    """
    activeModes = cs["globalFluxActive"]
    return GAMMA in activeModes
def gammaXsAreRequested(cs):
    """
    Determine whether gamma cross-section generation was requested.

    Arguments
    ---------
    cs : ARMI settings object
        Object containing the default and user-specified ARMI settings controlling the simulation.

    Returns
    -------
    bool
        True when the ``genXS`` setting includes gamma cross sections.
    """
    requestedXS = cs["genXS"]
    return GAMMA in requestedXS
def adjointCalculationRequested(cs):
    """Return True when the ``neutronicsType`` setting asks for an adjoint flux solve."""
    return cs["neutronicsType"] in (ADJOINT_CALC, ADJREAL_CALC)
def realCalculationRequested(cs):
    """Return true if a real (forward) calculation is requested based on the ``neutronicsType`` setting.

    Consistency fix: use the module-level constants rather than bare string
    literals, matching ``adjointCalculationRequested`` above.
    """
    return cs["neutronicsType"] in [REAL_CALC, ADJREAL_CALC]
def applyEffectiveDelayedNeutronFractionToCore(core, cs):
    """Process the settings for the delayed neutron fraction and precursor decay constants.

    A scalar ``beta`` sets only the total delayed neutron fraction; paired
    ``beta``/``decayConstants`` lists additionally define the group-wise
    fractions and precursor decay constants on the core parameters.

    Parameters
    ----------
    core : Core
        Reactor core whose ``p.beta``/``p.betaComponents``/``p.betaDecayConstants``
        parameters are updated in place.
    cs : ARMI settings object
        Source of the ``beta`` and ``decayConstants`` settings.

    Raises
    ------
    ValueError
        If the group-wise ``beta`` and ``decayConstants`` lists have different lengths.
    """
    # Verify and set the core beta parameters based on the user-supplied settings
    beta = cs["beta"]
    decayConstants = cs["decayConstants"]

    # If beta is interpreted as a float, then assign it to
    # the total delayed neutron fraction parameter. Otherwise, setup the
    # group-wise delayed neutron fractions and precursor decay constants.
    reportTableData = []
    if isinstance(beta, float):
        core.p.beta = beta
        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))

    elif isinstance(beta, list) and isinstance(decayConstants, list):
        if len(beta) != len(decayConstants):
            raise ValueError(
                f"The values for `beta` ({beta}) and `decayConstants` "
                f"({decayConstants}) are not consistent lengths."
            )

        core.p.beta = sum(beta)
        core.p.betaComponents = numpy.array(beta)
        core.p.betaDecayConstants = numpy.array(decayConstants)

        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
        reportTableData.append(
            ("Group-wise Delayed Neutron Fractions", core.p.betaComponents)
        )
        reportTableData.append(
            ("Group-wise Precursor Decay Constants", core.p.betaDecayConstants)
        )

    # Report to the user the values were not applied.
    if not reportTableData and (beta is not None or decayConstants is not None):
        # Bug fix: the second line of this message was a plain string, so
        # ``{decayConstants}`` was printed literally instead of its value.
        runLog.warning(
            f"Delayed neutron fraction(s) - {beta} and decay constants"
            f" - {decayConstants} have not been applied."
        )
    else:
        runLog.extra(
            tabulate.tabulate(
                tabular_data=reportTableData,
                headers=["Component", "Value"],
                tablefmt="armi",
            )
        )
| [
"armi.runLog.warning",
"tabulate.tabulate",
"armi.plugins.collectInterfaceDescriptions",
"armi.physics.neutronics.fissionProductModel.fissionProductModelSettings.defineSettings",
"os.path.expandvars",
"armi.scripts.migration.crossSectionBlueprintsToSettings.migrateCrossSectionsFromBlueprints",
"numpy.ar... | [((3088, 3132), 'armi.physics.neutronics.fissionProductModel.fissionProductModelSettings.defineSettings', 'fissionProductModelSettings.defineSettings', ([], {}), '()\n', (3130, 3132), False, 'from armi.physics.neutronics.fissionProductModel import fissionProductModelSettings\n'), ((9141, 9270), 'armi.runLog.warning', 'runLog.warning', (['f"""Delayed neutron fraction(s) - {beta} and decay constants - {{decayConstants}} have not been applied."""'], {}), "(\n f'Delayed neutron fraction(s) - {beta} and decay constants - {{decayConstants}} have not been applied.'\n )\n", (9155, 9270), False, 'from armi import runLog\n'), ((2094, 2139), 'armi.plugins.collectInterfaceDescriptions', 'plugins.collectInterfaceDescriptions', (['mod', 'cs'], {}), '(mod, cs)\n', (2130, 2139), False, 'from armi import plugins\n'), ((2917, 2970), 'armi.physics.neutronics.crossSectionSettings.XSSettingDef', 'crossSectionSettings.XSSettingDef', (['CONF_CROSS_SECTION'], {}), '(CONF_CROSS_SECTION)\n', (2950, 2970), False, 'from armi.physics.neutronics import crossSectionSettings\n'), ((8594, 8611), 'numpy.array', 'numpy.array', (['beta'], {}), '(beta)\n', (8605, 8611), False, 'import numpy\n'), ((8648, 8675), 'numpy.array', 'numpy.array', (['decayConstants'], {}), '(decayConstants)\n', (8659, 8675), False, 'import numpy\n'), ((9340, 9440), 'tabulate.tabulate', 'tabulate.tabulate', ([], {'tabular_data': 'reportTableData', 'headers': "['Component', 'Value']", 'tablefmt': '"""armi"""'}), "(tabular_data=reportTableData, headers=['Component',\n 'Value'], tablefmt='armi')\n", (9357, 9440), False, 'import tabulate\n'), ((3599, 3662), 'armi.utils.directoryChangers.DirectoryChanger', 'directoryChangers.DirectoryChanger', (['inspector.cs.inputDirectory'], {}), '(inspector.cs.inputDirectory)\n', (3633, 3662), False, 'from armi.utils import directoryChangers\n'), ((4513, 4561), 'armi.scripts.migration.crossSectionBlueprintsToSettings.migrateCrossSectionsFromBlueprints', 
'migrateCrossSectionsFromBlueprints', (['inspector.cs'], {}), '(inspector.cs)\n', (4547, 4561), False, 'from armi.scripts.migration.crossSectionBlueprintsToSettings import migrateCrossSectionsFromBlueprints\n'), ((3690, 3714), 'os.path.expandvars', 'os.path.expandvars', (['path'], {}), '(path)\n', (3708, 3714), False, 'import os\n')] |
import cv2
import pandas as pd
from tqdm import tqdm
# Compute per-channel mean/std statistics over the external training images.
train = pd.read_csv('Christof/assets/train_ext1.csv')
#test = pd.read_csv('Christof/assets/sample_submission.csv')
path_to_train = 'Christof/assets/ext_tomomi/'
#path_to_test = 'Christof/assets/test_rgby_512/'

import numpy as np

# One PNG per training id (ids carry a stale extension that is stripped).
fns = [path_to_train + image_id[:-4] + '.png' for image_id in train['Id']]

channel_avg = np.zeros(3)
channel_std = np.zeros(3)
#images = np.zeros((len(fns),512,512,3))
for _, fn in tqdm(enumerate(fns)):
    img = cv2.imread(fn, cv2.IMREAD_UNCHANGED)
    pixels = np.reshape(img, (-1, 3))
    channel_avg += np.mean(pixels, axis=0)
    channel_std += np.std(pixels, axis=0)

channel_avg /= len(fns)
channel_std /= len(fns)
# Report statistics normalized to [0, 1].
print(channel_avg / 255)
print(channel_std/255) | [
"numpy.zeros",
"numpy.reshape",
"cv2.imread",
"pandas.read_csv"
] | [((62, 107), 'pandas.read_csv', 'pd.read_csv', (['"""Christof/assets/train_ext1.csv"""'], {}), "('Christof/assets/train_ext1.csv')\n", (73, 107), True, 'import pandas as pd\n'), ((363, 374), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (371, 374), True, 'import numpy as np\n'), ((389, 400), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (397, 400), True, 'import numpy as np\n'), ((489, 525), 'cv2.imread', 'cv2.imread', (['fn', 'cv2.IMREAD_UNCHANGED'], {}), '(fn, cv2.IMREAD_UNCHANGED)\n', (499, 525), False, 'import cv2\n'), ((553, 579), 'numpy.reshape', 'np.reshape', (['image', '(-1, 3)'], {}), '(image, (-1, 3))\n', (563, 579), True, 'import numpy as np\n'), ((612, 638), 'numpy.reshape', 'np.reshape', (['image', '(-1, 3)'], {}), '(image, (-1, 3))\n', (622, 638), True, 'import numpy as np\n')] |
import numpy as np
def random_split_data(data, label, proportion):
    """
    Randomly partition two parallel numpy arrays into two aligned splits.

    Args:
        - data: numpy array, split along its first axis
        - label: numpy array with the same leading dimension as ``data``
        - proportion: a float less than 1; fraction of rows in the first split
    """
    assert data.shape[0] == label.shape[0]
    n = data.shape[0]
    order = np.random.permutation(n)
    cut = int(proportion * n)
    first, second = order[:cut], order[cut:]
    return data[first], label[first], data[second], label[second]
"numpy.random.permutation"
] | [((355, 382), 'numpy.random.permutation', 'np.random.permutation', (['size'], {}), '(size)\n', (376, 382), True, 'import numpy as np\n')] |
from collections import Counter
from copy import copy
import json
import numpy as np
import re
import logging
from stanza.models.common.utils import ud_scores, harmonic_mean
from stanza.utils.conll import CoNLL
from stanza.models.common.doc import *
logger = logging.getLogger('stanza')
def load_mwt_dict(filename):
    """Load a multi-word-token expansion dictionary from a JSON file.

    The file holds ``[[token, expansion], count]`` records; for each surface
    token only the expansion with the highest count is kept.

    Returns None when ``filename`` is None.
    """
    if filename is None:
        return None
    with open(filename, 'r') as f:
        records = json.load(f)
    mwt_dict = dict()
    for (token, expansion), count in records:
        if token not in mwt_dict or mwt_dict[token][1] < count:
            mwt_dict[token] = (expansion, count)
    return mwt_dict
def process_sentence(sentence, mwt_dict=None):
    """Convert one tokenized sentence into a list of CoNLL-U style word dicts.

    ``sentence`` is a list of ``(token, tag, additional_info)`` triples where
    tags 3/4 mark multi-word tokens (see the caller). MWTs found in
    ``mwt_dict`` are expanded into a range line plus one line per sub-word;
    otherwise MWTs are flagged with ``MWT=Yes`` in the MISC column.
    ``ID``/``TEXT``/``MISC`` come from stanza's doc module (star import above).
    """
    sent = []
    i = 0
    for tok, p, additional_info in sentence:
        expansion = None
        if (p == 3 or p == 4) and mwt_dict is not None:
            # MWT found, (attempt to) expand it!
            # Look up the surface form, falling back to its lowercase form.
            if tok in mwt_dict:
                expansion = mwt_dict[tok][0]
            elif tok.lower() in mwt_dict:
                expansion = mwt_dict[tok.lower()][0]
        if expansion is not None:
            # Serialize additional_info into the pipe-separated MISC format.
            infostr = None if len(additional_info) == 0 else '|'.join([f"{k}={additional_info[k]}" for k in additional_info])
            # Range line covering the expanded sub-words (e.g. "3-4").
            sent.append({ID: f'{i+1}-{i+len(expansion)}', TEXT: tok})
            if infostr is not None: sent[-1][MISC] = infostr
            for etok in expansion:
                sent.append({ID: f'{i+1}', TEXT: etok})
                i += 1
        else:
            # Skip empty tokens entirely.
            if len(tok) <= 0:
                continue
            # Unexpandable MWT: keep it as one word but flag it in MISC.
            if p == 3 or p == 4:
                additional_info['MWT'] = 'Yes'
            infostr = None if len(additional_info) == 0 else '|'.join([f"{k}={additional_info[k]}" for k in additional_info])
            sent.append({ID: f'{i+1}', TEXT: tok})
            if infostr is not None: sent[-1][MISC] = infostr
            i += 1
    return sent
def find_token(token, text):
    """
    Robustly finds the first occurrence of token in the text, and return its offset and it's underlying original string.

    Any whitespace character in ``token`` matches any whitespace in ``text``,
    and arbitrary whitespace is allowed between consecutive characters, so
    spacing mismatches between the two are tolerated.
    """
    # Bug fix: use raw strings for the regex fragments; '\s' in a plain
    # string literal is an invalid escape sequence (DeprecationWarning now,
    # a SyntaxError in future Python versions).
    pattern = r'\s*'.join(
        [r'\s' if re.match(r'\s', ch) else re.escape(ch) for ch in token]
    )
    m = re.search(pattern, text)
    return m.start(), m.group()
def output_predictions(output_file, trainer, data_generator, vocab, mwt_dict, max_seqlen=1000, orig_text=None, no_ssplit=False, prob=False):
    """Run the tokenizer over all paragraphs and assemble the tokenized document.

    Paragraphs are processed longest-first in batches through
    ``trainer.predict``; over-long paragraphs are evaluated in windows of up
    to ``eval_limit`` characters that advance to the last predicted sentence
    break. The per-character tags (0=no break, 1=token end, 2=sentence end,
    3/4=MWT variants) are then decoded into tokens and sentences.

    Parameters
    ----------
    output_file : str or None
        When truthy, the resulting document is written out in CoNLL format.
    trainer : tokenizer trainer
        Provides ``predict`` and the ``args`` dict (batch size, shorthand).
    data_generator : data source
        Provides ``sentences`` and batched character windows via ``next``.
    vocab : character vocabulary
        Used for OOV counting and token normalization.
    mwt_dict : dict or None
        Optional multi-word-token expansion dictionary.
    max_seqlen : int
        Lower bound on the evaluation window size (floored at 3000).
    orig_text : str, optional
        When given, character offsets of each token are recovered from it.
    no_ssplit : bool
        When True, sentence splitting is suppressed.
    prob : bool
        When True, the raw prediction scores are also returned.

    Returns
    -------
    tuple
        ``(oov_count, n_chars, all_preds, doc)`` plus the list of raw
        prediction score arrays when ``prob`` is True.
    """
    paragraphs = []
    for i, p in enumerate(data_generator.sentences):
        start = 0 if i == 0 else paragraphs[-1][2]
        length = sum([len(x) for x in p])
        paragraphs += [(i, start, start+length, length+1)] # para idx, start idx, end idx, length

    # Evaluate longest paragraphs first so batches are length-homogeneous.
    paragraphs = list(sorted(paragraphs, key=lambda x: x[3], reverse=True))

    all_preds = [None] * len(paragraphs)
    all_raw = [None] * len(paragraphs)

    eval_limit = max(3000, max_seqlen)

    batch_size = trainer.args['batch_size']
    batches = int((len(paragraphs) + batch_size - 1) / batch_size)

    t = 0
    list_prob = []
    for i in range(batches):
        batchparas = paragraphs[i * batch_size : (i + 1) * batch_size]
        offsets = [x[1] for x in batchparas]
        t += sum([x[3] for x in batchparas])

        batch = data_generator.next(eval_offsets=offsets)
        raw = batch[3]

        N = len(batch[3][0])
        if N <= eval_limit:
            # Short enough to predict in one shot.
            a = trainer.predict(batch)
            pred = np.argmax(a, axis=2)
            # Cleanup: leftover debug prints ("555 ...", "Hi ...") demoted
            # to debug-level logging.
            logger.debug("prediction scores: %s", a)
            logger.debug("predicted tags: %s", pred)
            list_prob.append(a)
        else:
            # Window through the paragraph, restarting each window at the
            # last predicted sentence break so sentences stay intact.
            idx = [0] * len(batchparas)
            Ns = [p[3] for p in batchparas]
            pred = [[] for _ in batchparas]
            while True:
                ens = [min(N - idx1, eval_limit) for idx1, N in zip(idx, Ns)]
                en = max(ens)
                batch1 = batch[0][:, :en], batch[1][:, :en], batch[2][:, :en], [x[:en] for x in batch[3]]
                pred1 = np.argmax(trainer.predict(batch1), axis=2)
                logger.debug("window predictions: %s", pred1)

                for j in range(len(batchparas)):
                    sentbreaks = np.where((pred1[j] == 2) + (pred1[j] == 4))[0]
                    if len(sentbreaks) <= 0 or idx[j] >= Ns[j] - eval_limit:
                        advance = ens[j]
                    else:
                        advance = np.max(sentbreaks) + 1

                    pred[j] += [pred1[j, :advance]]
                    idx[j] += advance

                if all([idx1 >= N for idx1, N in zip(idx, Ns)]):
                    break
                batch = data_generator.next(eval_offsets=[x+y for x, y in zip(idx, offsets)])

            pred = [np.concatenate(p, 0) for p in pred]
            logger.debug("assembled predictions: %s", pred)

        for j, p in enumerate(batchparas):
            len1 = len([1 for x in raw[j] if x != '<PAD>'])
            # Force a token/sentence boundary at the end of each paragraph.
            if pred[j][len1-1] < 2:
                pred[j][len1-1] = 2
            elif pred[j][len1-1] > 2:
                pred[j][len1-1] = 4
            all_preds[p[0]] = pred[j][:len1]
            all_raw[p[0]] = raw[j]

    # Decode per-character tags into tokens and sentences.
    offset = 0
    oov_count = 0
    doc = []

    text = orig_text
    char_offset = 0
    for j in range(len(paragraphs)):
        raw = all_raw[j]
        pred = all_preds[j]

        current_tok = ''
        current_sent = []

        for t, p in zip(raw, pred):
            if t == '<PAD>':
                break
            # hack la_ittb
            if trainer.args['shorthand'] == 'la_ittb' and t in [":", ";"]:
                p = 2
            offset += 1
            if vocab.unit2id(t) == vocab.unit2id('<UNK>'):
                oov_count += 1

            current_tok += t
            if p >= 1:
                tok = vocab.normalize_token(current_tok)
                assert '\t' not in tok, tok
                if len(tok) <= 0:
                    current_tok = ''
                    continue
                if orig_text is not None:
                    st0, tok0 = find_token(tok, text)
                    st = char_offset + st0
                    text = text[st0 + len(tok0):]
                    char_offset += st0 + len(tok0)
                    additional_info = {START_CHAR: st, END_CHAR: st + len(tok0)}
                else:
                    additional_info = dict()
                current_sent += [(tok, p, additional_info)]
                current_tok = ''
                if (p == 2 or p == 4) and not no_ssplit:
                    doc.append(process_sentence(current_sent, mwt_dict))
                    current_sent = []

        if len(current_tok):
            # Flush any trailing characters as a final token.
            tok = vocab.normalize_token(current_tok)
            assert '\t' not in tok, tok
            if len(tok) > 0:
                if orig_text is not None:
                    st0, tok0 = find_token(tok, text)
                    st = char_offset + st0
                    text = text[st0 + len(tok0):]
                    char_offset += st0 + len(tok0)
                    # Bug fix: this dict used END_CHAR twice (the first entry
                    # was silently overwritten), so the trailing token lost
                    # its START_CHAR offset; mirror the branch above.
                    additional_info = {START_CHAR: st, END_CHAR: st + len(tok0)}
                else:
                    additional_info = dict()
                current_sent += [(tok, 2, additional_info)]

        if len(current_sent):
            doc.append(process_sentence(current_sent, mwt_dict))

    if output_file: CoNLL.dict2conll(doc, output_file)
    if prob:
        return oov_count, offset, all_preds, doc, list(list_prob)
    return oov_count, offset, all_preds, doc
def eval_model(args, trainer, batches, vocab, mwt_dict):
    """Tokenize the evaluation data and score token, sentence and MWT F1.

    Runs the tokenizer over ``batches``, writes CoNLL output to
    ``args['conll_file']`` and returns the weighted harmonic mean of the
    three F1 scores (MWT weighted lightly), used for model selection.

    Cleanup: removed an unused ``counter = Counter(...)`` dead assignment.
    """
    oov_count, N, all_preds, doc = output_predictions(args['conll_file'], trainer, batches, vocab, mwt_dict, args['max_seqlen'])

    all_preds = np.concatenate(all_preds, 0)
    labels = [y[1] for x in batches.data for y in x]

    def f1(pred, gold, mapping):
        # Map the 5-way tags onto the boundary scheme being scored, then
        # count boundary-delimited span matches.
        pred = [mapping[p] for p in pred]
        gold = [mapping[g] for g in gold]

        lastp = -1; lastg = -1
        tp = 0; fp = 0; fn = 0
        for i, (p, g) in enumerate(zip(pred, gold)):
            if p == g > 0 and lastp == lastg:
                lastp = i
                lastg = i
                tp += 1
            elif p > 0 and g > 0:
                lastp = i
                lastg = i
                fp += 1
                fn += 1
            elif p > 0:
                # and g == 0
                lastp = i
                fp += 1
            elif g > 0:
                lastg = i
                fn += 1

        if tp == 0:
            return 0
        else:
            return 2 * tp / (2 * tp + fp + fn)

    f1tok = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:1, 4:1})
    f1sent = f1(all_preds, labels, {0:0, 1:0, 2:1, 3:0, 4:1})
    f1mwt = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:2, 4:2})
    logger.info(f"{args['shorthand']}: token F1 = {f1tok*100:.2f}, sentence F1 = {f1sent*100:.2f}, mwt F1 = {f1mwt*100:.2f}")
    return harmonic_mean([f1tok, f1sent, f1mwt], [1, 1, .01])
| [
"logging.getLogger",
"re.escape",
"numpy.where",
"re.match",
"numpy.argmax",
"stanza.models.common.utils.harmonic_mean",
"numpy.max",
"numpy.concatenate",
"json.load",
"stanza.utils.conll.CoNLL.dict2conll"
] | [((261, 288), 'logging.getLogger', 'logging.getLogger', (['"""stanza"""'], {}), "('stanza')\n", (278, 288), False, 'import logging\n'), ((7603, 7631), 'numpy.concatenate', 'np.concatenate', (['all_preds', '(0)'], {}), '(all_preds, 0)\n', (7617, 7631), True, 'import numpy as np\n'), ((8823, 8874), 'stanza.models.common.utils.harmonic_mean', 'harmonic_mean', (['[f1tok, f1sent, f1mwt]', '[1, 1, 0.01]'], {}), '([f1tok, f1sent, f1mwt], [1, 1, 0.01])\n', (8836, 8874), False, 'from stanza.models.common.utils import ud_scores, harmonic_mean\n'), ((7240, 7274), 'stanza.utils.conll.CoNLL.dict2conll', 'CoNLL.dict2conll', (['doc', 'output_file'], {}), '(doc, output_file)\n', (7256, 7274), False, 'from stanza.utils.conll import CoNLL\n'), ((411, 423), 'json.load', 'json.load', (['f'], {}), '(f)\n', (420, 423), False, 'import json\n'), ((3433, 3453), 'numpy.argmax', 'np.argmax', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (3442, 3453), True, 'import numpy as np\n'), ((4670, 4690), 'numpy.concatenate', 'np.concatenate', (['p', '(0)'], {}), '(p, 0)\n', (4684, 4690), True, 'import numpy as np\n'), ((2208, 2226), 're.match', 're.match', (['"""\\\\s"""', 'x'], {}), "('\\\\s', x)\n", (2216, 2226), False, 'import re\n'), ((2231, 2243), 're.escape', 're.escape', (['x'], {}), '(x)\n', (2240, 2243), False, 'import re\n'), ((4124, 4167), 'numpy.where', 'np.where', (['((pred1[j] == 2) + (pred1[j] == 4))'], {}), '((pred1[j] == 2) + (pred1[j] == 4))\n', (4132, 4167), True, 'import numpy as np\n'), ((4349, 4367), 'numpy.max', 'np.max', (['sentbreaks'], {}), '(sentbreaks)\n', (4355, 4367), True, 'import numpy as np\n')] |
import base64
import json
import numpy as np
import pytest
from zarr.codecs import Blosc, Delta, Zlib
from zarr.errors import MetadataError
from zarr.meta import (ZARR_FORMAT, decode_array_metadata, decode_dtype,
decode_group_metadata, encode_array_metadata,
encode_dtype)
def assert_json_equal(expect, actual):
    """Assert that two JSON documents are semantically equal (bytes accepted)."""
    if isinstance(actual, bytes):
        actual = str(actual, 'ascii')
    expected_obj = json.loads(expect)
    actual_obj = json.loads(actual)
    assert expected_obj == actual_obj
assert ej == aj
def test_encode_decode_array_1():
    """Round-trip minimal float64 array metadata through encode/decode."""
    meta = {
        'shape': (100,),
        'chunks': (10,),
        'dtype': np.dtype('<f8'),
        'compressor': Zlib(1).get_config(),
        'fill_value': None,
        'filters': None,
        'order': 'C',
    }

    meta_json = '''{
        "chunks": [10],
        "compressor": {"id": "zlib", "level": 1},
        "dtype": "<f8",
        "fill_value": null,
        "filters": null,
        "order": "C",
        "shape": [100],
        "zarr_format": %s
    }''' % ZARR_FORMAT

    # encoding produces the expected JSON document
    encoded = encode_array_metadata(meta)
    assert_json_equal(meta_json, encoded)

    # decoding restores every field
    decoded = decode_array_metadata(encoded)
    assert ZARR_FORMAT == decoded['zarr_format']
    assert meta['shape'] == decoded['shape']
    assert meta['chunks'] == decoded['chunks']
    assert meta['dtype'] == decoded['dtype']
    assert meta['compressor'] == decoded['compressor']
    assert meta['order'] == decoded['order']
    assert decoded['fill_value'] is None
    assert decoded['filters'] is None
def test_encode_decode_array_2():
    """Round-trip array metadata exercising the non-default options:
    structured dtype, Blosc compressor, a Delta filter, Fortran order
    and a structured-scalar fill value.
    """
    # Filter and compressor with explicit (non-default) parameters
    df = Delta(astype='<u2', dtype='V14')
    compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
    dtype = np.dtype([('a', '<i4'), ('b', 'S10')])
    # zero-filled structured scalar used as the fill value
    fill_value = np.zeros((), dtype=dtype)[()]
    meta = dict(
        shape=(100, 100),
        chunks=(10, 10),
        dtype=dtype,
        compressor=compressor.get_config(),
        fill_value=fill_value,
        order='F',
        filters=[df.get_config()]
    )
    # NOTE: the "fill_value" string is the base64 encoding of the
    # structured scalar's raw (all-zero) bytes.
    meta_json = '''{
        "chunks": [10, 10],
        "compressor": {
            "id": "blosc",
            "clevel": 3,
            "cname": "lz4",
            "shuffle": 2,
            "blocksize": 0
        },
        "dtype": [["a", "<i4"], ["b", "|S10"]],
        "fill_value": "AAAAAAAAAAAAAAAAAAA=",
        "filters": [
            {"id": "delta", "astype": "<u2", "dtype": "|V14"}
        ],
        "order": "F",
        "shape": [100, 100],
        "zarr_format": %s
    }''' % ZARR_FORMAT
    # test encoding
    meta_enc = encode_array_metadata(meta)
    assert_json_equal(meta_json, meta_enc)
    # test decoding: every field must survive the round trip
    meta_dec = decode_array_metadata(meta_enc)
    assert ZARR_FORMAT == meta_dec['zarr_format']
    assert meta['shape'] == meta_dec['shape']
    assert meta['chunks'] == meta_dec['chunks']
    assert meta['dtype'] == meta_dec['dtype']
    assert meta['compressor'] == meta_dec['compressor']
    assert meta['order'] == meta_dec['order']
    assert fill_value == meta_dec['fill_value']
    assert [df.get_config()] == meta_dec['filters']
def test_encode_decode_array_complex():
    """Round-trip array metadata for complex dtypes (``c8``/``c16``).

    A complex fill value is serialised as a ``[real, imag]`` pair, with
    non-finite components encoded as strings (here ``"NaN"``).
    """
    # repeat for single- and double-precision complex
    for k in ['c8', 'c16']:
        compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
        dtype = np.dtype(k)
        # fill value with a NaN real part and -1 imaginary part
        fill_value = dtype.type(np.nan-1j)
        meta = dict(
            shape=(100, 100),
            chunks=(10, 10),
            dtype=dtype,
            compressor=compressor.get_config(),
            fill_value=fill_value,
            order=dtype.char,
            filters=[]
        )
        meta_json = '''{
            "chunks": [10, 10],
            "compressor": {
                "id": "blosc",
                "clevel": 3,
                "cname": "lz4",
                "shuffle": 2,
                "blocksize": 0
            },
            "dtype": "%s",
            "fill_value": ["NaN", -1.0],
            "filters": [],
            "order": "%s",
            "shape": [100, 100],
            "zarr_format": %s
        }''' % (dtype.str, dtype.char, ZARR_FORMAT)
        # test encoding
        meta_enc = encode_array_metadata(meta)
        assert_json_equal(meta_json, meta_enc)
        # test decoding
        meta_dec = decode_array_metadata(meta_enc)
        assert ZARR_FORMAT == meta_dec['zarr_format']
        assert meta['shape'] == meta_dec['shape']
        assert meta['chunks'] == meta_dec['chunks']
        assert meta['dtype'] == meta_dec['dtype']
        assert meta['compressor'] == meta_dec['compressor']
        assert meta['order'] == meta_dec['order']
        # Compare the raw bytes because NaN != NaN under ordinary equality.
        # Based off of this SO answer: https://stackoverflow.com/a/49972198
        assert np.all(
            fill_value.view((np.uint8, fill_value.itemsize)) ==
            meta_dec['fill_value'].view((np.uint8, meta_dec['fill_value'].itemsize))
        )
        assert [] == meta_dec['filters']
def test_encode_decode_array_datetime_timedelta():
    """Round-trip array metadata for timedelta64 and datetime64 dtypes.

    The NaT fill value is serialised as -9223372036854775808, i.e. the
    int64 minimum (-2**63) sentinel.
    """
    # repeat for timedelta64[s] and datetime64[s]
    for k in ['m8[s]', 'M8[s]']:
        compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
        dtype = np.dtype(k)
        fill_value = dtype.type("NaT")
        meta = dict(
            shape=(100, 100),
            chunks=(10, 10),
            dtype=dtype,
            compressor=compressor.get_config(),
            fill_value=fill_value,
            order=dtype.char,
            filters=[]
        )
        meta_json = '''{
            "chunks": [10, 10],
            "compressor": {
                "id": "blosc",
                "clevel": 3,
                "cname": "lz4",
                "shuffle": 2,
                "blocksize": 0
            },
            "dtype": "%s",
            "fill_value": -9223372036854775808,
            "filters": [],
            "order": "%s",
            "shape": [100, 100],
            "zarr_format": %s
        }''' % (dtype.str, dtype.char, ZARR_FORMAT)
        # test encoding
        meta_enc = encode_array_metadata(meta)
        assert_json_equal(meta_json, meta_enc)
        # test decoding
        meta_dec = decode_array_metadata(meta_enc)
        assert ZARR_FORMAT == meta_dec['zarr_format']
        assert meta['shape'] == meta_dec['shape']
        assert meta['chunks'] == meta_dec['chunks']
        assert meta['dtype'] == meta_dec['dtype']
        assert meta['compressor'] == meta_dec['compressor']
        assert meta['order'] == meta_dec['order']
        # Compare the raw bytes because NaT != NaT under ordinary equality.
        # Based off of this SO answer: https://stackoverflow.com/a/49972198
        assert np.all(
            fill_value.view((np.uint8, fill_value.itemsize)) ==
            meta_dec['fill_value'].view((np.uint8, meta_dec['fill_value'].itemsize))
        )
        assert [] == meta_dec['filters']
def test_encode_decode_array_dtype_shape():
    """A sub-array dtype (``(10, 10)<f8``) is flattened on encode: its
    dimensions are appended to the array shape and only the base dtype
    is stored, mirroring numpy's treatment of unstructured sub-arrays."""
    meta = {
        'shape': (100,),
        'chunks': (10,),
        'dtype': np.dtype('(10, 10)<f8'),
        'compressor': Zlib(1).get_config(),
        'fill_value': None,
        'filters': None,
        'order': 'C',
    }
    meta_json = '''{
        "chunks": [10],
        "compressor": {"id": "zlib", "level": 1},
        "dtype": "<f8",
        "fill_value": null,
        "filters": null,
        "order": "C",
        "shape": [100, 10, 10],
        "zarr_format": %s
    }''' % ZARR_FORMAT

    # encoding produces the expected (flattened) JSON document
    encoded = encode_array_metadata(meta)
    assert_json_equal(meta_json, encoded)

    # decoding: sub-array dims are unpacked into the shape and the dtype
    # collapses to its base
    decoded = decode_array_metadata(encoded)
    assert decoded['zarr_format'] == ZARR_FORMAT
    assert decoded['shape'] == meta['shape'] + meta['dtype'].shape
    assert decoded['chunks'] == meta['chunks']
    assert decoded['dtype'] == meta['dtype'].base
    assert decoded['compressor'] == meta['compressor']
    assert decoded['order'] == meta['order']
    assert decoded['fill_value'] is None
    assert decoded['filters'] is None
def test_encode_decode_array_structured():
    """Round-trip array metadata for a structured dtype whose fields
    include sub-array dimensions; field shapes are preserved inside the
    encoded dtype description."""
    meta = dict(
        shape=(100,),
        chunks=(10,),
        dtype=np.dtype('<i8, (10, 10)<f8, (5, 10, 15)u1'),
        compressor=Zlib(1).get_config(),
        fill_value=None,
        filters=None,
        order='C'
    )
    # each field is encoded as [name, base-dtype, optional-shape]
    meta_json = '''{
        "chunks": [10],
        "compressor": {"id": "zlib", "level": 1},
        "dtype": [["f0", "<i8"], ["f1", "<f8", [10, 10]], ["f2", "|u1", [5, 10, 15]]],
        "fill_value": null,
        "filters": null,
        "order": "C",
        "shape": [100],
        "zarr_format": %s
    }''' % ZARR_FORMAT
    # test encoding
    meta_enc = encode_array_metadata(meta)
    assert_json_equal(meta_json, meta_enc)
    # test decoding
    meta_dec = decode_array_metadata(meta_enc)
    assert ZARR_FORMAT == meta_dec['zarr_format']
    # to maintain consistency with numpy unstructured arrays, unpack dimensions into shape
    assert meta['shape'] + meta['dtype'].shape == meta_dec['shape']
    assert meta['chunks'] == meta_dec['chunks']
    # the top-level dtype collapses to its base on decode
    assert meta['dtype'].base == meta_dec['dtype']
    assert meta['compressor'] == meta_dec['compressor']
    assert meta['order'] == meta_dec['order']
    assert meta_dec['fill_value'] is None
    assert meta_dec['filters'] is None
def test_encode_decode_fill_values_nan():
    """Non-finite float fill values round-trip through the metadata.

    NaN, -inf and +inf are encoded as the JSON strings "NaN",
    "-Infinity" and "Infinity"; after decoding, the value must satisfy
    the matching numpy predicate (isnan / isneginf / isposinf).
    """
    # FIX: np.NINF / np.PINF were removed in NumPy 2.0; use the plain
    # inf constants instead (identical values, works on all versions).
    fills = (
        (np.nan, "NaN", np.isnan),
        (-np.inf, "-Infinity", np.isneginf),
        (np.inf, "Infinity", np.isposinf),
    )
    for v, s, f in fills:
        meta = dict(
            shape=(100,),
            chunks=(10,),
            dtype=np.dtype('<f8'),
            compressor=Zlib(1).get_config(),
            fill_value=v,
            filters=None,
            order='C'
        )
        meta_json = '''{
        "chunks": [10],
        "compressor": {"id": "zlib", "level": 1},
        "dtype": "<f8",
        "fill_value": "%s",
        "filters": null,
        "order": "C",
        "shape": [100],
        "zarr_format": %s
    }''' % (s, ZARR_FORMAT)
        # test encoding
        meta_enc = encode_array_metadata(meta)
        assert_json_equal(meta_json, meta_enc)
        # test decoding: equality cannot be used for NaN, so check the
        # decoded value with the corresponding predicate instead
        meta_dec = decode_array_metadata(meta_enc)
        actual = meta_dec['fill_value']
        assert f(actual)
def test_encode_decode_fill_values_bytes():
    """Bytes fill values are stored base64-encoded in the JSON metadata
    and decoded back into numpy ``S``-dtype scalars."""
    dtype = np.dtype('S10')
    for fill in (b'foo', bytes(10)):
        # setup and encode metadata
        meta = {
            'shape': (100,),
            'chunks': (10,),
            'dtype': dtype,
            'compressor': Zlib(1).get_config(),
            'fill_value': fill,
            'filters': None,
            'order': 'C',
        }
        encoded = encode_array_metadata(meta)

        # the expected JSON carries the fill value as base64 text
        b64_fill = base64.standard_b64encode(fill).decode()
        meta_json = '''{
        "chunks": [10],
        "compressor": {"id": "zlib", "level": 1},
        "dtype": "|S10",
        "fill_value": "%s",
        "filters": null,
        "order": "C",
        "shape": [100],
        "zarr_format": %s
    }''' % (b64_fill, ZARR_FORMAT)
        assert_json_equal(meta_json, encoded)

        # decoding restores a numpy scalar equal to the original bytes
        decoded = decode_array_metadata(encoded)
        assert np.array(fill, dtype=dtype)[()] == decoded['fill_value']
def test_decode_array_unsupported_format():
    """Array metadata with an unknown zarr_format raises MetadataError."""
    # otherwise-valid metadata claiming a (one lower) format version
    bad_meta = '''{
        "zarr_format": %s,
        "shape": [100],
        "chunks": [10],
        "dtype": "<f8",
        "compressor": {"id": "zlib", "level": 1},
        "fill_value": null,
        "order": "C"
    }''' % (ZARR_FORMAT - 1)
    with pytest.raises(MetadataError):
        decode_array_metadata(bad_meta)
def test_decode_array_missing_fields():
    """Array metadata lacking the required fields raises MetadataError."""
    # the format version alone is not sufficient
    incomplete = '''{
        "zarr_format": %s
    }''' % ZARR_FORMAT
    with pytest.raises(MetadataError):
        decode_array_metadata(incomplete)
def test_encode_decode_dtype():
    """Encoding a dtype, serialising via JSON and decoding is lossless."""
    cases = ['f8', [('a', 'f8')], [('a', 'f8'), ('b', 'i1')]]
    for spec in cases:
        original = np.dtype(spec)
        encoded = encode_dtype(original)
        # round-trip through JSON to prove the encoding is serialisable
        decoded = decode_dtype(json.loads(json.dumps(encoded)))
        assert decoded == original
def test_decode_group():
    """Group metadata decodes for the current format and raises
    MetadataError for an unsupported format version."""
    # typical document
    b = '''{
        "zarr_format": %s
    }''' % ZARR_FORMAT
    assert decode_group_metadata(b)['zarr_format'] == ZARR_FORMAT

    # unsupported format version
    b = '''{
        "zarr_format": %s
    }''' % (ZARR_FORMAT - 1)
    with pytest.raises(MetadataError):
        decode_group_metadata(b)
| [
"zarr.meta.decode_group_metadata",
"json.loads",
"zarr.meta.decode_array_metadata",
"zarr.meta.decode_dtype",
"json.dumps",
"zarr.codecs.Blosc",
"base64.standard_b64encode",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"zarr.meta.encode_array_metadata",
"zarr.codecs.Delta",
"numpy.dtype",
... | [((443, 461), 'json.loads', 'json.loads', (['expect'], {}), '(expect)\n', (453, 461), False, 'import json\n'), ((471, 489), 'json.loads', 'json.loads', (['actual'], {}), '(actual)\n', (481, 489), False, 'import json\n'), ((1055, 1082), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (1076, 1082), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((1162, 1193), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (1183, 1193), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((1635, 1667), 'zarr.codecs.Delta', 'Delta', ([], {'astype': '"""<u2"""', 'dtype': '"""V14"""'}), "(astype='<u2', dtype='V14')\n", (1640, 1667), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((1685, 1724), 'zarr.codecs.Blosc', 'Blosc', ([], {'cname': '"""lz4"""', 'clevel': '(3)', 'shuffle': '(2)'}), "(cname='lz4', clevel=3, shuffle=2)\n", (1690, 1724), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((1737, 1775), 'numpy.dtype', 'np.dtype', (["[('a', '<i4'), ('b', 'S10')]"], {}), "([('a', '<i4'), ('b', 'S10')])\n", (1745, 1775), True, 'import numpy as np\n'), ((2588, 2615), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (2609, 2615), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((2695, 2726), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (2716, 2726), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((7263, 7290), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (7284, 7290), False, 
'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((7370, 7401), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (7391, 7401), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((8614, 8641), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (8635, 8641), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((8721, 8752), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (8742, 8752), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((10421, 10436), 'numpy.dtype', 'np.dtype', (['"""S10"""'], {}), "('S10')\n", (10429, 10436), True, 'import numpy as np\n'), ((12502, 12526), 'zarr.meta.decode_group_metadata', 'decode_group_metadata', (['b'], {}), '(b)\n', (12523, 12526), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((1793, 1818), 'numpy.zeros', 'np.zeros', (['()'], {'dtype': 'dtype'}), '((), dtype=dtype)\n', (1801, 1818), True, 'import numpy as np\n'), ((3233, 3272), 'zarr.codecs.Blosc', 'Blosc', ([], {'cname': '"""lz4"""', 'clevel': '(3)', 'shuffle': '(2)'}), "(cname='lz4', clevel=3, shuffle=2)\n", (3238, 3272), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((3289, 3300), 'numpy.dtype', 'np.dtype', (['k'], {}), '(k)\n', (3297, 3300), True, 'import numpy as np\n'), ((4130, 4157), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (4151, 4157), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, 
decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((4249, 4280), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (4270, 4280), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((5026, 5065), 'zarr.codecs.Blosc', 'Blosc', ([], {'cname': '"""lz4"""', 'clevel': '(3)', 'shuffle': '(2)'}), "(cname='lz4', clevel=3, shuffle=2)\n", (5031, 5065), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((5082, 5093), 'numpy.dtype', 'np.dtype', (['k'], {}), '(k)\n', (5090, 5093), True, 'import numpy as np\n'), ((5926, 5953), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (5947, 5953), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((6045, 6076), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (6066, 6076), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((10146, 10173), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (10167, 10173), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((10265, 10296), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (10286, 10296), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((10771, 10798), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (10792, 10798), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), 
((10863, 10891), 'base64.standard_b64encode', 'base64.standard_b64encode', (['v'], {}), '(v)\n', (10888, 10891), False, 'import base64\n'), ((11344, 11375), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_enc'], {}), '(meta_enc)\n', (11365, 11375), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((11823, 11851), 'pytest.raises', 'pytest.raises', (['MetadataError'], {}), '(MetadataError)\n', (11836, 11851), False, 'import pytest\n'), ((11861, 11893), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_json'], {}), '(meta_json)\n', (11882, 11893), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((12037, 12065), 'pytest.raises', 'pytest.raises', (['MetadataError'], {}), '(MetadataError)\n', (12050, 12065), False, 'import pytest\n'), ((12075, 12107), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_json'], {}), '(meta_json)\n', (12096, 12107), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((12259, 12272), 'json.dumps', 'json.dumps', (['e'], {}), '(e)\n', (12269, 12272), False, 'import json\n'), ((12312, 12325), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (12322, 12325), False, 'import json\n'), ((12338, 12353), 'zarr.meta.decode_dtype', 'decode_dtype', (['o'], {}), '(o)\n', (12350, 12353), False, 'from zarr.meta import ZARR_FORMAT, decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((12676, 12704), 'pytest.raises', 'pytest.raises', (['MetadataError'], {}), '(MetadataError)\n', (12689, 12704), False, 'import pytest\n'), ((12714, 12738), 'zarr.meta.decode_group_metadata', 'decode_group_metadata', (['b'], {}), '(b)\n', (12735, 12738), False, 'from zarr.meta import ZARR_FORMAT, 
decode_array_metadata, decode_dtype, decode_group_metadata, encode_array_metadata, encode_dtype\n'), ((622, 637), 'numpy.dtype', 'np.dtype', (['"""<f8"""'], {}), "('<f8')\n", (630, 637), True, 'import numpy as np\n'), ((6814, 6837), 'numpy.dtype', 'np.dtype', (['"""(10, 10)<f8"""'], {}), "('(10, 10)<f8')\n", (6822, 6837), True, 'import numpy as np\n'), ((8090, 8133), 'numpy.dtype', 'np.dtype', (['"""<i8, (10, 10)<f8, (5, 10, 15)u1"""'], {}), "('<i8, (10, 10)<f8, (5, 10, 15)u1')\n", (8098, 8133), True, 'import numpy as np\n'), ((11433, 11457), 'numpy.array', 'np.array', (['v'], {'dtype': 'dtype'}), '(v, dtype=dtype)\n', (11441, 11457), True, 'import numpy as np\n'), ((12233, 12245), 'numpy.dtype', 'np.dtype', (['dt'], {}), '(dt)\n', (12241, 12245), True, 'import numpy as np\n'), ((12369, 12381), 'numpy.dtype', 'np.dtype', (['dt'], {}), '(dt)\n', (12377, 12381), True, 'import numpy as np\n'), ((9643, 9658), 'numpy.dtype', 'np.dtype', (['"""<f8"""'], {}), "('<f8')\n", (9651, 9658), True, 'import numpy as np\n'), ((658, 665), 'zarr.codecs.Zlib', 'Zlib', (['(1)'], {}), '(1)\n', (662, 665), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((6858, 6865), 'zarr.codecs.Zlib', 'Zlib', (['(1)'], {}), '(1)\n', (6862, 6865), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((8154, 8161), 'zarr.codecs.Zlib', 'Zlib', (['(1)'], {}), '(1)\n', (8158, 8161), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((9683, 9690), 'zarr.codecs.Zlib', 'Zlib', (['(1)'], {}), '(1)\n', (9687, 9690), False, 'from zarr.codecs import Blosc, Delta, Zlib\n'), ((10646, 10653), 'zarr.codecs.Zlib', 'Zlib', (['(1)'], {}), '(1)\n', (10650, 10653), False, 'from zarr.codecs import Blosc, Delta, Zlib\n')] |
# Converted Jupyter notebook: queries a datacube for LANDSAT_7 LEDAPS data
# over the product's full spatial/temporal extent and loads it.
import numpy as np
import datacube
from datetime import datetime
dc = datacube.Datacube(app = 'my_app', config = '/home/localuser/.datacube.conf')
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi(config = '/home/localuser/.datacube.conf')
# <hr>
#
# ## <a id="plat_prod">Select the Product and Platform</a> [▴](#top)
# In[2]:
# Change the data platform and data cube here
platform = "LANDSAT_7"
# platform = "LANDSAT_8"
# product = "ls7_ledaps_ghana"
product = "ls7_ledaps_general"
# product = "ls7_ledaps_senegal"
# product = "ls7_ledaps_sierra_leone"
# product = "ls7_ledaps_tanzania"
# product = "ls7_ledaps_vietnam"
# Get Extents
extents = api.get_full_dataset_extent(platform = platform, product = product)
# <hr>
#
# ## <a id="extents">Determine the Extents of the Data</a> [▴](#top)
# In[3]:
# NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
# consider datetime.now(timezone.utc) (would need np.datetime64 handling).
dt = datetime.utcnow()
dt64 = np.datetime64(dt)
# ts1/ts2 are elapsed seconds between "now" and the dataset's min/max
# timestamps (float seconds).
ts1 = (dt64 - np.datetime64(min(extents['time'].values))) / np.timedelta64(1, 's')
# NOTE(review): this utcfromtimestamp result is discarded — presumably a
# leftover notebook display cell; it interprets a "seconds ago" offset as
# an epoch timestamp, which looks wrong. Verify the intended time range.
datetime.utcfromtimestamp(ts1)
ts2 = (dt64 - np.datetime64(max(extents['time'].values))) / np.timedelta64(1, 's')
datetime.utcfromtimestamp(ts2)
# full spatial bounding box of the product
latitude_extents = (min(extents['latitude'].values),max(extents['latitude'].values))
longitude_extents = (min(extents['longitude'].values),max(extents['longitude'].values))
# NOTE(review): dc.load's `time` usually expects datetimes/strings, not
# second offsets — confirm these extents select the intended period.
time_extents = (ts1, ts2)
# <hr>
#
# ## <a id="define_extents">Define the Region to Be Examined</a> [▴](#top)
# In[4]:
from utils.data_cube_utilities.dc_display_map import display_map
display_map(latitude_extents, longitude_extents)
# Load the full dataset for the chosen platform/product and extents.
dataset = dc.load(platform = platform,
                  product = product,
                  latitude = latitude_extents,
                  longitude = longitude_extents,
                  time = time_extents)
print(dc.list_products()) | [
"datetime.datetime.utcfromtimestamp",
"utils.data_cube_utilities.dc_display_map.display_map",
"datetime.datetime.utcnow",
"datacube.Datacube",
"utils.data_cube_utilities.data_access_api.DataAccessApi",
"numpy.datetime64",
"numpy.timedelta64"
] | [((71, 143), 'datacube.Datacube', 'datacube.Datacube', ([], {'app': '"""my_app"""', 'config': '"""/home/localuser/.datacube.conf"""'}), "(app='my_app', config='/home/localuser/.datacube.conf')\n", (88, 143), False, 'import datacube\n'), ((216, 277), 'utils.data_cube_utilities.data_access_api.DataAccessApi', 'dc_api.DataAccessApi', ([], {'config': '"""/home/localuser/.datacube.conf"""'}), "(config='/home/localuser/.datacube.conf')\n", (236, 277), True, 'import utils.data_cube_utilities.data_access_api as dc_api\n'), ((877, 894), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (892, 894), False, 'from datetime import datetime\n'), ((902, 919), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (915, 919), True, 'import numpy as np\n'), ((1004, 1034), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['ts1'], {}), '(ts1)\n', (1029, 1034), False, 'from datetime import datetime\n'), ((1118, 1148), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['ts2'], {}), '(ts2)\n', (1143, 1148), False, 'from datetime import datetime\n'), ((1522, 1570), 'utils.data_cube_utilities.dc_display_map.display_map', 'display_map', (['latitude_extents', 'longitude_extents'], {}), '(latitude_extents, longitude_extents)\n', (1533, 1570), False, 'from utils.data_cube_utilities.dc_display_map import display_map\n'), ((981, 1003), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (995, 1003), True, 'import numpy as np\n'), ((1095, 1117), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (1109, 1117), True, 'import numpy as np\n')] |
""" Function to create the Rocket
The rocket_builder.py file contains the build function to
build the Rocket. Moreover, it contains the preprocess
and postprocess functions that will be added to the model.
"""
from __future__ import division
import json
import types
import os
import cv2
import numpy as np
import PIL
from PIL import ImageDraw
import torch
import torch.nn as nn
from . import utils
from . import model as lightpose
def build(config_path: str = '') -> nn.Module:
    """Builds a pytorch compatible deep learning model

    Loads the rocket's JSON config and class map, instantiates the
    lightweight pose network, restores its weights, and attaches the
    `preprocess`/`postprocess` helpers plus the `config` and `classes`
    metadata to the returned module.

    Args:
        config_path (str): path to a config JSON; when empty, the
            bundled ``config.json`` next to this file is used.
    """
    rocket_dir = os.path.realpath(os.path.dirname(__file__))

    def _read_json(path):
        # Small helper: parse a JSON file into a Python object.
        with open(path, 'r') as fp:
            return json.load(fp)

    if not config_path:
        config_path = os.path.join(rocket_dir, "config.json")
    config = _read_json(config_path)
    classes = _read_json(os.path.join(rocket_dir, config['classes_path']))

    # Instantiate the network and restore the bundled weights on CPU.
    model = lightpose.PoseEstimationWithMobileNet()
    checkpoint = torch.load(
        os.path.join(rocket_dir, config['weights_path']), map_location='cpu')
    utils.load_state(model, checkpoint)

    # Attach pre/post-processing as bound methods and expose metadata.
    model.preprocess = types.MethodType(preprocess, model)
    model.postprocess = types.MethodType(postprocess, model)
    model.config = config
    model.classes = classes
    return model
def preprocess(self, img: PIL.Image.Image) -> torch.Tensor:
    """Converts a PIL Image into the pytorch tensor this model expects.

    Steps: RGB->BGR conversion, resize so the height matches the
    network input height, mean/scale normalization, and padding to a
    multiple of the network stride.

    Args:
        img (PIL.Image.Image): input image with 3 color channels

    Returns:
        torch.Tensor: float tensor of shape (1, C, H, W)

    Raises:
        TypeError: if *img* is not a PIL.Image.Image
    """
    # Test if the input is a PIL image
    if not isinstance(img, PIL.Image.Image):
        raise TypeError(
            'wrong input type: got {} but expected PIL.Image.Image.'.format(type(img)))
    # Load the parameters from config.json
    stride = self.config['stride']
    pad_value = tuple(self.config['pad_color_RGB'])
    img_mean = tuple(self.config['mean_RGB'])
    img_scale = 1.0 / 255  # Typo in the initial repo was 1/256
    net_input_height_size = self.config['input_size'][0]
    # Conver the PIL image to a numpy array
    np_img = np.array(img)
    # Converting the image from RGB to BGR
    img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
    # Scale the input image so its height equals the network input height
    height, _, _ = img.shape
    scale = net_input_height_size / height
    scaled_img = cv2.resize(img, (0, 0), fx=scale,
                            fy=scale, interpolation=cv2.INTER_CUBIC)
    # Normalize the input image (subtract mean, multiply by 1/255)
    normalized_img = utils.normalize(
        scaled_img,
        img_mean,
        img_scale
    )
    # Pad the input image width up to at least the network input size
    min_dims = [net_input_height_size, max(
        normalized_img.shape[1], net_input_height_size)]
    padded_img, _ = utils.pad_width(
        normalized_img,
        stride,
        pad_value,
        min_dims
    )
    # Convert numpy HWC to a (1, C, H, W) float tensor
    out_tensor = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
    return out_tensor
def postprocess(self, pose_output: torch.Tensor, input_img: PIL.Image.Image, visualize: bool = False):
    """Converts the raw network output into an interpretable format.

    The last two stages of *pose_output* (keypoint heatmaps and part
    affinity fields) are upsampled, keypoints are extracted and grouped
    into humans, and coordinates are mapped back to the original image.

    Args:
        pose_output (Tensor): raw multi-stage output of the pose network;
            only the last two entries (heatmaps, PAFs) are used
        input_img (PIL.Image.Image): original, unpreprocessed input image
        visualize (bool): if True return a copy of *input_img* with the
            detected skeletons drawn; otherwise return a list of dicts,
            one per human, mapping keypoint name to
            {'x': ..., 'y': ..., 'confidence': ...}

    Raises:
        TypeError: if *input_img* is not a PIL.Image.Image
    """
    # Test if the input is a PIL image
    if not isinstance(input_img, PIL.Image.Image):
        raise TypeError(
            'wrong input type: got {} but expected PIL.Image.Image.'.format(type(input_img)))
    # Load parameters from config.json (must mirror preprocess so the
    # inverse coordinate transform below is consistent)
    stride = self.config['stride']
    upsample_ratio = self.config['upsample_ratio']
    pad_value = tuple(self.config['pad_color_RGB'])
    img_mean = tuple(self.config['mean_RGB'])
    img_scale = 1.0 / 255
    net_input_height_size = self.config['input_size'][0]
    # Convert PIL image to numpy array
    np_img = np.array(input_img)
    img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
    height, _, _ = img.shape
    scale = net_input_height_size / height
    # Re-run the geometric part of preprocessing only to recover the
    # padding offsets (pad) needed to undo the transform
    scaled_img = cv2.resize(img, (0, 0), fx=scale,
                            fy=scale, interpolation=cv2.INTER_CUBIC)
    scaled_img = utils.normalize(scaled_img, img_mean, img_scale)
    min_dims = [net_input_height_size, max(
        scaled_img.shape[1], net_input_height_size)]
    _, pad = utils.pad_width(scaled_img, stride, pad_value, min_dims)
    # Extract the keypoint heatmaps (heatmaps)
    stage2_heatmaps = pose_output[-2]
    heatmaps = np.transpose(
        stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
    heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio,
                          fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
    # Extract the Part Affinity Fields (pafs)
    stage2_pafs = pose_output[-1]
    pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
    pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio,
                      fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
    # Extract the keypoints from each of the 18 heatmap channels
    total_keypoints_num = 0
    all_keypoints_by_type = []
    for kpt_idx in range(18):  # 19th for bg
        total_keypoints_num += utils.extract_keypoints(
            heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
    # Group the keypoints into individual humans using the PAFs
    pose_entries, all_keypoints = utils.group_keypoints(
        all_keypoints_by_type, pafs, demo=True)
    # Convert the position of the keypoints to the original image
    # (undo upsampling, padding and scaling applied during preprocessing)
    for kpt in all_keypoints:
        kpt[0] = (kpt[0] * stride / upsample_ratio - pad[1]) / scale
        kpt[1] = (kpt[1] * stride / upsample_ratio - pad[0]) / scale
    # Convert the list of keypoints to a list of dictionary:
    # [
    #   {"name_kpt":
    #       {"x": x_pos, "y": y_pos, "confidence": confidence_score},
    #    ...},
    #  ...]
    list_humans_poses = []
    for human in pose_entries:
        human_pose = {}
        # last two entries of `human` are score/count bookkeeping; -1
        # marks a keypoint that was not detected for this person
        for kpt_id, kpt_location in enumerate(human[:-2]):
            if not kpt_location == -1:
                kpt_info = all_keypoints[int(kpt_location)]
                kpt_name = self.classes[str(int(kpt_id))]
                x_pos = kpt_info[0]
                y_pos = kpt_info[1]
                confidence_score = kpt_info[2]
                human_pose[kpt_name] = {
                    'x': x_pos,
                    'y': y_pos,
                    'confidence': confidence_score
                }
        list_humans_poses.append(human_pose)
    if visualize:
        # Visualization parameters
        line_width = 2
        line_color = (0, 225, 225, 255)
        point_radius = 4
        point_color = (255, 255, 255, 255)
        # Initialize the context to draw on the image
        img_out = input_img.copy()
        ctx = ImageDraw.Draw(img_out, 'RGBA')
        # Draw the skeleton of each human in the picture
        for human in list_humans_poses:
            # Draw every connection defined in classes.json
            for connection in self.classes['connections']:
                # Test if both keypoints have been found
                human_has_kpt_a = connection[0] in human.keys()
                human_has_kpt_b = connection[1] in human.keys()
                if human_has_kpt_a and human_has_kpt_b:
                    # Get the coordinates of the two keypoints
                    kpt_a_x = int(round(human[connection[0]]['x']))
                    kpt_a_y = int(round(human[connection[0]]['y']))
                    kpt_b_x = int(round(human[connection[1]]['x']))
                    kpt_b_y = int(round(human[connection[1]]['y']))
                    # Draw the line between the two keypoints
                    ctx.line(
                        [(kpt_a_x, kpt_a_y), (kpt_b_x, kpt_b_y)],
                        fill=line_color,
                        width=line_width,
                        joint=None)
            # Draw Keypoints
            for _, item in human.items():
                # Create bounding box of the point
                top_left = (
                    int(round(item['x'] - point_radius)),
                    int(round(item['y'] - point_radius))
                )
                bottom_right = (
                    int(round(item['x'] + point_radius)),
                    int(round(item['y'] + point_radius))
                )
                # Draw the point at the keypoint position
                ctx.ellipse(
                    [top_left, bottom_right],
                    fill=point_color,
                    outline=None,
                    width=0
                )
        del ctx
        return img_out
    return list_humans_poses
| [
"torch.load",
"torch.from_numpy",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.dirname",
"cv2.cvtColor",
"json.load",
"cv2.resize",
"types.MethodType"
] | [((1468, 1512), 'torch.load', 'torch.load', (['weights_path'], {'map_location': '"""cpu"""'}), "(weights_path, map_location='cpu')\n", (1478, 1512), False, 'import torch\n'), ((1578, 1614), 'types.MethodType', 'types.MethodType', (['postprocess', 'model'], {}), '(postprocess, model)\n', (1594, 1614), False, 'import types\n'), ((1638, 1673), 'types.MethodType', 'types.MethodType', (['preprocess', 'model'], {}), '(preprocess, model)\n', (1654, 1673), False, 'import types\n'), ((2883, 2896), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2891, 2896), True, 'import numpy as np\n'), ((2951, 2990), 'cv2.cvtColor', 'cv2.cvtColor', (['np_img', 'cv2.COLOR_RGB2BGR'], {}), '(np_img, cv2.COLOR_RGB2BGR)\n', (2963, 2990), False, 'import cv2\n'), ((3109, 3183), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n', (3119, 3183), False, 'import cv2\n'), ((5106, 5125), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (5114, 5125), True, 'import numpy as np\n'), ((5136, 5175), 'cv2.cvtColor', 'cv2.cvtColor', (['np_img', 'cv2.COLOR_RGB2BGR'], {}), '(np_img, cv2.COLOR_RGB2BGR)\n', (5148, 5175), False, 'import cv2\n'), ((5266, 5340), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n', (5276, 5340), False, 'import cv2\n'), ((5797, 5898), 'cv2.resize', 'cv2.resize', (['heatmaps', '(0, 0)'], {'fx': 'upsample_ratio', 'fy': 'upsample_ratio', 'interpolation': 'cv2.INTER_CUBIC'}), '(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio,\n interpolation=cv2.INTER_CUBIC)\n', (5807, 5898), False, 'import cv2\n'), ((6090, 6187), 'cv2.resize', 'cv2.resize', (['pafs', '(0, 0)'], {'fx': 'upsample_ratio', 'fy': 'upsample_ratio', 'interpolation': 'cv2.INTER_CUBIC'}), '(pafs, (0, 0), fx=upsample_ratio, 
fy=upsample_ratio,\n interpolation=cv2.INTER_CUBIC)\n', (6100, 6187), False, 'import cv2\n'), ((1032, 1052), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1041, 1052), False, 'import json\n'), ((1248, 1268), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1257, 1268), False, 'import json\n'), ((7955, 7986), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_out', '"""RGBA"""'], {}), "(img_out, 'RGBA')\n", (7969, 7986), False, 'from PIL import ImageDraw\n'), ((1131, 1156), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1146, 1156), False, 'import os\n'), ((1399, 1424), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1414, 1424), False, 'import os\n'), ((925, 950), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (940, 950), False, 'import os\n'), ((3662, 3690), 'torch.from_numpy', 'torch.from_numpy', (['padded_img'], {}), '(padded_img)\n', (3678, 3690), False, 'import torch\n')] |
import pytest
import numpy as np
from ctapipe.image.geometry_converter import (
convert_geometry_hex1d_to_rect2d,
convert_geometry_rect2d_back_to_hexe1d,
astri_to_2d_array,
array_2d_to_astri,
chec_to_2d_array,
array_2d_to_chec,
)
from ctapipe.image.hillas import hillas_parameters
from ctapipe.instrument import CameraDescription, CameraGeometry
from ctapipe.image.toymodel import Gaussian
import astropy.units as u
# All camera names known to ctapipe; each one becomes a pytest parametrization case.
camera_names = CameraDescription.get_known_camera_names()
def create_mock_image(geom):
    """Generate a synthetic shower image whose Gaussian parameters scale
    with the radial size of the given camera geometry."""
    # Radial extent of the camera: distance of the outermost pixel from the center.
    pixel_radii = np.sqrt(geom.pix_x ** 2 + geom.pix_y ** 2)
    cam_radius = np.max(pixel_radii)
    toy_model = Gaussian(
        x=0.3 * cam_radius,
        y=0 * u.m,
        width=0.03 * cam_radius,
        length=0.10 * cam_radius,
        psi="25d",
    )
    _, mock_image, _ = toy_model.generate_image(
        geom, intensity=0.5 * geom.n_pixels, nsb_level_pe=3,
    )
    return mock_image
@pytest.mark.parametrize("rot", [3,])
@pytest.mark.parametrize("camera_name", camera_names)
def test_convert_geometry(camera_name, rot):
    """Round-trip a toy image through the 1d<->2d geometry conversion and
    check that the Hillas orientation angle is preserved."""
    geom = CameraGeometry.from_name(camera_name)
    image = create_mock_image(geom)
    hillas_0 = hillas_parameters(geom, image)
    if geom.pix_type == "hexagonal":
        convert_geometry_1d_to_2d = convert_geometry_hex1d_to_rect2d
        convert_geometry_back = convert_geometry_rect2d_back_to_hexe1d
        geom2d, image2d = convert_geometry_1d_to_2d(
            geom, image, geom.camera_name + str(rot), add_rot=rot
        )
        geom1d, image1d = convert_geometry_back(
            geom2d, image2d, geom.camera_name + str(rot), add_rot=rot
        )
    else:
        if geom.camera_name == "ASTRICam":
            convert_geometry_1d_to_2d = astri_to_2d_array
            convert_geometry_back = array_2d_to_astri
        elif geom.camera_name == "CHEC":
            convert_geometry_1d_to_2d = chec_to_2d_array
            convert_geometry_back = array_2d_to_chec
        else:
            # BUG FIX: message was a plain string, so the camera name was
            # never interpolated; it needs the f-string prefix.
            print(f"camera {geom.camera_name} not implemented")
            return
        image2d = convert_geometry_1d_to_2d(image)
        image1d = convert_geometry_back(image2d)
    hillas_1 = hillas_parameters(geom, image1d)
    # if __name__ == "__main__":
    #     plot_cam(geom, geom2d, geom1d, image, image2d, image1d)
    #     plt.tight_layout()
    #     plt.pause(.1)
    # Orientation of the reconstructed image must match the original within 1 deg.
    assert np.abs(hillas_1.phi - hillas_0.phi).deg < 1.0
    # TODO: test other parameters
@pytest.mark.parametrize("rot", [3,])
@pytest.mark.parametrize("camera_name", camera_names)
def test_convert_geometry_mock(camera_name, rot):
    """Use an unknown cache key for the back conversion so the mock
    (recomputed) conversion path is exercised."""
    geom = CameraGeometry.from_name(camera_name)
    image = create_mock_image(geom)
    hillas_before = hillas_parameters(geom, image)
    if geom.pix_type != "hexagonal":
        # Originally rectangular geometries don't need a buffer and therefore
        # have no mock conversion to exercise.
        return
    rect_geom, rect_image = convert_geometry_hex1d_to_rect2d(
        geom, image, key=None, add_rot=rot
    )
    _, hex_image = convert_geometry_rect2d_back_to_hexe1d(
        rect_geom, rect_image, "_".join([geom.camera_name, str(rot), "mock"]), add_rot=rot
    )
    hillas_after = hillas_parameters(geom, hex_image)
    assert np.abs(hillas_after.phi - hillas_before.phi).deg < 1.0
# def plot_cam(geom, geom2d, geom1d, image, image2d, image1d):
# # plt.viridis()
# plt.figure(figsize=(12, 4))
# ax = plt.subplot(1, 3, 1)
# CameraDisplay(geom, image=image).add_colorbar()
# plt.subplot(1, 3, 2, sharex=ax, sharey=ax)
# CameraDisplay(geom2d, image=image2d).add_colorbar()
# plt.subplot(1, 3, 3, sharex=ax, sharey=ax)
# CameraDisplay(geom1d, image=image1d).add_colorbar()
#
#
# if __name__ == "__main__":
# import logging
# logging.basicConfig(level=logging.DEBUG)
# for camera_name in CameraGeometry.get_known_camera_names():
# test_convert_geometry(camera_name, 3)
# plt.show()
| [
"numpy.abs",
"ctapipe.instrument.CameraDescription.get_known_camera_names",
"ctapipe.instrument.CameraGeometry.from_name",
"numpy.sqrt",
"pytest.mark.parametrize",
"ctapipe.image.toymodel.Gaussian",
"ctapipe.image.hillas.hillas_parameters"
] | [((457, 499), 'ctapipe.instrument.CameraDescription.get_known_camera_names', 'CameraDescription.get_known_camera_names', ([], {}), '()\n', (497, 499), False, 'from ctapipe.instrument import CameraDescription, CameraGeometry\n'), ((971, 1006), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rot"""', '[3]'], {}), "('rot', [3])\n", (994, 1006), False, 'import pytest\n'), ((1009, 1061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""camera_name"""', 'camera_names'], {}), "('camera_name', camera_names)\n", (1032, 1061), False, 'import pytest\n'), ((2487, 2522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rot"""', '[3]'], {}), "('rot', [3])\n", (2510, 2522), False, 'import pytest\n'), ((2525, 2577), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""camera_name"""', 'camera_names'], {}), "('camera_name', camera_names)\n", (2548, 2577), False, 'import pytest\n'), ((700, 798), 'ctapipe.image.toymodel.Gaussian', 'Gaussian', ([], {'x': '(0.3 * camera_r)', 'y': '(0 * u.m)', 'width': '(0.03 * camera_r)', 'length': '(0.1 * camera_r)', 'psi': '"""25d"""'}), "(x=0.3 * camera_r, y=0 * u.m, width=0.03 * camera_r, length=0.1 *\n camera_r, psi='25d')\n", (708, 798), False, 'from ctapipe.image.toymodel import Gaussian\n'), ((1119, 1156), 'ctapipe.instrument.CameraGeometry.from_name', 'CameraGeometry.from_name', (['camera_name'], {}), '(camera_name)\n', (1143, 1156), False, 'from ctapipe.instrument import CameraDescription, CameraGeometry\n'), ((1208, 1238), 'ctapipe.image.hillas.hillas_parameters', 'hillas_parameters', (['geom', 'image'], {}), '(geom, image)\n', (1225, 1238), False, 'from ctapipe.image.hillas import hillas_parameters\n'), ((2206, 2238), 'ctapipe.image.hillas.hillas_parameters', 'hillas_parameters', (['geom', 'image1d'], {}), '(geom, image1d)\n', (2223, 2238), False, 'from ctapipe.image.hillas import hillas_parameters\n'), ((2738, 2775), 'ctapipe.instrument.CameraGeometry.from_name', 'CameraGeometry.from_name', 
(['camera_name'], {}), '(camera_name)\n', (2762, 2775), False, 'from ctapipe.instrument import CameraDescription, CameraGeometry\n'), ((2827, 2857), 'ctapipe.image.hillas.hillas_parameters', 'hillas_parameters', (['geom', 'image'], {}), '(geom, image)\n', (2844, 2857), False, 'from ctapipe.image.hillas import hillas_parameters\n'), ((3421, 3453), 'ctapipe.image.hillas.hillas_parameters', 'hillas_parameters', (['geom', 'image1d'], {}), '(geom, image1d)\n', (3438, 3453), False, 'from ctapipe.image.hillas import hillas_parameters\n'), ((644, 686), 'numpy.sqrt', 'np.sqrt', (['(geom.pix_x ** 2 + geom.pix_y ** 2)'], {}), '(geom.pix_x ** 2 + geom.pix_y ** 2)\n', (651, 686), True, 'import numpy as np\n'), ((2404, 2439), 'numpy.abs', 'np.abs', (['(hillas_1.phi - hillas_0.phi)'], {}), '(hillas_1.phi - hillas_0.phi)\n', (2410, 2439), True, 'import numpy as np\n'), ((3465, 3500), 'numpy.abs', 'np.abs', (['(hillas_1.phi - hillas_0.phi)'], {}), '(hillas_1.phi - hillas_0.phi)\n', (3471, 3500), True, 'import numpy as np\n')] |
import argparse
import pathlib
import sys
import gdcm
import imageio
import nibabel as nib
import numpy as np
import pydicom
# Command-line interface: both the DICOM input folder and the NIfTI output
# folder must be supplied.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "-i",
    "--input",
    type=pathlib.Path,
    metavar="folder",
    help="Dicom input folder",
    dest="input_folder",
    required=True,
)
parser.add_argument(
    "-o",
    "--output",
    default="nii",
    type=pathlib.Path,
    metavar="folder",
    help="output folder",
    dest="output_folder",
    required=True,
)
# Folder (patient/session) names to skip during conversion.
ignore = ["ID00052637202186188008618"]
def get_gdcm_to_numpy_typemap():
    """Returns the GDCM Pixel Format to numpy array type mapping."""
    return {
        gdcm.PixelFormat.UINT8: np.uint8,
        gdcm.PixelFormat.INT8: np.int8,
        # 12-bit integer formats have no direct numpy counterpart.
        gdcm.PixelFormat.UINT16: np.uint16,
        gdcm.PixelFormat.INT16: np.int16,
        gdcm.PixelFormat.UINT32: np.uint32,
        gdcm.PixelFormat.INT32: np.int32,
        # FLOAT16 likewise has no mapping here.
        gdcm.PixelFormat.FLOAT32: np.float32,
        gdcm.PixelFormat.FLOAT64: np.float64,
    }
def get_numpy_array_type(gdcm_pixel_format):
    """Returns a numpy array typecode given a GDCM Pixel Format."""
    typemap = get_gdcm_to_numpy_typemap()
    return typemap[gdcm_pixel_format]
# Based on http://gdcm.sourceforge.net/html/ConvertNumpy_8py-example.html
def gdcm_to_numpy(filename, apply_intercep_scale=False):
    """Read a DICOM file with GDCM and return its pixel data as a numpy array.

    Args:
        filename: path of the DICOM file to read.
        apply_intercep_scale: if True, apply the DICOM rescale slope and
            intercept and return int16 values.

    Returns:
        numpy array with the (squeezed) image data.

    Raises:
        Exception: if GDCM fails to read the file.
    """
    reader = gdcm.ImageReader()
    reader.SetFileName(filename)
    if not reader.Read():
        # BUG FIX: the f-string had no placeholder, so the offending file
        # name was never reported.
        raise Exception(f"It was not possible to read {filename}")
    image = reader.GetImage()
    pf = image.GetPixelFormat()
    if image.GetNumberOfDimensions() == 3:
        shape = (
            image.GetDimension(2),
            image.GetDimension(1),
            image.GetDimension(0),
            pf.GetSamplesPerPixel(),
        )
    else:
        shape = image.GetDimension(1), image.GetDimension(0), pf.GetSamplesPerPixel()
    dtype = get_numpy_array_type(pf.GetScalarType())
    gdcm_array = image.GetBuffer()
    # GetBuffer() returns a str; recover the raw bytes via surrogateescape.
    np_array = np.frombuffer(
        gdcm_array.encode("utf-8", errors="surrogateescape"), dtype=dtype
    )
    np_array.shape = shape
    np_array = np_array.squeeze()
    if apply_intercep_scale:
        shift = image.GetIntercept()
        scale = image.GetSlope()
        output = np.empty_like(np_array, np.int16)
        output[:] = scale * np_array + shift
        return output
    else:
        return np_array
def read_dicom_to_ndarray(folder: pathlib.Path) -> np.ndarray:
    """Load every DICOM slice in *folder* into a single volume array.

    Slices are ordered by Image Position (Patient) via gdcm.IPPSorter; if
    the sort drops files (e.g. missing tags), the directory order is used.
    """
    print(folder)
    # IDIOM FIX: redundant list() around the comprehension removed.
    dicom_files = [str(path) for path in folder.iterdir()]
    sorter = gdcm.IPPSorter()
    sorter.Sort(dicom_files)
    sorted_dicom_files = sorter.GetFilenames()
    if len(sorted_dicom_files) != len(dicom_files):
        # Sorting failed for at least one slice -> fall back to listing order.
        sorted_dicom_files = dicom_files
    return np.array([gdcm_to_numpy(dcm_file) for dcm_file in sorted_dicom_files])
def save_to_nii(data: np.ndarray, filename: str):
    """Write *data* to *filename* as a NIfTI image with an identity affine."""
    nib.save(nib.Nifti1Image(data, np.eye(4)), filename)
def create_screenshot(data: np.ndarray, filename: str):
    """Save a maximum-intensity projection along the first axis to *filename*."""
    projection = data.max(0)
    imageio.imwrite(filename, projection)
def main():
    """Convert every non-ignored DICOM folder under --input into a NIfTI file."""
    args, _ = parser.parse_known_args()
    input_folder = args.input_folder.absolute()
    output_folder = args.output_folder.absolute()
    for dicom_folder in input_folder.iterdir():
        if dicom_folder.name in ignore:
            continue
        dcm_array = read_dicom_to_ndarray(dicom_folder)
        nii_filename = output_folder.joinpath(dicom_folder.name, "image.nii.gz")
        nii_filename.parent.mkdir(parents=True, exist_ok=True)
        save_to_nii(dcm_array, str(nii_filename))
        # create_screenshot(dcm_array, str(nii_filename).replace("nii.gz", "png"))


if __name__ == "__main__":
    main()
| [
"numpy.eye",
"nibabel.save",
"argparse.ArgumentParser",
"imageio.imwrite",
"gdcm.IPPSorter",
"numpy.array",
"gdcm.ImageReader",
"numpy.empty_like"
] | [((136, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (159, 215), False, 'import argparse\n'), ((1558, 1576), 'gdcm.ImageReader', 'gdcm.ImageReader', ([], {}), '()\n', (1574, 1576), False, 'import gdcm\n'), ((2760, 2776), 'gdcm.IPPSorter', 'gdcm.IPPSorter', ([], {}), '()\n', (2774, 2776), False, 'import gdcm\n'), ((3048, 3068), 'numpy.array', 'np.array', (['dicom_arrs'], {}), '(dicom_arrs)\n', (3056, 3068), True, 'import numpy as np\n'), ((3168, 3191), 'nibabel.save', 'nib.save', (['img', 'filename'], {}), '(img, filename)\n', (3176, 3191), True, 'import nibabel as nib\n'), ((3282, 3318), 'imageio.imwrite', 'imageio.imwrite', (['filename', 'mip_image'], {}), '(filename, mip_image)\n', (3297, 3318), False, 'import imageio\n'), ((2450, 2483), 'numpy.empty_like', 'np.empty_like', (['np_array', 'np.int16'], {}), '(np_array, np.int16)\n', (2463, 2483), True, 'import numpy as np\n'), ((3153, 3162), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3159, 3162), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes for handling multi-task environments."""
import gym
import numpy as np
import tensorflow as tf
class TaskDistribution(object):
  """Defines a distribution over tasks.

  Tasks can be parametrized by goals or other representations. The evaluate,
  combine, and split methods may be called on either tensorflow Tensors or
  Numpy arrays, so we must support both.
  """

  @property
  def task_space(self):
    return self._task_space

  def sample(self):
    """Samples a task."""
    pass

  def _assert_is_batched(self, *arrays):
    """Checks that all arrays are batched.

    Args:
      *arrays: any number of arrays.
    """
    shape_list = []
    for array in arrays:
      if isinstance(array, tf.Tensor):
        shape_list.append(array.shape.as_list())
      else:
        shape_list.append(np.shape(array))
    # All arrays should have at least two dimensions.
    # IDIOM FIX: generator expressions / set comprehension instead of
    # building throwaway lists inside all()/any()/set().
    assert all(len(shape) >= 2 for shape in shape_list)
    # All arrays should have the same batch size.
    assert len({shape[0] for shape in shape_list}) == 1

  def _tf_call(self, fn, *inputs):
    """Calls fn on tensor versions of inputs; converts the output back to
    numpy when none of the inputs were Tensors."""
    any_tf_inputs = any(isinstance(array, tf.Tensor) for array in inputs)
    tf_inputs = [
        tf.constant(array) if not isinstance(array, tf.Tensor) else array
        for array in inputs
    ]
    output = fn(*tf_inputs)
    if not any_tf_inputs:
      output = [array.numpy() for array in output]
    return output

  def evaluate(self, states, actions, tasks):
    """Evaluates states and actions under the provided tasks.

    Args:
      states: a batch of states.
      actions: a batch of actions.
      tasks: a batch of tasks

    Returns:
      rewards: a batch of rewards
      dones: a batch of boolean done flags that are True when the episode
        has terminated. Note that this is the logical negation of the discount.
    """
    self._assert_is_batched(states, actions, tasks)
    return self._tf_call(self._evaluate, states, actions, tasks)

  def _evaluate(self, states, actions, tasks):
    raise NotImplementedError

  def combine(self, states, tasks):
    """Combines the states and task into a single representation.

    Args:
      states: a batch of states.
      tasks: a batch of tasks.

    Returns:
      states_and_tasks: a batch of states concatenated with tasks
    """
    self._assert_is_batched(states, tasks)
    return self._tf_call(self._combine, states, tasks)

  def _combine(self, states, tasks):
    tasks = tf.cast(tasks, states.dtype)
    return tf.concat([states, tasks], axis=-1)

  def split(self, states_and_tasks):
    """Splits a concatenated state+task into a state and a task.

    Args:
      states_and_tasks: a batch of states concatenated with tasks.

    Returns:
      states: a batch of states.
      tasks: a batch of tasks.
    """
    self._assert_is_batched(states_and_tasks)
    return self._tf_call(self._split, states_and_tasks)

  def _split(self, states_and_tasks):
    # The task occupies the trailing dimensions of the combined vector.
    task_last_dim = self.task_space.low.shape[-1]
    states = states_and_tasks[Ellipsis, :-task_last_dim]
    tasks = states_and_tasks[Ellipsis, -task_last_dim:]
    return states, tasks

  def state_to_task(self, states):
    """Extracts the coordinates of the state that correspond to the task.

    For example, if a manipulation task, this function might extract the current
    position of the block. If this method is not overwritten, it defaults to
    using the entire state.

    Args:
      states: the states to convert to tasks.

    Returns:
      tasks: the tasks extracted from the states.
    """
    tasks = states
    return tasks

  @property
  def tasks(self):
    return None
class Dynamics(object):
  """Implements the task-agnostic dynamics.

  The motivation for decoupling the task distribution from the dynamics is
  that we can define multiple task distributions for the same dynamics, and
  we can pass the task distribution to the replay buffer to perform relabelling.
  """

  @property
  def action_space(self):
    # Subclasses are expected to set self._action_space in their constructor.
    return self._action_space

  @property
  def observation_space(self):
    # Subclasses are expected to set self._observation_space in their constructor.
    return self._observation_space

  def reset(self):
    """Resets the dynamics.

    Returns:
      state - a state from the initial state distribution.
    """
    pass

  def step(self, action):
    """Executes the action in the dynamics.

    Args:
      action: an action to take.

    Returns:
      next_state: the state of the environment after taking the action.
    """
    pass
class Environment(gym.Env):
  """An environment defined in terms of a dynamics and a task distribution.

  Internally, this environment samples tasks from the task distribution and
  concatenates the tasks to the observations, and computes the rewards.
  While decoupling the dynamics from the task distribution is convenient for
  off-policy relabelling, it is still helpful to have a Gym environment for
  data collection and interfacing with the underlying MaxEnt RL algorithm.
  """

  def __init__(self, dynamics, task_distribution, constant_task=None):
    """Initialize the environment.

    Args:
      dynamics: an instance of Dynamics, which defines the task transitions.
      task_distribution: an instance of TaskDistribution, which defines the
        rewards and termination conditions.
      constant_task: specifies a fixed task to use for all episodes. Set to None
        to use tasks sampled from the task distribution.
    """
    self._t = 0
    self._dynamics = dynamics
    self._task_distribution = task_distribution
    assert isinstance(dynamics.observation_space, gym.spaces.Box)
    assert isinstance(task_distribution.task_space, gym.spaces.Box)
    if constant_task is None:
      # Observations are state+task, so the box bounds get extended too.
      self._hide_task = False
      low = task_distribution.combine([dynamics.observation_space.low],
                                     [task_distribution.task_space.low])[0]
      high = task_distribution.combine([dynamics.observation_space.high],
                                      [task_distribution.task_space.high])[0]
    else:
      # With a fixed task the observation is just the raw state.
      self._hide_task = True
      low = dynamics.observation_space.low
      high = dynamics.observation_space.high
      constant_task = np.array(constant_task, dtype=np.float32)
    self._constant_task = constant_task
    # Needed to get TF Agents to work.
    high[Ellipsis] = np.max(high)
    low[Ellipsis] = np.min(low)
    self.observation_space = gym.spaces.Box(low=low, high=high)
    self.action_space = dynamics.action_space
    self._state = None

  def set_constant_task(self, task):
    # Switch to a fixed task (validated against the task space), or pass
    # None to go back to sampling tasks per episode.
    if task is not None:
      assert self._task_distribution.task_space.contains(task)
      task = np.array(task, dtype=self._task_distribution.task_space.dtype)
    self._constant_task = task

  def reset(self):
    """Resets the environment.

    Returns:
      state_and_task: an observation, which contains the state and task ID.
    """
    self._t = 0
    self._state = self._dynamics.reset()
    if self._constant_task is None:
      self._task = self._task_distribution.sample()
    else:
      self._task = self._constant_task
    if self._hide_task:
      state_and_task = self._state
    else:
      state_and_task = self._task_distribution.combine([self._state],
                                                       [self._task])[0]
    return state_and_task

  def step(self, action):
    """Steps the dynamics; reward/done come from the task distribution.

    Note that the reward is evaluated on the state *before* the transition.
    """
    self._t += 1
    # print('Step:', self._t)
    rewards, dones = self._task_distribution.evaluate([self._state], [action],
                                                      [self._task])
    reward = rewards[0]
    done = dones[0]
    self._state = self._dynamics.step(action)
    if self._hide_task:
      state_and_task = self._state
    else:
      state_and_task = self._task_distribution.combine([self._state],
                                                       [self._task])[0]
    return state_and_task, reward, done, {}
| [
"gym.spaces.Box",
"numpy.max",
"tensorflow.concat",
"numpy.array",
"tensorflow.constant",
"numpy.min",
"numpy.shape",
"tensorflow.cast"
] | [((3067, 3095), 'tensorflow.cast', 'tf.cast', (['tasks', 'states.dtype'], {}), '(tasks, states.dtype)\n', (3074, 3095), True, 'import tensorflow as tf\n'), ((3107, 3142), 'tensorflow.concat', 'tf.concat', (['[states, tasks]'], {'axis': '(-1)'}), '([states, tasks], axis=-1)\n', (3116, 3142), True, 'import tensorflow as tf\n'), ((6896, 6908), 'numpy.max', 'np.max', (['high'], {}), '(high)\n', (6902, 6908), True, 'import numpy as np\n'), ((6929, 6940), 'numpy.min', 'np.min', (['low'], {}), '(low)\n', (6935, 6940), True, 'import numpy as np\n'), ((6970, 7004), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (6984, 7004), False, 'import gym\n'), ((6753, 6794), 'numpy.array', 'np.array', (['constant_task'], {'dtype': 'np.float32'}), '(constant_task, dtype=np.float32)\n', (6761, 6794), True, 'import numpy as np\n'), ((7214, 7276), 'numpy.array', 'np.array', (['task'], {'dtype': 'self._task_distribution.task_space.dtype'}), '(task, dtype=self._task_distribution.task_space.dtype)\n', (7222, 7276), True, 'import numpy as np\n'), ((1807, 1825), 'tensorflow.constant', 'tf.constant', (['array'], {}), '(array)\n', (1818, 1825), True, 'import tensorflow as tf\n'), ((1429, 1444), 'numpy.shape', 'np.shape', (['array'], {}), '(array)\n', (1437, 1444), True, 'import numpy as np\n')] |
import sys, os
# Make sibling modules importable when this script is run directly.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse, glob, json
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import *
from matplotlib.patches import *
from PIL import Image
def load_results(path):
    """Load all evaluation sessions (JSON files) from *path*.

    Sessions containing a None answer are incomplete and are dropped
    (with a warning). Returns a (sessions x tasks) numpy array."""
    sessions = []
    num_skipped = 0
    for json_path in glob.glob(os.path.join(path, '*.json')):
        with open(json_path, 'r', encoding='utf8') as fop:
            session = json.load(fop)
        if None in session:
            # A None answer marks an unfinished session; leave it out.
            num_skipped += 1
        else:
            sessions.append(session)
    if num_skipped > 0:
        print("[Warning] Skipped %d incomplete sessions." % num_skipped)
    return np.array(sessions)
def load_task(config):
    """Extract per-task truth indices, truth labels and option lists.

    Args:
        config: dict with a 'tasks' list; each task has 'options' (list of
            labels) and 'truth' (index into options).

    Returns:
        Tuple of numpy arrays (truth_idcs, truths, options).
    """
    # FIX: removed dead `truths, options = [], []` initialization -- both
    # names were immediately rebound below.
    options = [task['options'] for task in config['tasks']]
    truths = [task['options'][task['truth']] for task in config['tasks']]
    truth_idcs = [task['truth'] for task in config['tasks']]
    return np.array(truth_idcs), np.array(truths), np.array(options)
def get_choice_matrix(results, options):
    """Count, per task, how often each of the three options was chosen.

    Returns a (tasks x 3) matrix of choice counts."""
    num_sessions, num_tasks = results.shape
    counts = np.zeros((num_tasks, 3))
    for task_idx in range(num_tasks):
        for res_idx in range(num_sessions):
            chosen = np.where(options[task_idx] == results[res_idx, task_idx])
            counts[task_idx, chosen] += 1
    return counts
def calc_accuracies(results, truths):
    """Per-session fraction of tasks answered correctly."""
    # Broadcast the truth labels to every session row and count matches.
    truth_rows = np.tile(truths, (results.shape[0], 1))
    num_correct = (results == truth_rows).sum(axis=1)
    return num_correct / truths.shape[0]
def calc_kappa(choices, num_choices=3):
    """Calculate Fleiss' Kappa for a (tasks x categories) count matrix.

    Based on https://en.wikibooks.org/wiki/Algorithm_Implementation/Statistics/Fleiss'_kappa
    Assumes every task received the same number of ratings (taken from row 0)."""
    mat = np.asarray(choices, dtype=float)
    num_evals = mat[0].sum()
    num_tasks = mat.shape[0]
    cat = mat[:, :num_choices]
    # Marginal proportion of each category over all ratings.
    p = cat.sum(axis=0) / (num_tasks * num_evals)
    # Per-task agreement among the evaluators.
    P = ((cat * cat).sum(axis=1) - num_evals) / (num_evals * (num_evals - 1))
    Pbar = P.mean()
    PbarE = (p * p).sum()
    return (Pbar - PbarE) / (1 - PbarE)
if __name__ == '__main__':
    # CLI: aggregate SynEval human-evaluation sessions, report accuracy and
    # inter-evaluator agreement, and optionally plot per-task choice bars.
    arg_parser = argparse.ArgumentParser(description='SynEval Result Aggregation')
    arg_parser.add_argument('config_path', help='path evaluation configuration JSON')
    arg_parser.add_argument('result_path', help='path result files')
    arg_parser.add_argument('--plot', action='store_true', help='plot results')
    arg_parser.add_argument('--data_path', help='path to data (required for plotting)')
    args = arg_parser.parse_args()
    # load config
    with open(args.config_path, 'r', encoding='utf8') as fop:
        config = json.load(fop)
    truth_idcs, truths, options = load_task(config)
    # load results
    results = load_results(args.result_path)
    print("Loaded %d evaluation sessions with %d tasks each." % (results.shape[0], truths.shape[0]))
    # calculate accuracy
    accuracies = calc_accuracies(results, truths)
    print("Accuracy: %.2f avg, %.2f stddev, %.2f max, %.2f min" % (np.mean(accuracies), np.std(accuracies), np.max(accuracies), np.min(accuracies)))
    print(" ", accuracies)
    # calculate accuracy per class
    choices = get_choice_matrix(results, options)
    print("Accuracy per class:")
    for class_idx, class_dsc in enumerate(config['classes']):
        # count of correct votes over all tasks whose truth is this class
        class_correct = choices[[ti for ti, t in enumerate(truth_idcs) if t == class_idx], class_idx]
        class_accuracy = np.sum(class_correct) / np.sum(choices[:, class_idx])
        print(" '%s': %.2f avg" % (class_dsc, class_accuracy))
    # calculate interevaluator agreement
    max_agreement = np.max(choices)
    max_agreement_idcs = np.where(choices == max_agreement)[0]
    kappa = calc_kappa(choices)
    print("Fleiss' Kappa: %.2f (max agreement: %.2f%% (%d tasks))." % (kappa, (max_agreement * 100)/results.shape[0], max_agreement_idcs.size))
    print(" ", max_agreement_idcs)
    # per task: how many evaluators picked the true option
    correct_choice_counts = [choices[task_idx, truth_idx] for task_idx, truth_idx in enumerate(truth_idcs)]
    max_correct_choices = np.max(correct_choice_counts)
    max_correct_choices_idcs = np.where(correct_choice_counts == max_correct_choices)[0]
    print("Maximum correct choices: %.2f%% (%d tasks)" % ((max_correct_choices*100)/results.shape[0], max_correct_choices_idcs.size))
    print(" ", max_correct_choices_idcs)
    min_correct_choices = np.min(correct_choice_counts)
    min_correct_choices_idcs = np.where(correct_choice_counts == min_correct_choices)[0]
    print("Minimum correct choices: %.2f%% (%d tasks)" % ((min_correct_choices*100)/results.shape[0], min_correct_choices_idcs.size))
    print(" ", min_correct_choices_idcs)
    # plot accuracy
    if args.plot:
        plot_str = []
        for ti, ccc in enumerate(correct_choice_counts):
            plot_str.append('task %d: %d\\%%' % (ti + 1, (ccc * 100)/results.shape[0]))
        print("Plot description:")
        print('Percentages of correct choices: ' + ', '.join(plot_str) + '.\n')
        fig, ax = plt.subplots(figsize=(12, 3))
        width = 0.15
        colors = ['dimgrey', 'darkgrey', 'lightgrey']
        # colors = ['orchid', 'royalblue', 'coral']
        x_pos = np.arange(20, dtype=float)
        ax.set_xlim([-.5, 19.5])
        ax.set_ylim([-100, 100])
        # one grouped bar per option (left/center/right of each task tick)
        for oi in range(3):
            # get position
            positions = x_pos
            if oi == 0:
                positions = x_pos - width
            elif oi == 2:
                positions = x_pos + width
            # plot bars
            option_choices = np.maximum(((np.array(choices[:, oi])/results.shape[0]) * 100), np.ones(choices.shape[0]))
            option_bars = ax.bar(positions, option_choices, width, color=colors[oi])
            # set colors for correct bars
            for bi in [i for i in range(choices.shape[0]) if truth_idcs[i] == oi]:
                option_bars[bi].set_color('limegreen')
        # create rule lines
        ax.hlines([25, 50, 75, 100], -1, 20, colors='lightgrey', linestyles='dashed', linewidths=1, zorder=0)
        ax.vlines(np.arange(0.5, 20, 1), 0, 100, colors='darkgrey', linestyles='solid', linewidths=1, zorder=0)
        ax.plot([-1, 20], [np.mean(accuracies)*100, np.mean(accuracies)*100], color='limegreen', linestyle='dashed', linewidth=1, zorder=0)
        # ax.plot([-1, 20], [(np.mean(accuracies) - np.std(accuracies))*100, (np.mean(accuracies) - np.std(accuracies))*100], color='palegreen', linestyle='dashed', linewidth=1, zorder=0)
        # ax.plot([-1, 20], [(np.mean(accuracies) + np.std(accuracies))*100, (np.mean(accuracies) + np.std(accuracies))*100], color='palegreen', linestyle='dashed', linewidth=1, zorder=0)
        # create ticks
        ax.get_xaxis().set_ticks(np.arange(20, dtype=int))
        ax.set_xticklabels(np.arange(1, 21, 1, dtype=int))
        ax.spines['bottom'].set_position('center')
        ax.get_yaxis().set_ticks(np.arange(0, 101, 25, dtype=int))
        ax.plot([-1, 20], [-100, -100], 'k-')
        # add images
        for ti, task in enumerate(options):
            for oi, option in enumerate(task):
                img = Image.open(os.path.join(args.data_path, '%d_orig.png' % option))
                img = img.resize((32, 32))
                y_pos = -32 - (oi * 25)
                bboxprops = dict(lw=6., ec=colors[oi])
                if option == truths[ti]:
                    bboxprops = dict(lw=6., ec='limegreen')
                ab = AnnotationBbox(OffsetImage(img, zoom=.6, cmap='gray'), (ti, y_pos), pad=0., bboxprops=bboxprops)
                ax.add_artist(ab)
        # show plot
        fig.tight_layout()
        fig.savefig(os.path.join(args.result_path, 'results.pdf'))
        plt.show()
| [
"numpy.mean",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.where",
"numpy.std",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"os.path.dirname",
"numpy.min",
"json.load",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((745, 758), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (753, 758), True, 'import numpy as np\n'), ((1210, 1241), 'numpy.zeros', 'np.zeros', (['(results.shape[1], 3)'], {}), '((results.shape[1], 3))\n', (1218, 1241), True, 'import numpy as np\n'), ((1645, 1682), 'numpy.sum', 'np.sum', (['(results == mat_truths)'], {'axis': '(1)'}), '(results == mat_truths, axis=1)\n', (1651, 1682), True, 'import numpy as np\n'), ((1936, 1954), 'numpy.sum', 'np.sum', (['choices[0]'], {}), '(choices[0])\n', (1942, 1954), True, 'import numpy as np\n'), ((2625, 2690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SynEval Result Aggregation"""'}), "(description='SynEval Result Aggregation')\n", (2648, 2690), False, 'import argparse, glob, json\n'), ((4156, 4171), 'numpy.max', 'np.max', (['choices'], {}), '(choices)\n', (4162, 4171), True, 'import numpy as np\n'), ((4588, 4617), 'numpy.max', 'np.max', (['correct_choice_counts'], {}), '(correct_choice_counts)\n', (4594, 4617), True, 'import numpy as np\n'), ((4912, 4941), 'numpy.min', 'np.min', (['correct_choice_counts'], {}), '(correct_choice_counts)\n', (4918, 4941), True, 'import numpy as np\n'), ((45, 70), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (60, 70), False, 'import sys, os\n'), ((352, 380), 'os.path.join', 'os.path.join', (['path', '"""*.json"""'], {}), "(path, '*.json')\n", (364, 380), False, 'import sys, os\n'), ((1025, 1045), 'numpy.array', 'np.array', (['truth_idcs'], {}), '(truth_idcs)\n', (1033, 1045), True, 'import numpy as np\n'), ((1047, 1063), 'numpy.array', 'np.array', (['truths'], {}), '(truths)\n', (1055, 1063), True, 'import numpy as np\n'), ((1065, 1082), 'numpy.array', 'np.array', (['options'], {}), '(options)\n', (1073, 1082), True, 'import numpy as np\n'), ((3156, 3170), 'json.load', 'json.load', (['fop'], {}), '(fop)\n', (3165, 3170), False, 'import argparse, glob, json\n'), ((4198, 4232), 'numpy.where', 'np.where', (['(choices == 
max_agreement)'], {}), '(choices == max_agreement)\n', (4206, 4232), True, 'import numpy as np\n'), ((4650, 4704), 'numpy.where', 'np.where', (['(correct_choice_counts == max_correct_choices)'], {}), '(correct_choice_counts == max_correct_choices)\n', (4658, 4704), True, 'import numpy as np\n'), ((4974, 5028), 'numpy.where', 'np.where', (['(correct_choice_counts == min_correct_choices)'], {}), '(correct_choice_counts == min_correct_choices)\n', (4982, 5028), True, 'import numpy as np\n'), ((5559, 5588), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (5571, 5588), True, 'import matplotlib.pyplot as plt\n'), ((5736, 5762), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'float'}), '(20, dtype=float)\n', (5745, 5762), True, 'import numpy as np\n'), ((8290, 8300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8298, 8300), True, 'import matplotlib.pyplot as plt\n'), ((465, 479), 'json.load', 'json.load', (['fop'], {}), '(fop)\n', (474, 479), False, 'import argparse, glob, json\n'), ((1363, 1420), 'numpy.where', 'np.where', (['(options[task_idx] == results[res_idx, task_idx])'], {}), '(options[task_idx] == results[res_idx, task_idx])\n', (1371, 1420), True, 'import numpy as np\n'), ((3972, 3993), 'numpy.sum', 'np.sum', (['class_correct'], {}), '(class_correct)\n', (3978, 3993), True, 'import numpy as np\n'), ((3996, 4025), 'numpy.sum', 'np.sum', (['choices[:, class_idx]'], {}), '(choices[:, class_idx])\n', (4002, 4025), True, 'import numpy as np\n'), ((6639, 6660), 'numpy.arange', 'np.arange', (['(0.5)', '(20)', '(1)'], {}), '(0.5, 20, 1)\n', (6648, 6660), True, 'import numpy as np\n'), ((7310, 7334), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'int'}), '(20, dtype=int)\n', (7319, 7334), True, 'import numpy as np\n'), ((7364, 7394), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(1)'], {'dtype': 'int'}), '(1, 21, 1, dtype=int)\n', (7373, 7394), True, 'import numpy as np\n'), ((7482, 7514), 
'numpy.arange', 'np.arange', (['(0)', '(101)', '(25)'], {'dtype': 'int'}), '(0, 101, 25, dtype=int)\n', (7491, 7514), True, 'import numpy as np\n'), ((8234, 8279), 'os.path.join', 'os.path.join', (['args.result_path', '"""results.pdf"""'], {}), "(args.result_path, 'results.pdf')\n", (8246, 8279), False, 'import sys, os\n'), ((3547, 3566), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3554, 3566), True, 'import numpy as np\n'), ((3568, 3586), 'numpy.std', 'np.std', (['accuracies'], {}), '(accuracies)\n', (3574, 3586), True, 'import numpy as np\n'), ((3588, 3606), 'numpy.max', 'np.max', (['accuracies'], {}), '(accuracies)\n', (3594, 3606), True, 'import numpy as np\n'), ((3608, 3626), 'numpy.min', 'np.min', (['accuracies'], {}), '(accuracies)\n', (3614, 3626), True, 'import numpy as np\n'), ((6180, 6205), 'numpy.ones', 'np.ones', (['choices.shape[0]'], {}), '(choices.shape[0])\n', (6187, 6205), True, 'import numpy as np\n'), ((6761, 6780), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (6768, 6780), True, 'import numpy as np\n'), ((6786, 6805), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (6793, 6805), True, 'import numpy as np\n'), ((7712, 7764), 'os.path.join', 'os.path.join', (['args.data_path', "('%d_orig.png' % option)"], {}), "(args.data_path, '%d_orig.png' % option)\n", (7724, 7764), False, 'import sys, os\n'), ((6129, 6153), 'numpy.array', 'np.array', (['choices[:, oi]'], {}), '(choices[:, oi])\n', (6137, 6153), True, 'import numpy as np\n')] |
'''
Biblioteca para calculo de refrigeracao regenerativa em motores foguetes bi propelentes
<NAME>
https://github.com/jeffersonmsb/rocket-cooling-calculator
'''
import csv
import numpy as np
import math
import pyCEA
from scipy import optimize
import os
import subprocess
def geometry(data_in, data_out):
    """Read the chamber contour CSV and derive per-station geometry.

    Populates ``data_out`` with the throat data (``Rt``, ``zt``, ``At``),
    per-station radii/areas/channel dimensions, and the axial segment
    length ``L`` attributed to each station.  Sets ``error_code`` to 1
    and returns early when the fixed fin thickness leaves no room for a
    channel; otherwise ``error_code`` ends up 0.
    """
    with open(data_in['geometry_path']) as csv_file:
        data_out['geometry'] = list(csv.reader(csv_file, delimiter=','))
    data_out['size'] = len(data_out['geometry'])

    # Locate the throat: the station with the smallest radius.
    Rt = float(data_out['geometry'][0][1])
    zt = float(data_out['geometry'][0][0])
    for row in data_out['geometry']:
        radius = float(row[1])
        if radius < Rt:
            Rt = radius
            zt = float(row[0])
    data_out['Rt'] = Rt
    data_out['zt'] = zt
    data_out['At'] = np.pi*np.power(Rt,2)

    # Per-station lists: radii, areas, channel counts and channel dimensions.
    for key in ('r1', 'r2', 'r3', 'Ae', 'Ae/At', 'z', 'N', 'CCH', 'CCW', 'FT'):
        data_out[key] = []
    n = 0
    for row in data_out['geometry']:
        inner_radius = float(row[1])
        A = np.pi*np.power(inner_radius,2)
        r2 = inner_radius + data_in['IWT']
        data_out['r1'].append(inner_radius)
        data_out['r2'].append(r2)
        data_out['r3'].append(inner_radius + data_in['IWT'] + data_in['CCH'])
        data_out['Ae'].append(A)
        data_out['Ae/At'].append(A/data_out['At'])
        data_out['z'].append(float(row[0]))
        # Advance to the next channel-count segment once its start z is passed.
        if float(row[0]) > data_in['channel_number'][n][0]:
            n = n + 1
        N = data_in['channel_number'][n][1]
        data_out['N'].append(N)
        data_out['CCH'].append(data_in['CCH'])
        if data_in['dim_constant'] == 'FT':
            # Fixed fin thickness: the channel width takes whatever is left
            # of the circumference; it must stay positive.
            data_out['FT'].append(data_in['FT'])
            width = (2*np.pi*r2)/N - data_in['FT']
            if width <= 0:
                data_out['error_code'] = 1
                return
            data_out['CCW'].append(width)
        else:
            # Fixed channel width: the fin takes the remainder.
            data_out['CCW'].append(data_in['CCW'])
            data_out['FT'].append((2*np.pi*r2)/N - data_in['CCW'])

    # Axial segment length attributed to each station (midpoint rule).
    data_out['L'] = []
    z = data_out['z']
    r = data_out['r1']
    last = data_out['size'] - 1
    for i in range(0, data_out['size']):
        if i == 0:
            dz = 0.5*(z[i+1]+z[i]) - z[i]
            dr = 0.5*(r[i+1]+r[i]) - r[i]
        elif i != last:
            dz = 0.5*(z[i+1]+z[i]) - 0.5*(z[i]+z[i-1])
            dr = 0.5*(r[i+1]+r[i]) - 0.5*(r[i]+r[i-1])
        else:
            dz = z[i] - 0.5*(z[i]+z[i-1])
            dr = r[i] - 0.5*(r[i]+r[i-1])
        data_out['L'].append(math.sqrt(dz**2 + dr**2))
    data_out['error_code'] = 0
def coolant_prop(coolant_name, prop_name, temperature):
    """Look up a thermophysical property of the coolant.

    Args:
        coolant_name: 'RP-1' or 'C2H5OH(L)'.
        prop_name: 'ro' (density), 'cp' (specific heat), 'k' (thermal
            conductivity) or 'mi' (dynamic viscosity).
        temperature: coolant temperature in kelvin; for RP-1 it is clamped
            to the 300-800 K validity range of the correlations.

    Returns:
        The property value, or -1 (after printing a message) when the
        coolant/property pair is unknown.

    Fix: the original returned None silently for an unknown property of
    'C2H5OH(L)' while the same case for 'RP-1' fell through to the
    message-and--1 path; both now behave the same way.
    """
    if coolant_name == 'RP-1':
        # Keep the temperature inside the validity range of the fits.
        if temperature > 800:
            temperature = 800
        if temperature < 300:
            temperature = 300
        if prop_name == 'ro':
            return 820
        if prop_name == 'cp':
            return -2.82649e-3*temperature**2.0 + 6.77751e0*temperature - 2.45234e1 #BOYSAN
        if prop_name == 'k':
            return 9.64e-8*temperature**2-2.95e-4*temperature+0.261 #BOYSAN
        if prop_name == 'mi':
            return -1.46e-11*temperature**3+3.22e-8*temperature**2-2.39e-5*temperature+6E-3 #BOYSAN
    elif coolant_name == 'C2H5OH(L)':
        # Liquid ethanol treated as temperature-independent constants.
        if prop_name == 'ro':
            return 785.3
        if prop_name == 'cp':
            return 2570
        if prop_name == 'k':
            return 0.167
        if prop_name == 'mi':
            return 1.36e-3
    # Unknown coolant, or unknown property for a known coolant.
    print('Coolant proprieties not found')
    return -1
def create_prop(data_in, data_out):
    """Allocate the per-station solution arrays with their initial guesses.

    Temperature fields start from the user-supplied first guesses in
    ``data_in``; every other field starts as None and is filled by
    ``calc_prop``.  The static pressure carries a fixed 6 MPa guess.
    """
    size = data_out['size']
    # Temperature fields seeded from the primary guesses.
    for key, primary in (('Tc', 'Tc_primary'), ('Twg', 'Twg_primary'),
                         ('Twc', 'Twc_primary'), ('Taw', 'Taw_primary')):
        data_out[key] = size*[data_in[primary]]
    # Fields computed during the iteration start as placeholders.
    for key in ('cp_c', 'k_c', 'mi_c', 'Pr_c', 'gama', 'M', 'cp', 'R',
                'h_g', 'Re_c', 'D_h', 'mi_s', 'h_c', 'Aa', 'Atotal', 'm',
                'eta_f', 'eta_o', 'R_c', 'R_g', 'R_w', 'q', 'Q', 'f',
                'ro', 'V_c', 'hl', 'deltap', 'T_static'):
        data_out[key] = size*[None]
    # Static pressure initial guess of 6 MPa at every station.
    data_out['p_static'] = size*[6000000]
def calc_prop(data_in, data_out):
    """One sweep of the coupled hot-gas / wall / coolant heat-transfer solution.

    Marches through the stations from the last contour row toward the first
    (``reversed(range(...))``), solving at each station for the Mach number,
    gas-side and coolant-side heat-transfer coefficients, the thermal
    resistances, heat flux and wall/coolant temperatures, plus the coolant
    pressure drop and static gas conditions.  Stagnation properties come
    from CEA via the ``pyCEA`` wrapper.  Mutates both ``data_in``
    (adds ``p0_pyCEA``) and the arrays in ``data_out``.
    """
    data_in['p0_pyCEA'] = data_in['p0']/1e5 #Conversion from [Pa] to [bar]
    # Chamber stagnation properties from CEA at the injection pressure.
    pyCEA.calcPropStagnationCEA(data_in['p0_pyCEA'],data_in['fuel'], data_in['oxidizer'],data_in['of'], data_in['motor_name'])
    T0 = pyCEA.readPropStagnationCEA('t', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
    cp0 = pyCEA.readPropStagnationCEA('cp', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
    Pr0 = pyCEA.readPropStagnationCEA('pr', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
    mi0 = pyCEA.readPropStagnationCEA('mi', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
    # Local aliases into data_in/data_out (lists are shared, so writes to
    # these names mutate data_out in place).
    Tc1 = data_in['Tc_primary']
    IWT = data_in['IWT']
    k_w = data_in['k_w']
    mponto_c = data_in['m._c']
    e = data_in['e']
    p0 = data_in['p0']
    Re_c = data_out['Re_c']
    N = data_out['N']
    mi_c = data_out['mi_c']
    CCW = data_out['CCW']
    FT = data_out['FT']
    D_h = data_out['D_h']
    mi_s = data_out['mi_s']
    Tc = data_out['Tc']
    Twg = data_out['Twg']
    Twc = data_out['Twc']
    Taw = data_out['Taw']
    h_c = data_out['h_c']
    k_c = data_out['k_c']
    Pr_c = data_out['Pr_c']
    Aa = data_out['Aa']
    L = data_out['L']
    r1 = data_out['r1']
    r2 = data_out['r2']
    r3 = data_out['r3']
    Atotal = data_out['Atotal']
    m = data_out['m']
    eta_f = data_out['eta_f']
    eta_o = data_out['eta_o']
    R_c = data_out['R_c']
    R_g = data_out['R_g']
    R_w = data_out['R_w']
    h_g = data_out['h_g']
    q = data_out['q']
    Q = data_out['Q']
    cp_c = data_out['cp_c']
    k_c = data_out['k_c']
    f = data_out['f']
    ro = data_out['ro']
    V_c = data_out['V_c']
    hl = data_out['hl']
    deltap = data_out['deltap']
    T_static = data_out['T_static']
    p_static = data_out['p_static']
    gama = data_out['gama']
    M = data_out['M']
    CCH = data_out['CCH']
    data_out['p_drop'] = 0
    def f_mach(M):
        # Residual of the isentropic area-ratio/Mach relation for station i
        # (i is captured from the enclosing loop).
        A = 2/(data_out['gama'][i]+1)
        B = 1+(((data_out['gama'][i]-1)/2)*(M**2))
        C = (data_out['gama'][i]+1)/(data_out['gama'][i]-1)
        D = (data_out['Ae/At'][i]*M)**2
        return ( (A*B)**C-D )
    def f_coolebrook(f):
        # Residual of the Colebrook friction-factor equation for station i.
        return (1/(-2*math.log(((e/D_h[i])/3.7)+(2.51/(Re_c[i]*f**0.5)), 10))**2-f)
    for i in reversed(range(0,data_out['size'])):
        # Coolant properties at the current coolant-temperature guess.
        cp_c[i] = coolant_prop(data_in['coolant'], 'cp', Tc[i])
        k_c[i] = coolant_prop(data_in['coolant'], 'k', data_out['Tc'][i])
        data_out['mi_c'][i] = coolant_prop(data_in['coolant'], 'mi', data_out['Tc'][i])
        data_out['Pr_c'][i] = data_out['cp_c'][i]*data_out['mi_c'][i]/data_out['k_c'][i]
        # Gas-side cp from CEA at the adiabatic-wall temperature guess.
        pyCEA.calcPropCEA(data_out['Taw'][i] , data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
        data_out['cp'][i] = pyCEA.readPropCEA('cp', data_out['Taw'][i], data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
        #data_out['cp'][i] = -5.84399e-05*data_out['Taw'][i]**2.0 + 4.23454e-01*data_out['Taw'][i] + 1.29256e+03
        # Polynomial fit for gamma as a function of Taw.
        data_out['gama'][i] = 1.23854e-8*data_out['Taw'][i]**2 - 8.09028e-5*data_out['Taw'][i] + 1.34563
        #Gamma for the L-75
        #data_out['gama'][i] = pyCEA.readPropCEA('gama', data_out['Taw'][i], data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
        data_out['R'][i] = (data_out['cp'][i]*(1 - 1/data_out['gama'][i]))
        # Choked mass flow through the throat and characteristic velocity c*.
        mponto = data_in['p0']*data_out['At']*((data_out['gama'][i]/(data_out['R'][i]*T0))*(2/(data_out['gama'][i]+1))**((data_out['gama'][i]+1)/(data_out['gama'][i]-1)))**0.5
        c = (data_in['p0']*data_out['At'])/mponto
        # Bisection bracket: supersonic downstream of the throat, subsonic upstream.
        if(data_out['z'][i] > data_out['zt']):
            a = 1
            b = 25
        else:
            a = 0
            b = 1
        data_out['M'][i] = optimize.bisect(f_mach, a, b, rtol=8.881784197001252e-16)
        # Gas-side heat-transfer coefficient (appears to follow Bartz's
        # correlation form — confirm against the reference).
        aux1 = 1 + ((data_out['gama'][i]-1)/2)*data_out['M'][i]**2
        sigma = ((data_out['Twg'][i]/(2*T0))*aux1+0.5 )**-0.68 * aux1**-0.12
        data_out['h_g'][i] = ( 0.026 * ((mi0/(2*data_out['Rt']))**0.2) * (cp0/(Pr0**0.6)) * (data_in['p0']/c)**0.8 * (data_out['At']/data_out['Ae'][i])**0.9 * sigma )
        # Coolant-side: hydraulic diameter, Reynolds number and a
        # Sieder-Tate-style Nusselt correlation.
        D_h[i] = (4*CCW[i]*CCH[i])/(2*(CCW[i]+CCH[i]))
        Re_c[i] = (4*mponto)/(N[i]*mi_c[i]*2*(CCW[i]+CCH[i]))
        mi_s[i] = coolant_prop(data_in['coolant'], 'mi', Twc[i])
        h_c[i] = ((k_c[i]/D_h[i]) * 0.027 * Re_c[i]**0.8 * Pr_c[i]**(1/3) * (mi_c[i]/mi_s[i])**0.14 )
        # Fin model for the channel ribs: fin efficiency and overall
        # surface efficiency of the coolant passage.
        Aa[i] = (2*CCH[i]*L[i])
        Atotal[i] = (N[i]*Aa[i] + L[i]*(2*math.pi*r2[i]-N[i]*FT[i]))
        m[i] = math.sqrt((2*h_c[i])/(k_c[i]*FT[i]))
        eta_f[i] = (math.tanh(m[i]*CCH[i])/(m[i]*CCH[i]))
        eta_o[i] = 1-((N[i]*Aa[i]*(1-eta_f[i])) / Atotal[i])
        # Series thermal resistances: gas film, wall conduction, coolant film.
        R_g[i] = (1/(2*math.pi*r1[i]*L[i]*h_g[i]))
        R_w[i] = (math.log(r2[i]/r1[i]) / (2*math.pi*L[i]*k_w))
        R_c[i] = (1 / (eta_o[i]*h_c[i]*Atotal[i]))
        q[i] = ((Taw[i] - Tc[i]) / (R_g[i] + R_w[i] + R_c[i]))
        Q[i] = ( q[i]/(2*math.pi*r1[i]*L[i])/1000000 )
        # Update the temperature guesses from the new heat flux.
        aux = 0.5*(data_out['gama'][i] - 1)*data_out['M'][i]**2
        Taw[i] = (T0 * ((1 + Pr0**(1/3)*aux) / (1 + aux)))
        Twg[i] = -R_g[i]*q[i]+Taw[i]
        Twc[i] = -q[i]*(R_g[i]+R_w[i])+Taw[i]
        # Re-march the coolant temperature from the inlet (303 K at the
        # last station) down to the current station.
        lista = reversed(range( i,data_out['size']))
        Tc1 = 303
        for j in lista:
            Tc2 = (q[j] / (mponto_c*cp_c[j])) + Tc1
            Tc[j] = (Tc2+Tc1)/2
            Tc1 = Tc2
        p_static[i] = p0*(1+((gama[i]-1)/2)*M[i]**2)**-(gama[i]/(gama[i]-1))
        #Pressure drop calculation
        f[i] = optimize.bisect(f_coolebrook, 0.00001, 2, rtol=8.881784197001252e-16)
        ro[i] = coolant_prop(data_in['coolant'], 'ro', Tc[i])
        V_c[i] = mponto_c/(ro[i]*CCH[i]*CCW[i]*N[i])
        hl[i] = f[i]*((L[i]/D_h[i])/(V_c[i]**2/2))
        deltap[i] = ro[i]*hl[i]*N[i]
        data_out['p_drop'] += deltap[i]
        #Static temperature and static pressure calculation
        T_static[i] = T0*(1+((gama[i]-1)/2)*M[i]**2)**-1
def iteration(data_in , data_out):
    """Run the heat-transfer solver until the temperature fields converge.

    Builds the geometry and the initial guesses, then repeats
    ``calc_prop`` sweeps until the relative change (L1, normalised by the
    first sweep) of Q, Twg, Twc and Taw all fall below ``data_in['tol']``,
    or ``max_iterations`` is reached.
    """
    geometry(data_in, data_out)
    if data_out['error_code'] != 0:
        print('CCW <= 0')
        return
    create_prop(data_in, data_out)
    for i in range(0, data_in['max_iterations']):
        print('Iteration {}'.format(i + 1))
        calc_prop(data_in, data_out)
        current = (sum(data_out['Q']), sum(data_out['Twg']),
                   sum(data_out['Twc']), sum(data_out['Taw']))
        if i == 0:
            # First sweep fixes the normalisation baseline.
            baseline = current
            previous = current
        else:
            residuals = [abs(now - prev) / base
                         for now, prev, base in zip(current, previous, baseline)]
            previous = current
            if all(res <= data_in['tol'] for res in residuals):
                break
    print('Total Iteration Temperature: ' + str(i + 1))
def optimize_channel2(data_in, data_out):
    """Gradient-style search of channel height (CCH) and the variable
    channel dimension (CCW or FT) that maximises the peak heat flux Q.

    Whichever of CCW/FT is chosen by ``data_in['dim_constant']`` is held
    as the optimisation variable together with CCH; the other is derived
    by ``geometry``.  Bounds come from the '*_min' entries in ``data_in``
    and from the smallest-circumference station.
    """
    # Flags latch once a bound is hit; that coordinate is then frozen.
    flag1 = False
    flag2 = False
    if data_in['dim_constant'] == 'FT':
        dim_const = 'FT'
        dim_var = 'CCW'
    else:
        dim_const = 'CCW'
        dim_var = 'FT'
    geometry(data_in, data_out)
    # Find the station with the smallest mid-wall radius r2: it gives the
    # tightest circumference and hence the upper bound on the dimension.
    m = 0
    for i in range(0, data_out['size']):
        if data_out['r2'][i] < data_out['r2'][m]:
            m = i
    dim_max = (2*np.pi*data_out['r2'][m])/data_out['N'][m] - data_in[dim_var + '_min']
    if dim_max-data_in[dim_const + '_min'] <= 0:
        print('Maior dimensão geométrica é menor que dimensão mínima.')
        return
    # Start from the middle of the feasible interval.
    dim = (dim_max+data_in[dim_const + '_min'])/2
    x = np.array([data_in['CCH'] , dim])
    data_in[dim_const] = dim
    iteration(data_in, data_out)
    Q = max(data_out['Q'])
    Q_prev = Q
    Q0 = Q
    w = data_in['w']
    for opt in range(0,data_in['max_iterations_opt']):
        # NOTE(review): np.gradient(x) differentiates the 2-element design
        # vector with respect to its own index, not Q with respect to x —
        # this looks suspicious for a descent step; confirm the intent.
        grad = np.gradient(x)
        xn = x - w*grad
        if xn[0] <= data_in['CCH_min'] and flag1 == False:
            flag1 = True
            print('CCH_min')
        if (xn[1] <= data_in[dim_const+'_min'] or xn[1] >= dim_max) and flag2 == False:
            flag2 = True
            print(dim_const+' min or max')
        # Frozen coordinates keep their previous value.
        if flag1 == True:
            xn[0] = x[0]
        if flag2 == True:
            xn[1] = x[1]
        data_in['CCH'] = xn[0]
        data_in[dim_const] = xn[1]
        iteration(data_in, data_out)
        Q = max(data_out['Q'])
        if Q-Q_prev < 0:
            # Q decreased: reverse the step direction and retry.
            w=w*-1
            print(w)
            continue
        x = xn
        print('Opt #{} Q:{} CCH:{} {}:{}'.format(opt, Q, x[0], dim_const, x[1]))
        # Converged when the relative improvement drops below tol_opt.
        Q_diff = abs(Q-Q_prev)/Q0
        Q_prev = Q
        if Q_diff <= data_in['tol_opt']:
            break
def plot(data_out):
    """Dump the axial temperature/heat-flux profiles to CSV and plot them.

    Writes 'rcc_plot_data.csv' to the working directory (columns:
    z, Q, Taw, Twg, Twc, Tc) and invokes gnuplot with the
    'rcc_plot_config.gnu' script to render the figure.
    """
    data = []
    for i in range(0, data_out['size']):
        data.append([data_out['z'][i], data_out['Q'][i], data_out['Taw'][i],
                     data_out['Twg'][i], data_out['Twc'][i], data_out['Tc'][i]])
    # newline='' is required by the csv module so rows are not doubled
    # on platforms that translate line endings.
    with open('rcc_plot_data.csv', mode='w', encoding='utf-8', newline='') as data_file:
        csv.writer(data_file, delimiter=',').writerows(data)
    # subprocess.run waits for the shell to finish, replacing the original
    # Popen + os.waitpid pair.
    subprocess.run("gnuplot \'rcc_plot_config.gnu\'", shell=True)
    # p2 = subprocess.Popen("ristretto \'temps.png\'", shell=True)
    # os.waitpid(p2.pid, 1)
def calc_wall_thickness(p, path, sigmae, n=1):
    """Minimum wall thickness for a thin-walled vessel under pressure ``p``.

    Args:
        p: internal pressure (same unit system as ``sigmae``).
        path: contour CSV; the radius is read from the first row — this
            is presumably the chamber radius (TODO confirm with callers).
        sigmae: allowable equivalent stress of the wall material.
        n: multiplier applied to the resulting thickness (safety factor).
    """
    with open(path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        # Radius taken from the first contour station only.
        r = float(list(csv_reader)[0][1])
    def f_t(t):
        # Hoop and longitudinal thin-wall stresses for thickness t.
        sigma1 = (p*r)/t
        sigma2 = (p*r)/(2*t)
        # Von Mises equivalent stress minus the allowable: its root is
        # the minimum admissible thickness.
        return math.sqrt(sigma1**2-sigma1*sigma2+sigma2**2)-sigmae
return (optimize.bisect(f_t, 1e-8, 1, rtol=8.881784197001252e-16))*n | [
"pyCEA.readPropStagnationCEA",
"scipy.optimize.bisect",
"os.waitpid",
"numpy.power",
"subprocess.Popen",
"csv.writer",
"pyCEA.readPropCEA",
"math.sqrt",
"math.log",
"pyCEA.calcPropStagnationCEA",
"numpy.array",
"numpy.gradient",
"csv.reader",
"math.tanh",
"pyCEA.calcPropCEA"
] | [((6107, 6236), 'pyCEA.calcPropStagnationCEA', 'pyCEA.calcPropStagnationCEA', (["data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "(data_in['p0_pyCEA'], data_in['fuel'], data_in[\n 'oxidizer'], data_in['of'], data_in['motor_name'])\n", (6134, 6236), False, 'import pyCEA\n'), ((6239, 6372), 'pyCEA.readPropStagnationCEA', 'pyCEA.readPropStagnationCEA', (['"""t"""', "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "('t', data_in['p0_pyCEA'], data_in['fuel'],\n data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (6266, 6372), False, 'import pyCEA\n'), ((6379, 6513), 'pyCEA.readPropStagnationCEA', 'pyCEA.readPropStagnationCEA', (['"""cp"""', "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "('cp', data_in['p0_pyCEA'], data_in['fuel'],\n data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (6406, 6513), False, 'import pyCEA\n'), ((6521, 6655), 'pyCEA.readPropStagnationCEA', 'pyCEA.readPropStagnationCEA', (['"""pr"""', "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "('pr', data_in['p0_pyCEA'], data_in['fuel'],\n data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (6548, 6655), False, 'import pyCEA\n'), ((6662, 6796), 'pyCEA.readPropStagnationCEA', 'pyCEA.readPropStagnationCEA', (['"""mi"""', "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "('mi', data_in['p0_pyCEA'], data_in['fuel'],\n data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (6689, 6796), False, 'import pyCEA\n'), ((14109, 14140), 'numpy.array', 'np.array', (["[data_in['CCH'], dim]"], {}), "([data_in['CCH'], dim])\n", (14117, 14140), True, 'import numpy as np\n'), ((15640, 15701), 'subprocess.Popen', 'subprocess.Popen', (['"""gnuplot 
\'rcc_plot_config.gnu\'"""'], {'shell': '(True)'}), '("gnuplot \'rcc_plot_config.gnu\'", shell=True)\n', (15656, 15701), False, 'import subprocess\n'), ((15710, 15730), 'os.waitpid', 'os.waitpid', (['p.pid', '(0)'], {}), '(p.pid, 0)\n', (15720, 15730), False, 'import os\n'), ((380, 415), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (390, 415), False, 'import csv\n'), ((8783, 8921), 'pyCEA.calcPropCEA', 'pyCEA.calcPropCEA', (["data_out['Taw'][i]", "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "(data_out['Taw'][i], data_in['p0_pyCEA'], data_in['fuel'],\n data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (8800, 8921), False, 'import pyCEA\n'), ((8947, 9092), 'pyCEA.readPropCEA', 'pyCEA.readPropCEA', (['"""cp"""', "data_out['Taw'][i]", "data_in['p0_pyCEA']", "data_in['fuel']", "data_in['oxidizer']", "data_in['of']", "data_in['motor_name']"], {}), "('cp', data_out['Taw'][i], data_in['p0_pyCEA'], data_in[\n 'fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])\n", (8964, 9092), False, 'import pyCEA\n'), ((9968, 10025), 'scipy.optimize.bisect', 'optimize.bisect', (['f_mach', 'a', 'b'], {'rtol': '(8.881784197001252e-16)'}), '(f_mach, a, b, rtol=8.881784197001252e-16)\n', (9983, 10025), False, 'from scipy import optimize\n'), ((10755, 10795), 'math.sqrt', 'math.sqrt', (['(2 * h_c[i] / (k_c[i] * FT[i]))'], {}), '(2 * h_c[i] / (k_c[i] * FT[i]))\n', (10764, 10795), False, 'import math\n'), ((11747, 11814), 'scipy.optimize.bisect', 'optimize.bisect', (['f_coolebrook', '(1e-05)', '(2)'], {'rtol': '(8.881784197001252e-16)'}), '(f_coolebrook, 1e-05, 2, rtol=8.881784197001252e-16)\n', (11762, 11814), False, 'from scipy import optimize\n'), ((14354, 14368), 'numpy.gradient', 'np.gradient', (['x'], {}), '(x)\n', (14365, 14368), True, 'import numpy as np\n'), ((15555, 15591), 'csv.writer', 'csv.writer', (['data_file'], {'delimiter': 
'""","""'}), "(data_file, delimiter=',')\n", (15565, 15591), False, 'import csv\n'), ((15933, 15968), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (15943, 15968), False, 'import csv\n'), ((16162, 16220), 'scipy.optimize.bisect', 'optimize.bisect', (['f_t', '(1e-08)', '(1)'], {'rtol': '(8.881784197001252e-16)'}), '(f_t, 1e-08, 1, rtol=8.881784197001252e-16)\n', (16177, 16220), False, 'from scipy import optimize\n'), ((884, 899), 'numpy.power', 'np.power', (['Rt', '(2)'], {}), '(Rt, 2)\n', (892, 899), True, 'import numpy as np\n'), ((10812, 10836), 'math.tanh', 'math.tanh', (['(m[i] * CCH[i])'], {}), '(m[i] * CCH[i])\n', (10821, 10836), False, 'import math\n'), ((10982, 11005), 'math.log', 'math.log', (['(r2[i] / r1[i])'], {}), '(r2[i] / r1[i])\n', (10990, 11005), False, 'import math\n'), ((16097, 16151), 'math.sqrt', 'math.sqrt', (['(sigma1 ** 2 - sigma1 * sigma2 + sigma2 ** 2)'], {}), '(sigma1 ** 2 - sigma1 * sigma2 + sigma2 ** 2)\n', (16106, 16151), False, 'import math\n'), ((2691, 2717), 'math.sqrt', 'math.sqrt', (['(A ** 2 + B ** 2)'], {}), '(A ** 2 + B ** 2)\n', (2700, 2717), False, 'import math\n'), ((3043, 3069), 'math.sqrt', 'math.sqrt', (['(A ** 2 + B ** 2)'], {}), '(A ** 2 + B ** 2)\n', (3052, 3069), False, 'import math\n'), ((3303, 3329), 'math.sqrt', 'math.sqrt', (['(A ** 2 + B ** 2)'], {}), '(A ** 2 + B ** 2)\n', (3312, 3329), False, 'import math\n'), ((8346, 8406), 'math.log', 'math.log', (['(e / D_h[i] / 3.7 + 2.51 / (Re_c[i] * f ** 0.5))', '(10)'], {}), '(e / D_h[i] / 3.7 + 2.51 / (Re_c[i] * f ** 0.5), 10)\n', (8354, 8406), False, 'import math\n')] |
"""Main implementation class of PFE
"""
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import imp
import time
import numpy as np
import tensorflow as tf
from utils.tflib import mutual_likelihood_score_loss
class Network:
    """TensorFlow 1.x graph wrapper for the PFE uncertainty model.

    Owns a private Graph/Session pair and exposes building
    (``initialize``), checkpointing (``save_model``/``restore_model``/
    ``load_model``), a training step (``train``) and batched inference
    (``extract_feature``).
    """
    def __init__(self):
        # Private graph + session with on-demand GPU memory growth.
        self.graph = tf.Graph()
        gpu_options = tf.GPUOptions(allow_growth=True)
        tf_config = tf.ConfigProto(gpu_options=gpu_options,
                allow_soft_placement=True, log_device_placement=False)
        self.sess = tf.Session(graph=self.graph, config=tf_config)
    def initialize(self, config, num_classes=None):
        '''
        Initialize the graph from scratch according to config.
        Builds the backbone + uncertainty module, the MLS loss, and the
        training op that updates only the UncertaintyModule variables.
        num_classes is accepted for interface compatibility but unused.
        '''
        with self.graph.as_default():
            with self.sess.as_default():
                # Set up placeholders
                h, w = config.image_size
                channels = config.channels
                self.images = tf.placeholder(tf.float32, shape=[None, h, w, channels], name='images')
                self.labels = tf.placeholder(tf.int32, shape=[None], name='labels')
                self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
                self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
                self.phase_train = tf.placeholder(tf.bool, name='phase_train')
                self.global_step = tf.Variable(0, trainable=False, dtype=tf.int32, name='global_step')
                # Initialize the backbone network (loaded from the path in config)
                network = imp.load_source('embedding_network', config.embedding_network)
                mu, conv_final = network.inference(self.images, config.embedding_size)
                # Initialize the uncertainty module
                uncertainty_module = imp.load_source('uncertainty_module', config.uncertainty_module)
                log_sigma_sq = uncertainty_module.inference(conv_final, config.embedding_size,
                                        phase_train = self.phase_train, weight_decay = config.weight_decay,
                                        scope='UncertaintyModule')
                self.mu = tf.identity(mu, name='mu')
                self.sigma_sq = tf.identity(tf.exp(log_sigma_sq), name='sigma_sq')
                # Build all losses
                loss_list = []
                self.watch_list = {}
                MLS_loss = mutual_likelihood_score_loss(self.labels, mu, log_sigma_sq)
                loss_list.append(MLS_loss)
                self.watch_list['loss'] = MLS_loss
                # Collect all losses (including weight-decay regularizers)
                reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='reg_loss')
                loss_list.append(reg_loss)
                self.watch_list['reg_loss'] = reg_loss
                total_loss = tf.add_n(loss_list, name='total_loss')
                grads = tf.gradients(total_loss, self.trainable_variables)
                # Training operators (backbone stays frozen: gradients flow
                # only to the UncertaintyModule scope)
                train_ops = []
                opt = tf.train.MomentumOptimizer(self.learning_rate, momentum=0.9)
                apply_gradient_op = opt.apply_gradients(list(zip(grads, self.trainable_variables)))
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                train_ops.extend([apply_gradient_op] + update_ops)
                train_ops.append(tf.assign_add(self.global_step, 1))
                self.train_op = tf.group(*train_ops)
                # Collect TF summary
                for k,v in self.watch_list.items():
                    tf.summary.scalar('losses/' + k, v)
                tf.summary.scalar('learning_rate', self.learning_rate)
                self.summary_op = tf.summary.merge_all()
                # Initialize variables
                self.sess.run(tf.local_variables_initializer())
                self.sess.run(tf.global_variables_initializer())
                self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=99)
        return
    @property
    def trainable_variables(self):
        # Only the uncertainty head is trained; the backbone is frozen.
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='UncertaintyModule')
    def save_model(self, model_dir, global_step):
        # Save variables every call; export the metagraph only once.
        with self.sess.graph.as_default():
            checkpoint_path = os.path.join(model_dir, 'ckpt')
            metagraph_path = os.path.join(model_dir, 'graph.meta')
            print('Saving variables...')
            self.saver.save(self.sess, checkpoint_path, global_step=global_step, write_meta_graph=False)
            if not os.path.exists(metagraph_path):
                print('Saving metagraph...')
                self.saver.export_meta_graph(metagraph_path)
    def restore_model(self, model_dir, restore_scopes=None):
        # Restore trainable variables (optionally filtered by scope name)
        # from the latest checkpoint in model_dir.
        var_list = self.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        with self.sess.graph.as_default():
            if restore_scopes is not None:
                var_list = [var for var in var_list if any([scope in var.name for scope in restore_scopes])]
            model_dir = os.path.expanduser(model_dir)
            ckpt_file = tf.train.latest_checkpoint(model_dir)
            print('Restoring {} variables from {} ...'.format(len(var_list), ckpt_file))
            saver = tf.train.Saver(var_list)
            saver.restore(self.sess, ckpt_file)
    def load_model(self, model_path, scope=None):
        # Load a full frozen model (graph + weights) for inference.
        with self.sess.graph.as_default():
            model_path = os.path.expanduser(model_path)
            # Load graph and variables separately.
            meta_files = [file for file in os.listdir(model_path) if file.endswith('.meta')]
            assert len(meta_files) == 1
            meta_file = os.path.join(model_path, meta_files[0])
            ckpt_file = tf.train.latest_checkpoint(model_path)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            saver = tf.train.import_meta_graph(meta_file, clear_devices=True, import_scope=scope)
            saver.restore(self.sess, ckpt_file)
            # Setup the I/O Tensors (looked up by the names given in initialize)
            self.images = self.graph.get_tensor_by_name('images:0')
            self.phase_train = self.graph.get_tensor_by_name('phase_train:0')
            self.keep_prob = self.graph.get_tensor_by_name('keep_prob:0')
            self.mu = self.graph.get_tensor_by_name('mu:0')
            self.sigma_sq = self.graph.get_tensor_by_name('sigma_sq:0')
            self.config = imp.load_source('network_config', os.path.join(model_path, 'config.py'))
    def train(self, images_batch, labels_batch, learning_rate, keep_prob):
        # One optimization step; returns the watched losses, the merged
        # summary and the (post-increment) global step.
        feed_dict = {   self.images: images_batch,
                        self.labels: labels_batch,
                        self.learning_rate: learning_rate,
                        self.keep_prob: keep_prob,
                        self.phase_train: True,}
        _, wl, sm = self.sess.run([self.train_op, self.watch_list, self.summary_op], feed_dict = feed_dict)
        step = self.sess.run(self.global_step)
        return wl, sm, step
    def extract_feature(self, images, batch_size, proc_func=None, verbose=False):
        # Batched inference: returns (mu, sigma_sq) arrays of shape
        # (num_images, embedding_size). proc_func, if given, preprocesses
        # each raw batch before it is fed to the graph.
        num_images = len(images)
        num_features = self.mu.shape[1]
        # np.ndarray allocates uninitialized buffers; every row is
        # overwritten by the loop below.
        mu = np.ndarray((num_images, num_features), dtype=np.float32)
        sigma_sq = np.ndarray((num_images, num_features), dtype=np.float32)
        start_time = time.time()
        for start_idx in range(0, num_images, batch_size):
            if verbose:
                elapsed_time = time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time))
                sys.stdout.write('# of images: %d Current image: %d Elapsed time: %s \t\r'
                    % (num_images, start_idx, elapsed_time))
            end_idx = min(num_images, start_idx + batch_size)
            images_batch = images[start_idx:end_idx]
            if proc_func:
                images_batch = proc_func(images_batch)
            feed_dict = {self.images: images_batch,
                        self.phase_train: False,
                        self.keep_prob: 1.0}
            mu[start_idx:end_idx], sigma_sq[start_idx:end_idx] = self.sess.run([self.mu, self.sigma_sq], feed_dict=feed_dict)
        if verbose:
            print('')
        return mu, sigma_sq
| [
"tensorflow.local_variables_initializer",
"imp.load_source",
"sys.stdout.write",
"tensorflow.gradients",
"tensorflow.group",
"tensorflow.GPUOptions",
"tensorflow.Graph",
"os.path.exists",
"os.listdir",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.summ... | [((1348, 1358), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1356, 1358), True, 'import tensorflow as tf\n'), ((1381, 1413), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (1394, 1413), True, 'import tensorflow as tf\n'), ((1434, 1532), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, allow_soft_placement=True,\n log_device_placement=False)\n', (1448, 1532), True, 'import tensorflow as tf\n'), ((1565, 1611), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph', 'config': 'tf_config'}), '(graph=self.graph, config=tf_config)\n', (1575, 1611), True, 'import tensorflow as tf\n'), ((5120, 5198), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""UncertaintyModule"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='UncertaintyModule')\n", (5137, 5198), True, 'import tensorflow as tf\n'), ((8270, 8326), 'numpy.ndarray', 'np.ndarray', (['(num_images, num_features)'], {'dtype': 'np.float32'}), '((num_images, num_features), dtype=np.float32)\n', (8280, 8326), True, 'import numpy as np\n'), ((8346, 8402), 'numpy.ndarray', 'np.ndarray', (['(num_images, num_features)'], {'dtype': 'np.float32'}), '((num_images, num_features), dtype=np.float32)\n', (8356, 8402), True, 'import numpy as np\n'), ((8424, 8435), 'time.time', 'time.time', ([], {}), '()\n', (8433, 8435), False, 'import time\n'), ((5323, 5354), 'os.path.join', 'os.path.join', (['model_dir', '"""ckpt"""'], {}), "(model_dir, 'ckpt')\n", (5335, 5354), False, 'import os\n'), ((5384, 5421), 'os.path.join', 'os.path.join', (['model_dir', '"""graph.meta"""'], {}), "(model_dir, 'graph.meta')\n", (5396, 5421), False, 'import os\n'), ((6086, 6115), 'os.path.expanduser', 'os.path.expanduser', (['model_dir'], {}), '(model_dir)\n', (6104, 6115), False, 'import 
os\n'), ((6140, 6177), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (6166, 6177), True, 'import tensorflow as tf\n'), ((6288, 6312), 'tensorflow.train.Saver', 'tf.train.Saver', (['var_list'], {}), '(var_list)\n', (6302, 6312), True, 'import tensorflow as tf\n'), ((6480, 6510), 'os.path.expanduser', 'os.path.expanduser', (['model_path'], {}), '(model_path)\n', (6498, 6510), False, 'import os\n'), ((6722, 6761), 'os.path.join', 'os.path.join', (['model_path', 'meta_files[0]'], {}), '(model_path, meta_files[0])\n', (6734, 6761), False, 'import os\n'), ((6786, 6824), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path'], {}), '(model_path)\n', (6812, 6824), True, 'import tensorflow as tf\n'), ((6963, 7040), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['meta_file'], {'clear_devices': '(True)', 'import_scope': 'scope'}), '(meta_file, clear_devices=True, import_scope=scope)\n', (6989, 7040), True, 'import tensorflow as tf\n'), ((1999, 2070), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, h, w, channels]', 'name': '"""images"""'}), "(tf.float32, shape=[None, h, w, channels], name='images')\n", (2013, 2070), True, 'import tensorflow as tf\n'), ((2101, 2154), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""labels"""'}), "(tf.int32, shape=[None], name='labels')\n", (2115, 2154), True, 'import tensorflow as tf\n'), ((2193, 2241), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (2207, 2241), True, 'import tensorflow as tf\n'), ((2275, 2319), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (2289, 2319), True, 'import tensorflow as tf\n'), ((2355, 2398), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': 
'"""phase_train"""'}), "(tf.bool, name='phase_train')\n", (2369, 2398), True, 'import tensorflow as tf\n'), ((2434, 2501), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'dtype': 'tf.int32', 'name': '"""global_step"""'}), "(0, trainable=False, dtype=tf.int32, name='global_step')\n", (2445, 2501), True, 'import tensorflow as tf\n'), ((2582, 2644), 'imp.load_source', 'imp.load_source', (['"""embedding_network"""', 'config.embedding_network'], {}), "('embedding_network', config.embedding_network)\n", (2597, 2644), False, 'import imp\n'), ((2822, 2886), 'imp.load_source', 'imp.load_source', (['"""uncertainty_module"""', 'config.uncertainty_module'], {}), "('uncertainty_module', config.uncertainty_module)\n", (2837, 2886), False, 'import imp\n'), ((3185, 3211), 'tensorflow.identity', 'tf.identity', (['mu'], {'name': '"""mu"""'}), "(mu, name='mu')\n", (3196, 3211), True, 'import tensorflow as tf\n'), ((3443, 3502), 'utils.tflib.mutual_likelihood_score_loss', 'mutual_likelihood_score_loss', (['self.labels', 'mu', 'log_sigma_sq'], {}), '(self.labels, mu, log_sigma_sq)\n', (3471, 3502), False, 'from utils.tflib import mutual_likelihood_score_loss\n'), ((3878, 3916), 'tensorflow.add_n', 'tf.add_n', (['loss_list'], {'name': '"""total_loss"""'}), "(loss_list, name='total_loss')\n", (3886, 3916), True, 'import tensorflow as tf\n'), ((3941, 3991), 'tensorflow.gradients', 'tf.gradients', (['total_loss', 'self.trainable_variables'], {}), '(total_loss, self.trainable_variables)\n', (3953, 3991), True, 'import tensorflow as tf\n'), ((4085, 4145), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['self.learning_rate'], {'momentum': '(0.9)'}), '(self.learning_rate, momentum=0.9)\n', (4111, 4145), True, 'import tensorflow as tf\n'), ((4276, 4318), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4293, 4318), True, 'import tensorflow as tf\n'), ((4488, 4508), 
'tensorflow.group', 'tf.group', (['*train_ops'], {}), '(*train_ops)\n', (4496, 4508), True, 'import tensorflow as tf\n'), ((4671, 4725), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.learning_rate'], {}), "('learning_rate', self.learning_rate)\n", (4688, 4725), True, 'import tensorflow as tf\n'), ((4760, 4782), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4780, 4782), True, 'import tensorflow as tf\n'), ((5588, 5618), 'os.path.exists', 'os.path.exists', (['metagraph_path'], {}), '(metagraph_path)\n', (5602, 5618), False, 'import os\n'), ((7538, 7575), 'os.path.join', 'os.path.join', (['model_path', '"""config.py"""'], {}), "(model_path, 'config.py')\n", (7550, 7575), False, 'import os\n'), ((8629, 8748), 'sys.stdout.write', 'sys.stdout.write', (["('# of images: %d Current image: %d Elapsed time: %s \\t\\r' % (num_images,\n start_idx, elapsed_time))"], {}), "('# of images: %d Current image: %d Elapsed time: %s \\t\\r' %\n (num_images, start_idx, elapsed_time))\n", (8645, 8748), False, 'import sys\n'), ((3256, 3276), 'tensorflow.exp', 'tf.exp', (['log_sigma_sq'], {}), '(log_sigma_sq)\n', (3262, 3276), True, 'import tensorflow as tf\n'), ((3677, 3730), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (3694, 3730), True, 'import tensorflow as tf\n'), ((4420, 4454), 'tensorflow.assign_add', 'tf.assign_add', (['self.global_step', '(1)'], {}), '(self.global_step, 1)\n', (4433, 4454), True, 'import tensorflow as tf\n'), ((4619, 4654), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('losses/' + k)", 'v'], {}), "('losses/' + k, v)\n", (4636, 4654), True, 'import tensorflow as tf\n'), ((4853, 4885), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4883, 4885), True, 'import tensorflow as tf\n'), ((4917, 4950), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (4948, 4950), True, 'import tensorflow as tf\n'), ((4996, 5020), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5018, 5020), True, 'import tensorflow as tf\n'), ((6608, 6630), 'os.listdir', 'os.listdir', (['model_path'], {}), '(model_path)\n', (6618, 6630), False, 'import os\n'), ((8588, 8599), 'time.time', 'time.time', ([], {}), '()\n', (8597, 8599), False, 'import time\n')] |
'''
@author : jhhalls
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from .datasets import make_wave
from .plot_helpers import cm2
def plot_linear_regression_wave():
    """Fit ordinary least squares on the wave dataset and plot the fitted
    line together with the data points on axes that cross at the origin."""
    X, y = make_wave(n_samples=60)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    grid = np.linspace(-3, 3, 100).reshape(-1, 1)
    model = LinearRegression().fit(X_train, y_train)
    print("w[0]: %f b: %f" % (model.coef_[0], model.intercept_))
    plt.figure(figsize=(8, 8))
    plt.plot(grid, model.predict(grid))
    plt.plot(X, y, 'o', c=cm2(0))
    axes = plt.gca()
    # Move the left/bottom spines to the center and hide the other two so
    # the axes cross at the origin.
    axes.spines['left'].set_position('center')
    axes.spines['right'].set_color('none')
    axes.spines['bottom'].set_position('center')
    axes.spines['top'].set_color('none')
    axes.set_ylim(-3, 3)
    axes.legend(["model", "training data"], loc="best")
    axes.grid(True)
    axes.set_aspect('equal')
| [
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure",
"numpy.linspace",
"sklearn.linear_model.LinearRegression"
] | [((355, 394), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(42)'}), '(X, y, random_state=42)\n', (371, 394), False, 'from sklearn.model_selection import train_test_split\n'), ((562, 588), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (572, 588), True, 'import matplotlib.pyplot as plt\n'), ((669, 678), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (676, 678), True, 'import matplotlib.pyplot as plt\n'), ((407, 430), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (418, 430), True, 'import numpy as np\n'), ((456, 474), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (472, 474), False, 'from sklearn.linear_model import LinearRegression\n')] |
"""Contains functions for geometrical calculations on a globe."""
import numpy as np
from math import sin, cos, atan2, asin, pi
# =============================================================================
# haversine
# =============================================================================
def haversine(start_coords, end_coords, radius=6371000):
    """Calculate the great-circle distance and initial bearing between two points.

    Parameters
    ----------
    start_coords : tuple
        Start coordinates (lat, lon) in decimal degrees (+ve = north/east)
    end_coords : tuple
        End coordinates (lat, lon) in decimal degrees (+ve = north/east)
    radius : float
        Radius of the body in meters. Default is set to the Earth radius
        (6371km)

    Returns
    -------
    distance : float
        The linear distance between the two points in meters
    bearing : float
        The initial bearing between the two points, normalized to
        [0, 2*pi) radians
    """
    # Unpack the coordinates and convert to radians
    lat1, lon1 = np.radians(start_coords)
    lat2, lon2 = np.radians(end_coords)
    # Calculate the change in lat and lon
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    # Square of half the chord length between the points (haversine formula)
    a = (sin(dlat/2))**2 + (cos(lat1) * cos(lat2) * (sin(dlon/2))**2)
    # Angular distance in radians
    c = 2 * atan2(np.sqrt(a), np.sqrt(1-a))
    # Find distance moved
    distance = radius * c
    # Initial bearing (forward azimuth); atan2 yields a value in (-pi, pi]
    bearing = atan2(sin(dlon) * cos(lat2),
                    (cos(lat1)*sin(lat2)) - (sin(lat1)*cos(lat2)*cos(dlon)))
    # Normalize into [0, 2*pi) with a single modulo instead of the former
    # iterative bearing_check loop. Guard against the rare float round-up
    # where a tiny negative bearing wraps onto exactly 2*pi.
    bearing = bearing % (2 * pi)
    if bearing >= 2 * pi:
        bearing = 0.0
    return distance, bearing
# =============================================================================
# calc_end_point
# =============================================================================
def calc_end_point(start_coords, dist, bearing, radius=6371000):
    """Calculate the final coordinates given a starting position and vector.

    Parameters
    ----------
    start_coords : tuple
        Starting coordinates (lat, lon) in decimal degrees (+ve = north/east)
    dist : float
        The distance moved in meters
    bearing : float
        The bearing of travel in degrees clockwise from north
    radius : float
        Radius of the body in meters. Default is set to the Earth radius
        (6371km)

    Returns
    -------
    numpy.ndarray
        The final coordinates [lat, lon] in decimal degrees (+ve = north/east)
    """
    # Work entirely in radians
    lat0, lon0 = np.radians(start_coords)
    heading = np.radians(bearing)
    # Angular distance travelled along the great circle
    delta = dist / radius
    # Destination latitude from the spherical law of cosines
    final_lat = asin(sin(lat0) * cos(delta) + cos(lat0) * sin(delta) * cos(heading))
    # Destination longitude, relative to the start
    final_lon = lon0 + atan2(sin(heading) * sin(delta) * cos(lat0),
                             cos(delta) - sin(lat0) * sin(final_lat))
    return np.degrees([final_lat, final_lon])
# =============================================================================
# Add a bearing checker
# =============================================================================
def bearing_check(bearing, radians=True, max_iter=1000):
    """Normalize a bearing into [0, 2*pi) radians (or [0, 360) degrees).

    Parameters
    ----------
    bearing : float
        The bearing value to normalize.
    radians : bool, optional
        If True the bearing is treated as radians, otherwise as degrees.
        Default is True.
    max_iter : int, optional
        Retained for backward compatibility; unused. The previous loop-based
        implementation never incremented its iteration counter (both branches
        hit ``continue`` first), so the guard could never fire and an
        infinite bearing looped forever. Normalization is now a single
        modulo operation.

    Returns
    -------
    float
        The normalized bearing, in the same unit as the input.

    Raises
    ------
    ValueError
        If the bearing is infinite and therefore cannot be normalized.
    """
    # Check if degrees
    if not radians:
        bearing = np.radians(bearing)
    if np.isinf(bearing):
        # This is the case the old (unreachable) max_iter guard was meant to
        # catch; the old code hung forever instead.
        raise ValueError('Cannot normalize an infinite bearing')
    two_pi = 2 * pi
    if not 0 <= bearing < two_pi:
        bearing = bearing % two_pi
        # Guard against float round-up: a tiny negative input can wrap to
        # exactly two_pi, which must map to 0.0 to stay inside [0, two_pi).
        if bearing >= two_pi:
            bearing = 0.0
    # Check if degrees
    if not radians:
        bearing = np.degrees(bearing)
    return bearing
if __name__ == '__main__':
    # Example run: distance/bearing between two fixed points, then the
    # destination reached from the first point along a given vector.
    distance, initial_bearing = haversine([37.7505, 14.9934], [37.7905, 15.1386])
    print(distance, np.degrees(initial_bearing))
    end_lat, end_lon = calc_end_point([37.7505, 14.9934], 13520, 70.74)
    print(end_lat, end_lon)
| [
"numpy.radians",
"numpy.sqrt",
"math.cos",
"numpy.degrees",
"math.sin"
] | [((1034, 1058), 'numpy.radians', 'np.radians', (['start_coords'], {}), '(start_coords)\n', (1044, 1058), True, 'import numpy as np\n'), ((1076, 1098), 'numpy.radians', 'np.radians', (['end_coords'], {}), '(end_coords)\n', (1086, 1098), True, 'import numpy as np\n'), ((2605, 2629), 'numpy.radians', 'np.radians', (['start_coords'], {}), '(start_coords)\n', (2615, 2629), True, 'import numpy as np\n'), ((2642, 2661), 'numpy.radians', 'np.radians', (['bearing'], {}), '(bearing)\n', (2652, 2661), True, 'import numpy as np\n'), ((3074, 3104), 'numpy.degrees', 'np.degrees', (['[end_lat, end_lon]'], {}), '([end_lat, end_lon])\n', (3084, 3104), True, 'import numpy as np\n'), ((3871, 3890), 'numpy.radians', 'np.radians', (['bearing'], {}), '(bearing)\n', (3881, 3890), True, 'import numpy as np\n'), ((4304, 4323), 'numpy.degrees', 'np.degrees', (['bearing'], {}), '(bearing)\n', (4314, 4323), True, 'import numpy as np\n'), ((4460, 4479), 'numpy.degrees', 'np.degrees', (['bearing'], {}), '(bearing)\n', (4470, 4479), True, 'import numpy as np\n'), ((1250, 1263), 'math.sin', 'sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (1253, 1263), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1367, 1377), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (1374, 1377), True, 'import numpy as np\n'), ((1379, 1393), 'numpy.sqrt', 'np.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (1386, 1393), True, 'import numpy as np\n'), ((1503, 1512), 'math.sin', 'sin', (['dlon'], {}), '(dlon)\n', (1506, 1512), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1515, 1524), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (1518, 1524), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1269, 1278), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (1272, 1278), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1281, 1290), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (1284, 1290), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1294, 1307), 'math.sin', 'sin', (['(dlon / 2)'], 
{}), '(dlon / 2)\n', (1297, 1307), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1547, 1556), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (1550, 1556), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1557, 1566), 'math.sin', 'sin', (['lat2'], {}), '(lat2)\n', (1560, 1566), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1591, 1600), 'math.cos', 'cos', (['dlon'], {}), '(dlon)\n', (1594, 1600), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2798, 2806), 'math.sin', 'sin', (['lat'], {}), '(lat)\n', (2801, 2806), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2809, 2822), 'math.cos', 'cos', (['ang_dist'], {}), '(ang_dist)\n', (2812, 2822), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2879, 2889), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2882, 2889), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2985, 2993), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (2988, 2993), False, 'from math import sin, cos, atan2, asin, pi\n'), ((3021, 3034), 'math.cos', 'cos', (['ang_dist'], {}), '(ang_dist)\n', (3024, 3034), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1571, 1580), 'math.sin', 'sin', (['lat1'], {}), '(lat1)\n', (1574, 1580), False, 'from math import sin, cos, atan2, asin, pi\n'), ((1581, 1590), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (1584, 1590), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2852, 2860), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (2855, 2860), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2863, 2876), 'math.sin', 'sin', (['ang_dist'], {}), '(ang_dist)\n', (2866, 2876), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2956, 2966), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2959, 2966), False, 'from math import sin, cos, atan2, asin, pi\n'), ((2969, 2982), 'math.sin', 'sin', (['ang_dist'], {}), '(ang_dist)\n', (2972, 2982), False, 'from math import sin, cos, atan2, asin, pi\n'), ((3038, 3046), 'math.sin', 
'sin', (['lat'], {}), '(lat)\n', (3041, 3046), False, 'from math import sin, cos, atan2, asin, pi\n'), ((3047, 3059), 'math.sin', 'sin', (['end_lat'], {}), '(end_lat)\n', (3050, 3059), False, 'from math import sin, cos, atan2, asin, pi\n')] |
"""
Module with utilities
Author: <NAME>
Email: <EMAIL>
"""
from ..tools import geometry
import os
import shutil
import subprocess
import contextlib
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import re
import logging
logger = logging.getLogger(__name__)
def translateToceroZcoord(moleculeRDkit):
    """
    Snap the first atom of the molecule exactly onto the point (1, 1, 0).

    If the first atom already sits within 1e-3 of (1, 1, 0), the whole
    conformer is translated so the atom lands exactly on that point;
    otherwise the molecule is left untouched. The inline comment suggests
    the intent is to avoid an exact overlap with a structural dummy atom
    ("dummy 3") placed there — TODO confirm against the zmat setup.
    (The previous docstring said "origin", but the code targets (1, 1, 0).)

    Parameters
    ----------
    moleculeRDkit : RDkit molecule
        An RDkit molecule with at least one conformer; modified in place.

    Returns
    -------
    list
        [shiftX, shiftY, shiftZ] translation that was removed from every
        atom, or [0.0, 0.0, 0.0] when no translation was applied.
    """
    from rdkit.Chem import rdMolTransforms
    conf = moleculeRDkit.GetConformer()
    # avoid first atom overlap with dummy 3
    if abs(conf.GetAtomPosition(0).x-1.0)<1e-3 and abs(conf.GetAtomPosition(0).y-1.0)<1e-3 and abs(conf.GetAtomPosition(0).z-0.0)<1e-3:
        # Residual offset of atom 0 from the target point (1, 1, 0).
        shiftX = conf.GetAtomPosition(0).x - 1.0
        shiftY = conf.GetAtomPosition(0).y - 1.0
        shiftZ = conf.GetAtomPosition(0).z
        # 4x4 homogeneous transform subtracting that offset from all atoms.
        translationMatrix = np.array( [[1, 0, 0, -shiftX],
                                    [0, 1, 0, -shiftY],
                                    [0, 0, 1, -shiftZ],
                                    [0, 0, 0, 1]], dtype=np.double)
        rdMolTransforms.TransformConformer(conf, translationMatrix)
    else:
        shiftX = 0.0
        shiftY = 0.0
        shiftZ = 0.0
    return [shiftX, shiftY, shiftZ]
def generateFolder(folderName):
    """
    Create a fresh (empty) folder and return its absolute path.

    Any existing folder with the same name is wiped first, unless it is the
    current working directory, which is left untouched.

    Parameters
    ----------
    folderName : str
        Folder name to be created

    Returns
    -------
    str
        Full path of the newly created folder
    """
    target = os.path.abspath(folderName)
    if os.getcwd() != target:
        # Start from a clean slate; ignore_errors makes this a no-op when
        # the folder does not exist yet.
        shutil.rmtree(target, ignore_errors=True)
        os.makedirs(target)
    return target
def generateRDkitMolecule(ifile, smile, workdir, molname, debug = False):
    """
    Generate the RDkit molecule including reordering atoms in the molecule to avoid BOSS
    conflicts

    A SMILES string takes precedence over the input file. PDB, MOL2 and MOL
    files are parsed directly; any other format is converted to PDB through
    open babel when available, otherwise the program exits.

    Parameters
    ----------
    ifile : str
        Input file name
    smile : str
        Input SMILE string
    workdir : str
        Working directory
    molname : str
        Molecule name
    debug : bool, optional
        Debug output in logger, by default False

    Returns
    -------
    tuple
        (molecule, newIndexToOriginalIndex, atomsNameOriginal,
        residueNameOriginal): the reordered RDkit molecule, a dict mapping
        new atom indices to original ones, and the atom/residue names read
        from PDB or MOL2 inputs (empty lists/strings otherwise).
    """
    if smile: logger.info('Generating molecule from input SMILES: '+ smile)
    else: logger.info('Parsing molecule from input: '+ ifile)
    # Scratch PDB path, also reused as the obabel conversion target.
    sfile = os.path.join(workdir, molname+"-debug.pdb")
    atomsNameOriginal, residueNameOriginal = [], []
    if smile:
        # Build 3D coordinates from the SMILES; the fixed seed keeps the
        # embedding reproducible between runs.
        molecule = Chem.MolFromSmiles(smile)
        molecule = Chem.AddHs(molecule)
        AllChem.EmbedMolecule(molecule,randomSeed=0xf00d)
    else:
        if ifile.lower().endswith('.pdb'):
            molecule = Chem.MolFromPDBFile(ifile,removeHs=False)
            atomsNameOriginal, residueNameOriginal = getAtomsNameAndMolFromPDB(ifile)
        elif ifile.lower().endswith('.mol2'):
            molecule = Chem.MolFromMol2File(ifile,removeHs=False)
            atomsNameOriginal, residueNameOriginal = getAtomsNameAndMolFromMOL2(ifile)
        elif ifile.lower().endswith('.mol'): molecule = Chem.MolFromMolFile(ifile,removeHs=False)
        else:
            # Unknown format: fall back to an open babel conversion to PDB.
            babel = shutil.which("obabel")
            if babel != None:
                subprocess.run([babel, ifile, "-opdb", "-O", sfile], stdin =subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                molecule = Chem.MolFromPDBFile(sfile,removeHs=False)
            else:
                logger.error('obabel NOT found in the system')
                logger.error('Input file format CAN NOT be process: '+ ifile)
                logger.error('Please install open BABEL in your system or provide an input in PDB or MOL file')
                exit()
    # Reorder atoms so each one can be placed from previously defined atoms.
    atomsIndexLstWithRightOrder = buildProperOrderAtomIndexLst(molecule, molname, workdir)
    molecule, newIndexToOriginalIndex = generateNewMoleculeWithProperOrder(molecule, atomsIndexLstWithRightOrder)
    # SMILES inputs have no meaningful original order: use the identity map.
    if smile: newIndexToOriginalIndex = {i:i for i in range(len(atomsIndexLstWithRightOrder))}
    if debug:
        Chem.MolToPDBFile(molecule, sfile)
    if isAtomOrderWrong(molecule):
        logger.error('ERROR in atom order!!!')
        exit()
    return molecule, newIndexToOriginalIndex, atomsNameOriginal, residueNameOriginal
def getAtomsNameAndMolFromPDB(ifile):
    """Extract atom names and the residue name from a PDB file.

    Parameters
    ----------
    ifile : str
        Path to the PDB file.

    Returns
    -------
    tuple
        (atomsName, resName): the atom-name column (chars 13-16) of every
        ATOM/HETATM record, and the residue-name column (chars 18-20) of the
        first record — or an empty string when the file has no such records.
    """
    logger.info('Retreiving atom names form PDB:' + ifile)
    # Context manager closes the handle deterministically (the previous
    # version leaked the file opened inside the comprehension).
    with open(ifile) as handle:
        atomsData = [[line[12:16], line[17:20]] for line in handle
                     if line.startswith(('ATOM', 'HETATM'))]
    atomsName = [item[0] for item in atomsData]
    # Guard against PDB files without atom records instead of crashing with
    # an IndexError; callers treat '' as "no residue name found".
    resName = atomsData[0][1] if atomsData else ''
    return atomsName, resName
def guessFormalMoleculeCharge(moleculeRDkit, userCharge):
    """
    Pick the formal molecular charge: the user-supplied value when given,
    otherwise the charge RDkit derives from the molecule.

    Parameters
    ----------
    moleculeRDkit : rdkit molecule object
        RDkit molecule object
    userCharge : int or None
        Charge defined by the user, or None to auto-detect.

    Returns
    -------
    int
        Final charge of the molecule
    """
    autoCharge = Chem.rdmolops.GetFormalCharge(moleculeRDkit)
    if userCharge is None:
        userCharge = autoCharge
        logger.info('AUTO DETECTED CHARGE: ' + str(autoCharge))
        return userCharge
    # Warn when the user's value disagrees with the auto-detected one.
    if autoCharge != userCharge:
        logger.warning('AUTO DETECTED CHARGE: ' + str(autoCharge) + ' USER INPUT CHARGE: ' + str(userCharge) + ' (NOT MATCHING CHARGES)')
    logger.info('MOLECULE CHARGE USED: ' + str(userCharge))
    return userCharge
def guessResname(residueNameOriginal, userResname):
    """
    Pick the residue name: the user's choice when given, otherwise the name
    found in the input file, falling back to 'MOL'.

    Parameters
    ----------
    residueNameOriginal : str
        Residue name found in input file (may be empty)
    userResname : str or None
        Residue name defined by user, or None to auto-assign.

    Returns
    -------
    str
        Residue name, truncated to at most three characters.
    """
    if userResname is None:
        userResname = residueNameOriginal if len(residueNameOriginal) != 0 else 'MOL'
        logger.info('AUTO ASSIGNED RESIDUE NAME: '+ userResname)
    else:
        logger.info('RESIDUE NAME USED: '+ userResname)
    return userResname[:3]
def guessMoleculeName(ifileName, userMolName, label):
    """
    Determine the molecule name from the inputs.

    When no name is given, it is derived from the input file stem, or built
    as 'mol_<label>' when there is no file either. A user-supplied name gets
    a '_B' suffix for any label other than 'A'.

    Parameters
    ----------
    ifileName : str or None
        Input file name
    userMolName : str or None
        Input molecule name
    label : str
        Label added to the molecule name (e.g. A or B for mol_A / mol_B)

    Returns
    -------
    str
        Final generated molecule name
    """
    if userMolName is None:
        if ifileName is None:
            userMolName = 'mol_' + label
        else:
            stem, _ = os.path.splitext(os.path.basename(ifileName))
            userMolName = stem
        logger.info('AUTO ASSIGNED MOLECULE NAME: '+ userMolName)
        return userMolName
    if label != 'A':
        userMolName = userMolName + '_B'
    logger.info('MOLECULE NAME USED: '+ userMolName)
    return userMolName
def guessChargeAlgorithm(userChargeAlgorithm, charge):
    """
    Choose the charge model used to generate molecular charges.

    CM1A-LBCC is only valid for neutral molecules; charged molecules are
    forced onto CM1A even if the user asked for CM1A-LBCC.

    Parameters
    ----------
    userChargeAlgorithm : str or None
        Charge method defined by the user (CM1A or CM1A-LBCC), or None to
        auto-select.
    charge : int
        Formal charge of the input molecule

    Returns
    -------
    str
        Charge method (CM1A or CM1A-LBCC)
    """
    if userChargeAlgorithm is None:
        userChargeAlgorithm = 'CM1A-LBCC' if charge == 0 else 'CM1A'
        logger.info('AUTO DETECTED CHARGE ALGORITHM: ' + userChargeAlgorithm)
        return userChargeAlgorithm
    if charge != 0 and userChargeAlgorithm == 'CM1A-LBCC':
        # Override: LBCC corrections do not apply to charged molecules.
        userChargeAlgorithm = 'CM1A'
        logger.warning('INCOMPATIBLE CHARGE ALGORTIHM CM1A-LBCC with charged molecules')
        logger.warning('AUTO ASSIGNED CM1A charge model')
    logger.info('CHARGE ALGORITHM USED: ' + userChargeAlgorithm)
    return userChargeAlgorithm
def getAtomsNameAndMolFromMOL2(ifile):
    """Extract atom names and the residue name from a Tripos MOL2 file.

    Parses the text between the @<TRIPOS>ATOM and @<TRIPOS>BOND section
    markers and reads fixed columns from each atom record: characters 9-12
    for the atom name and 60-62 for the residue/substructure name.
    NOTE(review): the fixed-column slicing assumes the MOL2 writer aligned
    the fields like the files this was developed against — confirm for
    other generators.

    Parameters
    ----------
    ifile : str
        Path to the MOL2 file.

    Returns
    -------
    tuple
        (list of atom-name strings, residue name of the first atom record).
        Raises AttributeError when the section markers are missing and
        IndexError when the ATOM section is empty.
    """
    logger.info('Retreiving atom names form MOL:' + ifile)
    with open(ifile) as f: data = f.read()
    # Grab the ATOM section body; [1:-1] drops the lines carrying the markers.
    atomsDat = re.search(r'@<TRIPOS>ATOM(.*?)@<TRIPOS>BOND', data, re.DOTALL).group().splitlines()[1:-1]
    atomsData = [[line[8:12], line[59:62]] for line in atomsDat]
    atomsName = [item[0] for item in atomsData]
    resName = atomsData[0][1]
    return atomsName, resName
def getClosestAtomToTheCenterOfMassIndex(molecule):
    """
    Get the heavy atom closest to the centroid of the molecule.

    NOTE(review): despite the name, this uses
    Chem.rdMolTransforms.ComputeCentroid, which presumably returns the
    geometric (unweighted) centroid rather than a mass-weighted center —
    confirm whether a true center of mass is intended.

    Parameters
    ----------
    molecule : RDkit molecule
        RDkit molecule with at least one conformer.

    Returns
    -------
    int
        Index of the non-hydrogen atom closest to the centroid.
    """
    conf = molecule.GetConformer(0)
    comPoint = Chem.rdMolTransforms.ComputeCentroid(conf)
    distances = []
    for atom in molecule.GetAtoms():
        if atom.GetSymbol() == 'H': continue # Skip hydrogens
        atom_position = conf.GetAtomPosition(atom.GetIdx())
        # Pair each candidate atom with its distance to the centroid.
        distances.append([geometry.getDistance([atom_position.x, atom_position.y, atom_position.z], comPoint), atom])
    # Smallest distance first; keep the winning [distance, atom] pair.
    atomClosestToCenterOfMass = sorted(distances, key=lambda x: x[0])[0]
    logger.info('Center of mass position: %6.3f %6.3f %6.3f' % (comPoint.x, comPoint.y, comPoint.z))
    logger.info('Closest atom to the center of mass: ' + atomClosestToCenterOfMass[1].GetSymbol()+ ' ' +str(atomClosestToCenterOfMass[1].GetIdx())
                + ' with distance : %6.3f ' % atomClosestToCenterOfMass[0] + ' (A) ')
    return atomClosestToCenterOfMass[1].GetIdx()
def generateNewMoleculeWithProperOrder(molecule, atomsIndexLstWithRightOrder):
    """
    Regenerate the same molecule but using the right atoms order.

    A PDB block is written atom by atom following the requested order and
    then parsed back into a fresh RDkit molecule.

    Parameters
    ----------
    molecule : RDkit molecule
        RDkit molecule with the original atom order
    atomsIndexLstWithRightOrder : List
        Index atoms list with the right atom order

    Returns
    -------
    newMolecule : RDkit molecule
        New RDkit molecule with right atom order for zmat
    newIndexToOriginalIndex : Dict
        Maps each new atom index (position in the reordered molecule) to the
        corresponding original atom index
    """
    conf = molecule.GetConformer(0)
    newAtomLabels = {}   # per-element counters used to synthesize atom names
    newMoleculeBlock = ''
    newIndexToOriginalIndex = {}
    for i, item in enumerate(atomsIndexLstWithRightOrder):
        newIndexToOriginalIndex[i] = item
        atom = molecule.GetAtomWithIdx(item)
        atom_position = conf.GetAtomPosition(item)
        try:
            # Reuse the original PDB metadata when the input provided it.
            residueInfo = atom.GetPDBResidueInfo()
            resname = residueInfo.GetResidueName()
            atomName = residueInfo.GetName()
        except:
            # No PDB info (e.g. molecule built from SMILES): synthesize
            # names like C1, C2, H1, ... and a generic residue name.
            element = atom.GetSymbol()
            if not element in newAtomLabels.keys(): newAtomLabels[element] = 1
            else: newAtomLabels[element] += 1
            atomName = element + str(newAtomLabels[element])
            resname = 'MOL'
        # One fixed-width PDB ATOM record per atom, serial = new index.
        line = 'ATOM%7d%5s%4s%6d%12.3f%8.3f%8.3f 1.00 0.00%12s \n' % (i, atomName, resname, 1,atom_position.x,
                                                atom_position.y, atom_position.z, atom.GetSymbol())
        newMoleculeBlock += line
    newMolecule = Chem.MolFromPDBBlock(newMoleculeBlock, removeHs=False)
    return newMolecule, newIndexToOriginalIndex
def isAtomOrderWrong(moleculeA):
    """
    Check whether the atoms of the molecule are in a proper order, i.e.
    whether each atom position can be generated from previous atoms.

    For every bond, the larger of its two atom indices is collected; the
    order is wrong when that sequence of indices has gaps, meaning some
    atom is never bonded to a lower-indexed atom.

    Parameters
    ----------
    moleculeA : RDkit molecule
        An RDkit molecule object

    Returns
    -------
    bool
        True if the order is wrong.
    """
    pairs = sorted(
        [sorted([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()], reverse=True) for bond in moleculeA.GetBonds()],
        key=lambda pair: pair[0])
    # Unique "higher end" index of each bond, in first-seen order.
    higherIndices = list(dict.fromkeys(pair[0] for pair in pairs))
    gaps = set(range(min(higherIndices), max(higherIndices) + 1)) - set(higherIndices)
    return len(gaps) > 0
def fixNonIntegerCharge(molecule):
    """Round the net molecular charge to the nearest integer in place.

    Partial charges can drift away from an integer total through
    floating-point accumulation; any residual is folded into the charge of
    the last atom so the molecule keeps an integer net charge. Structural
    dummy atoms are excluded from the total.

    The previous version shadowed the builtin ``sum`` and ended with a dead
    recomputation of the total whose result was discarded; both removed.

    Parameters
    ----------
    molecule : molecule object
        Object exposing ``atoms`` (each with a ``charge`` attribute) and
        ``numberOfStructuralDummyAtoms``; modified in place.
    """
    total = np.sum([atom.charge for atom in molecule.atoms[molecule.numberOfStructuralDummyAtoms:]])
    diff = np.round(total) - total
    # Only touch the charges when the drift is numerically significant.
    if abs(diff) > 1e-7:
        molecule.atoms[-1].charge += diff
def buildProperOrderAtomIndexLst(molecule, molname, workdir):
    """
    Generate atoms index list using the closest atom the centre of mass as the first one and considering
    that atoms have to be generated from the previous atoms positions (Zmat condition).

    Heavy atoms are placed first (seeded with the atom closest to the
    centroid), hydrogens afterwards.

    Parameters
    ----------
    molecule : RDkit molecule
        Rdkit molecule object
    molname : str
        Molecule name (currently unused)
    workdir : str
        Working folder (currently unused)

    Returns
    -------
    List
        Atoms index List in the right order
    """
    heavyAtomsIndexLst = [atom.GetIdx() for atom in molecule.GetAtoms() if atom.GetSymbol() != 'H']
    firstAtomIndex = getClosestAtomToTheCenterOfMassIndex(molecule)
    atomsIndexLstWithRightOrder = [firstAtomIndex]
    # Grow the list with heavy atoms connected to already-placed atoms.
    addAtomsToTheList(molecule, atomsIndexLstWithRightOrder, heavyAtomsIndexLst)
    hydrogenAtomsIndexLst = [atom.GetIdx() for atom in molecule.GetAtoms() if atom.GetSymbol() == 'H']
    # Then append the hydrogens with the same connectivity rule.
    addAtomsToTheList(molecule, atomsIndexLstWithRightOrder, hydrogenAtomsIndexLst)
    return atomsIndexLstWithRightOrder
def addAtomsToTheList(molecule, atomsIndexLst, heavyAtomsIndexLst):
    """
    Grow atomsIndexLst (in place) with every atom from heavyAtomsIndexLst
    that is bonded, directly or transitively, to an atom already in the list.

    Atoms are appended in repeated passes over heavyAtomsIndexLst: an atom
    is added as soon as one of its neighbors is already in the list, so each
    atom's position can later be defined from previously placed atoms
    (the z-matrix condition).

    The previous implementation recursed once per pass and never terminated
    for fragments disconnected from the seed (unbounded recursion); this
    version iterates and stops as soon as a pass makes no progress.

    Parameters
    ----------
    molecule : RDkit molecule
        RDkit molecule
    atomsIndexLst : list
        Atom index list, seeded with at least one atom; extended in place.
    heavyAtomsIndexLst : list
        Indices of the atoms to place.
    """
    while True:
        missing = 0
        added = 0
        for atomIndex in heavyAtomsIndexLst:
            if atomIndex in atomsIndexLst:
                continue
            atom = molecule.GetAtomWithIdx(atomIndex)
            if any(neighbor.GetIdx() in atomsIndexLst for neighbor in atom.GetNeighbors()):
                atomsIndexLst.append(atom.GetIdx())
                added += 1
            else:
                missing += 1
        if missing == 0:
            return
        if added == 0:
            # No progress possible: the remaining atoms belong to a fragment
            # disconnected from the seed. Stop instead of looping forever.
            return
def generateLogger(filename, workdir):
    """
    Build the module logger: INFO and above to '<workdir>/<filename>.log',
    ERROR and above to the console (without a formatter, as before).

    Parameters
    ----------
    filename : str
        Base name of the log file (without extension).
    workdir : str
        Folder where the log file is created (truncated on each run).

    Returns
    -------
    logging.Logger
        The configured logger.
    """
    log_path = os.path.join(workdir, filename + '.log')
    formatter = logging.Formatter("%(asctime)s — %(levelname)s — %(message)s",
                                  datefmt='%Y-%m-%d %H:%M:%S')
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.ERROR)
    file_handler = logging.FileHandler(log_path, 'w')
    file_handler.setFormatter(formatter)
    log = logging.getLogger(__name__)
    log.addHandler(stream_handler)
    log.addHandler(file_handler)
    log.setLevel(logging.INFO)
    return log
@contextlib.contextmanager
def changedir(dirname):
    """
    Context manager that temporarily changes the working directory.

    The previous working directory is always restored on exit, even when
    the body of the ``with`` block raises.

    Parameters
    ----------
    dirname : str
        Folder to switch into for the duration of the context.
    """
    currentdir = os.getcwd()
    try:
        os.chdir(dirname)
        yield
    finally:
        # Restore the original directory no matter what happened inside.
        os.chdir(currentdir) | [
"logging.getLogger",
"rdkit.Chem.MolFromMol2File",
"logging.StreamHandler",
"numpy.array",
"rdkit.Chem.MolFromPDBBlock",
"rdkit.Chem.rdMolTransforms.TransformConformer",
"re.search",
"os.path.exists",
"rdkit.Chem.MolFromPDBFile",
"rdkit.Chem.rdMolTransforms.ComputeCentroid",
"subprocess.run",
... | [((262, 289), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (279, 289), False, 'import logging\n'), ((1767, 1794), 'os.path.abspath', 'os.path.abspath', (['folderName'], {}), '(folderName)\n', (1782, 1794), False, 'import os\n'), ((2694, 2739), 'os.path.join', 'os.path.join', (['workdir', "(molname + '-debug.pdb')"], {}), "(workdir, molname + '-debug.pdb')\n", (2706, 2739), False, 'import os\n'), ((5383, 5427), 'rdkit.Chem.rdmolops.GetFormalCharge', 'Chem.rdmolops.GetFormalCharge', (['moleculeRDkit'], {}), '(moleculeRDkit)\n', (5412, 5427), False, 'from rdkit import Chem\n'), ((9146, 9188), 'rdkit.Chem.rdMolTransforms.ComputeCentroid', 'Chem.rdMolTransforms.ComputeCentroid', (['conf'], {}), '(conf)\n', (9182, 9188), False, 'from rdkit import Chem\n'), ((11568, 11622), 'rdkit.Chem.MolFromPDBBlock', 'Chem.MolFromPDBBlock', (['newMoleculeBlock'], {'removeHs': '(False)'}), '(newMoleculeBlock, removeHs=False)\n', (11588, 11622), False, 'from rdkit import Chem\n'), ((12434, 12527), 'numpy.sum', 'np.sum', (['[atom.charge for atom in molecule.atoms[molecule.numberOfStructuralDummyAtoms:]\n ]'], {}), '([atom.charge for atom in molecule.atoms[molecule.\n numberOfStructuralDummyAtoms:]])\n', (12440, 12527), True, 'import numpy as np\n'), ((12633, 12726), 'numpy.sum', 'np.sum', (['[atom.charge for atom in molecule.atoms[molecule.numberOfStructuralDummyAtoms:]\n ]'], {}), '([atom.charge for atom in molecule.atoms[molecule.\n numberOfStructuralDummyAtoms:]])\n', (12639, 12726), True, 'import numpy as np\n'), ((14739, 14835), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s — %(levelname)s — %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(asctime)s — %(levelname)s — %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (14756, 14835), False, 'import logging\n'), ((14884, 14907), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (14905, 14907), False, 'import logging\n'), ((14968, 15008), 
'os.path.join', 'os.path.join', (['workdir', "(filename + '.log')"], {}), "(workdir, filename + '.log')\n", (14980, 15008), False, 'import os\n'), ((15026, 15060), 'logging.FileHandler', 'logging.FileHandler', (['LOG_FILE', '"""w"""'], {}), "(LOG_FILE, 'w')\n", (15045, 15060), False, 'import logging\n'), ((15116, 15143), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (15133, 15143), False, 'import logging\n'), ((15492, 15503), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15501, 15503), False, 'import os\n'), ((1068, 1173), 'numpy.array', 'np.array', (['[[1, 0, 0, -shiftX], [0, 1, 0, -shiftY], [0, 0, 1, -shiftZ], [0, 0, 0, 1]]'], {'dtype': 'np.double'}), '([[1, 0, 0, -shiftX], [0, 1, 0, -shiftY], [0, 0, 1, -shiftZ], [0, 0,\n 0, 1]], dtype=np.double)\n', (1076, 1173), True, 'import numpy as np\n'), ((1301, 1360), 'rdkit.Chem.rdMolTransforms.TransformConformer', 'rdMolTransforms.TransformConformer', (['conf', 'translationMatrix'], {}), '(conf, translationMatrix)\n', (1335, 1360), False, 'from rdkit.Chem import rdMolTransforms\n'), ((1803, 1814), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1812, 1814), False, 'import os\n'), ((1844, 1873), 'os.path.exists', 'os.path.exists', (['newfolderName'], {}), '(newfolderName)\n', (1858, 1873), False, 'import os\n'), ((1941, 1967), 'os.makedirs', 'os.makedirs', (['newfolderName'], {}), '(newfolderName)\n', (1952, 1967), False, 'import os\n'), ((2827, 2852), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (2845, 2852), False, 'from rdkit import Chem\n'), ((2872, 2892), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['molecule'], {}), '(molecule)\n', (2882, 2892), False, 'from rdkit import Chem\n'), ((2901, 2950), 'rdkit.Chem.AllChem.EmbedMolecule', 'AllChem.EmbedMolecule', (['molecule'], {'randomSeed': '(61453)'}), '(molecule, randomSeed=61453)\n', (2922, 2950), False, 'from rdkit.Chem import AllChem\n'), ((4419, 4453), 'rdkit.Chem.MolToPDBFile', 'Chem.MolToPDBFile', 
(['molecule', 'sfile'], {}), '(molecule, sfile)\n', (4436, 4453), False, 'from rdkit import Chem\n'), ((12535, 12548), 'numpy.round', 'np.round', (['sum'], {}), '(sum)\n', (12543, 12548), True, 'import numpy as np\n'), ((15529, 15546), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (15537, 15546), False, 'import os\n'), ((15583, 15603), 'os.chdir', 'os.chdir', (['currentdir'], {}), '(currentdir)\n', (15591, 15603), False, 'import os\n'), ((1875, 1923), 'shutil.rmtree', 'shutil.rmtree', (['newfolderName'], {'ignore_errors': '(True)'}), '(newfolderName, ignore_errors=True)\n', (1888, 1923), False, 'import shutil\n'), ((3043, 3085), 'rdkit.Chem.MolFromPDBFile', 'Chem.MolFromPDBFile', (['ifile'], {'removeHs': '(False)'}), '(ifile, removeHs=False)\n', (3062, 3085), False, 'from rdkit import Chem\n'), ((3256, 3299), 'rdkit.Chem.MolFromMol2File', 'Chem.MolFromMol2File', (['ifile'], {'removeHs': '(False)'}), '(ifile, removeHs=False)\n', (3276, 3299), False, 'from rdkit import Chem\n'), ((3444, 3486), 'rdkit.Chem.MolFromMolFile', 'Chem.MolFromMolFile', (['ifile'], {'removeHs': '(False)'}), '(ifile, removeHs=False)\n', (3463, 3486), False, 'from rdkit import Chem\n'), ((3521, 3543), 'shutil.which', 'shutil.which', (['"""obabel"""'], {}), "('obabel')\n", (3533, 3543), False, 'import shutil\n'), ((7047, 7074), 'os.path.basename', 'os.path.basename', (['ifileName'], {}), '(ifileName)\n', (7063, 7074), False, 'import os\n'), ((3592, 3719), 'subprocess.run', 'subprocess.run', (["[babel, ifile, '-opdb', '-O', sfile]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([babel, ifile, '-opdb', '-O', sfile], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", (3606, 3719), False, 'import subprocess\n'), ((3745, 3787), 'rdkit.Chem.MolFromPDBFile', 'Chem.MolFromPDBFile', (['sfile'], {'removeHs': '(False)'}), '(sfile, removeHs=False)\n', (3764, 3787), False, 'from rdkit import Chem\n'), ((8521, 8582), 
're.search', 're.search', (['"""@<TRIPOS>ATOM(.*?)@<TRIPOS>BOND"""', 'data', 're.DOTALL'], {}), "('@<TRIPOS>ATOM(.*?)@<TRIPOS>BOND', data, re.DOTALL)\n", (8530, 8582), False, 'import re\n')] |
import argparse
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
import torch.optim as optim
import torchvision
from PIL import Image
from torch import nn
from torch.nn import functional as F
from models.vanilla_vae_q import QuaternionVanillaVAE
from models.vanilla_vae import VanillaVAE
#pylint:disable=E1101
def _str2bool(value):
    """Parse a boolean command-line value.

    argparse's ``type=bool`` is broken for options taking a value:
    ``bool('False')`` is True because any non-empty string is truthy, so
    ``--QVAE False`` silently enabled the QVAE path. Accept the usual
    textual spellings instead (anything else parses as False).
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y', 't')


parser = argparse.ArgumentParser()
parser.add_argument('--gpu_num', type=int, default=4)
parser.add_argument('--QVAE', type=_str2bool, default=False, help="VAE or QVAE model to use for generation.")
parser.add_argument('--n_channels', type=int, default=3, help="3 for real-valued inputs, 4 for quaternion-valued inputs.")
parser.add_argument('--latent_dim', type=int, default=100, help="Dimension of the latent space.")
parser.add_argument('--cuda', type=_str2bool, default=True)
parser.add_argument('--image_size', type=int, default=64)
parser.add_argument('--num_samples', type=int, default=8, help="Number of samples to generate.")
parser.add_argument('--root_dir', type=str, default='../Datasets/img_align_celeba/test')
opt = parser.parse_args()

if opt.QVAE:
    # Quaternion inputs carry an extra (zero-padded) channel.
    opt.n_channels = 4
##### DATASET #####
# Prepare the CelebA dataset
class CelebaDataset(torch.utils.data.Dataset):
    """CelebA images loaded from a directory, resized and scaled to [0, 1]."""

    def __init__(self, root_dir, im_name_list, resize_dim, transform=None):
        self.root_dir = root_dir        # directory containing the images
        self.im_list = im_name_list     # image file names within root_dir
        self.resize_dim = resize_dim    # target size passed to PIL resize
        self.transform = transform      # optional transform applied after scaling

    def __len__(self):
        return len(self.im_list)

    def __getitem__(self, idx):
        path = os.path.join(self.root_dir, self.im_list[idx])
        image = Image.open(path).resize(self.resize_dim, resample=PIL.Image.NEAREST)
        image = np.array(image) / 255
        if self.transform:
            image = self.transform(image)
        if opt.QVAE:
            # Prepend a zero channel so the input has 4 quaternion components.
            image = np.pad(image, pad_width=((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=0)
        return image
# Build the evaluation dataset from every file in root_dir, converting each
# resized image to a tensor.
celeba_dataset = CelebaDataset(opt.root_dir, os.listdir(opt.root_dir), (opt.image_size, opt.image_size),
                                torchvision.transforms.Compose([torchvision.transforms.ToTensor()]))
# A single batch holding the whole test set (batch_size == dataset length).
test_loader = torch.utils.data.DataLoader(celeba_dataset, batch_size=len(celeba_dataset), shuffle=False)
def save_image_single(img, filename):
    '''Save a single image tensor with the given filename.'''
    # Detach from the graph, move to host memory, and reorder CHW -> HWC
    # for matplotlib.
    array = img.detach().cpu().numpy().transpose(1, 2, 0)
    plt.imsave(filename, array)
def generate_new_samples(model, num_samples):
    '''Draw latent codes from N(0,1), decode them with `model`, and store the
    resulting images on disk, one PNG per sample.'''
    model.eval()
    samples = model.sample(num_samples=num_samples, current_device=opt.gpu_num)
    for idx, sample in enumerate(samples):
        if opt.QVAE:
            # Drop the extra quaternion channel before saving.
            save_image_single(sample[1:, :, :], "RESULTS_EVALUATION/gen_test_MidQVAE_linearlayers/%i.png" %idx)
        else:
            save_image_single(sample, "RESULTS_EVALUATION/gen_test_VAE_kldi/%i.png" %idx)
def compare_reconstructions(model):
    '''Reconstruct every image of the test set with `model` and store the
    reconstructions on disk, one PNG per image.'''
    model.eval()
    for batch in test_loader:
        batch = batch.type(torch.FloatTensor).to(device)
        recons = model.generate(x=batch)
        for idx, recon in enumerate(recons):
            if opt.QVAE:
                # Drop the extra quaternion channel before saving.
                save_image_single(recon[1:, :, :], "RESULTS_EVALUATION/recons_test_MidQVAE_linearlayers/%i.png" %idx)
            else:
                save_image_single(recon, "RESULTS_EVALUATION/recons_test_VAE_kldi/%i.png" %idx)
# Instantiate the requested architecture (quaternion- or real-valued VAE).
if opt.QVAE:
    model = QuaternionVanillaVAE(in_channels=opt.n_channels, latent_dim=opt.latent_dim)
else:
    model = VanillaVAE(in_channels=opt.n_channels, latent_dim=opt.latent_dim)
# Select the compute device; `device` is used by compare_reconstructions().
if opt.cuda:
    torch.cuda.set_device(opt.gpu_num)
    device = "cuda:%i" %opt.gpu_num
    model.cuda()
else:
    device = "cpu"
# Load model state dictionary
if opt.QVAE:
    model.load_state_dict(torch.load("checkpoints/model_midqvae_newloss_epoch49"))
else:
    model.load_state_dict(torch.load("checkpoints/model_vae_nobn_kldi_epoch49"))
# Generate images from the prior, then reconstruct the whole test set.
generate_new_samples(model, opt.num_samples)
compare_reconstructions(model)
| [
"os.listdir",
"argparse.ArgumentParser",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.imsave",
"torch.load",
"models.vanilla_vae.VanillaVAE",
"os.path.join",
"numpy.array",
"models.vanilla_vae_q.QuaternionVanillaVAE",
"numpy.pad",
"torch.cuda.set_device"
] | [((371, 396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (394, 396), False, 'import argparse\n'), ((2050, 2074), 'os.listdir', 'os.listdir', (['opt.root_dir'], {}), '(opt.root_dir)\n', (2060, 2074), False, 'import os\n'), ((2473, 2498), 'matplotlib.pyplot.imsave', 'plt.imsave', (['filename', 'img'], {}), '(filename, img)\n', (2483, 2498), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3712), 'models.vanilla_vae_q.QuaternionVanillaVAE', 'QuaternionVanillaVAE', ([], {'in_channels': 'opt.n_channels', 'latent_dim': 'opt.latent_dim'}), '(in_channels=opt.n_channels, latent_dim=opt.latent_dim)\n', (3657, 3712), False, 'from models.vanilla_vae_q import QuaternionVanillaVAE\n'), ((3731, 3796), 'models.vanilla_vae.VanillaVAE', 'VanillaVAE', ([], {'in_channels': 'opt.n_channels', 'latent_dim': 'opt.latent_dim'}), '(in_channels=opt.n_channels, latent_dim=opt.latent_dim)\n', (3741, 3796), False, 'from models.vanilla_vae import VanillaVAE\n'), ((3814, 3848), 'torch.cuda.set_device', 'torch.cuda.set_device', (['opt.gpu_num'], {}), '(opt.gpu_num)\n', (3835, 3848), False, 'import torch\n'), ((1686, 1698), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1694, 1698), True, 'import numpy as np\n'), ((4001, 4056), 'torch.load', 'torch.load', (['"""checkpoints/model_midqvae_newloss_epoch49"""'], {}), "('checkpoints/model_midqvae_newloss_epoch49')\n", (4011, 4056), False, 'import torch\n'), ((4090, 4143), 'torch.load', 'torch.load', (['"""checkpoints/model_vae_nobn_kldi_epoch49"""'], {}), "('checkpoints/model_vae_nobn_kldi_epoch49')\n", (4100, 4143), False, 'import torch\n'), ((1923, 1985), 'numpy.pad', 'np.pad', (['im'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(im, pad_width=npad, mode='constant', constant_values=0)\n", (1929, 1985), True, 'import numpy as np\n'), ((2174, 2207), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (2205, 2207), False, 'import 
torchvision\n'), ((1573, 1619), 'os.path.join', 'os.path.join', (['self.root_dir', 'self.im_list[idx]'], {}), '(self.root_dir, self.im_list[idx])\n', (1585, 1619), False, 'import os\n')] |
# $Id$
#
# Copyright (C) 2008-2011 <NAME>
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Chem
from rdkit import RDConfig
import numpy
import math
import sys
import copy
import pprint
from rdkit.six import cmp
periodicTable = Chem.GetPeriodicTable()
class Font(object):
  """Simple container describing the font used for atom labels."""
  face = 'sans'
  size = '12'
  weight = 'normal'
  name = None
  def __init__(self, face=None, size=None, name=None, weight=None):
    # Falsy arguments (None, '', 0) leave the class-level defaults in place.
    for attr, value in (('face', face), ('size', size), ('name', name), ('weight', weight)):
      if value:
        setattr(self, attr, value)
class DrawingOptions(object):
  """Bag of tunable parameters controlling how MolDrawing renders a molecule."""
  # Rendering resolution: canvas dots per Angstrom of molecular coordinates.
  dotsPerAngstrom = 30
  # Fraction of the canvas the drawing may occupy when scaling/centering.
  useFraction = 0.85
  # Atom-label typography.
  atomLabelFontFace = "sans"
  atomLabelFontSize = 12
  atomLabelMinFontSize = 7
  # Render 2H/3H as D/T when True.
  atomLabelDeuteriumTritium = False
  bondLineWidth = 1.2
  # Offset (in Angstrom) of the second line of a double bond.
  dblBondOffset = .25
  # Length of that second line as a fraction of the bond length.
  dblBondLengthFrac = .8
  defaultColor = (1, 0, 0)
  selectColor = (1, 0, 0)
  bgColor = (1, 1, 1)
  colorBonds = True
  # Suppress the 'C' symbol on carbon atoms when True.
  noCarbonSymbols = True
  includeAtomNumbers = False
  atomNumberOffset = 0
  # Unicode bullet used to mark radical electrons.
  radicalSymbol = u'\u2219'
  # Dash pattern for dashed bonds/wedges.
  dash = (4, 4)
  wedgeDashedBonds = True
  showUnknownDoubleBonds = True
  # used to adjust overall scaling for molecules that have been laid out with non-standard
  # bond lengths
  coordScale = 1.0
  # Per-atomic-number RGB colors for atom labels and (optionally) bonds.
  elemDict = {
    1: (0.55, 0.55, 0.55),
    7: (0, 0, 1),
    8: (1, 0, 0),
    9: (.2, .8, .8),
    15: (1, .5, 0),
    16: (.8, .8, 0),
    17: (0, .8, 0),
    35: (.5, .3, .1),
    53: (.63, .12, .94),
    0: (.5, .5, .5),
  }
class MolDrawing(object):
  """Renders an RDKit molecule onto an abstract canvas object.

  The canvas must provide addCanvasLine/addCanvasPolygon/addCanvasText (and
  optionally addCanvasDashedWedge).  Rendering behaviour is controlled by a
  DrawingOptions instance.  Call AddMol() to draw a molecule.
  """
  # per-molecule dict of {atom index: canvas position}, filled in by AddMol()
  atomPs = None
  canvas = None
  canvasSize = None
  def __init__(self, canvas=None, drawingOptions=None):
    """Bind a canvas (and optional DrawingOptions); paints the background
    rectangle immediately if a background color is configured."""
    self.canvas = canvas
    if canvas:
      self.canvasSize = canvas.size
    self.atomPs = {}
    if drawingOptions is None:
      self.drawingOptions = DrawingOptions()
    else:
      self.drawingOptions = drawingOptions
    self.boundingBoxes = {}
    if self.drawingOptions.bgColor is not None:
      self.canvas.addCanvasPolygon(((0, 0), (canvas.size[0], 0), (canvas.size[0], canvas.size[1]),
                                    (0, canvas.size[1])), color=self.drawingOptions.bgColor,
                                   fill=True, stroke=False)
  def transformPoint(self, pos):
    """Map a molecule-frame 2D position to canvas coordinates.

    Uses the molTrans/drawingTrans translations set up by scaleAndCenter()
    (or AddMol); the y axis is flipped because canvas y points down."""
    res = [0, 0]
    res[0] = (pos[0] + self.molTrans[0]
              ) * self.currDotsPerAngstrom * self.drawingOptions.useFraction + self.drawingTrans[0]
    res[1] = self.canvasSize[1]-((pos[1] + self.molTrans[1])*self.currDotsPerAngstrom*self.drawingOptions.useFraction + \
                                 self.drawingTrans[1])
    return res
  def _getBondOffset(self, p1, p2):
    """Return (perpendicular angle, x offset, y offset) used to place the
    second line of a double bond drawn from p1 to p2."""
    # get the vector between the points:
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    # figure out the angle and the perpendicular:
    ang = math.atan2(dy, dx)
    perp = ang + math.pi / 2.
    # here's the offset for the parallel bond:
    offsetX = math.cos(perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
    offsetY = math.sin(perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
    return perp, offsetX, offsetY
  def _getOffsetBondPts(self, p1, p2, offsetX, offsetY, lenFrac=None):
    """Return the two endpoints of a line parallel to p1->p2, displaced by
    (offsetX, offsetY) and shortened to lenFrac of the bond length."""
    if not lenFrac:
      lenFrac = self.drawingOptions.dblBondLengthFrac
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    # ----
    # now figure out where to start and end it:
    # offset the start point:
    fracP1 = p1[0] + offsetX, p1[1] + offsetY
    # now move a portion of the way along the line to the neighbor:
    frac = (1. - lenFrac) / 2
    fracP1 = fracP1[0]+dx*frac,\
             fracP1[1]+dy*frac
    fracP2 = fracP1[0]+dx*lenFrac,\
             fracP1[1]+dy*lenFrac
    return fracP1, fracP2
  def _offsetDblBond(self, p1, p2, bond, a1, a2, conf, dir=1, lenFrac=None):
    """Compute the endpoints of the offset (second) line of a double or
    aromatic bond; for ring bonds the line is flipped to the ring interior
    when a neighboring ring atom lies on the other side."""
    perp, offsetX, offsetY = self._getBondOffset(p1, p2)
    offsetX = offsetX * dir
    offsetY = offsetY * dir
    # if we're a ring bond, we may need to flip over to the other side:
    if bond.IsInRing():
      bondIdx = bond.GetIdx()
      a1Idx = a1.GetIdx()
      a2Idx = a2.GetIdx()
      # find a ring bond from a1 to an atom other than a2:
      for otherBond in a1.GetBonds():
        if otherBond.GetIdx()!=bondIdx and \
            otherBond.IsInRing():
          sharedRing = False
          for ring in self.bondRings:
            if bondIdx in ring and otherBond.GetIdx() in ring:
              sharedRing = True
              break
          if not sharedRing:
            continue
          a3 = otherBond.GetOtherAtom(a1)
          if a3.GetIdx() != a2Idx:
            p3 = self.transformPoint(
              conf.GetAtomPosition(a3.GetIdx()) * self.drawingOptions.coordScale)
            dx2 = p3[0] - p1[0]
            dy2 = p3[1] - p1[1]
            dotP = dx2 * offsetX + dy2 * offsetY
            # negative projection means the offset points away from the ring:
            if dotP < 0:
              perp += math.pi
              offsetX = math.cos(
                perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
              offsetY = math.sin(
                perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
    fracP1, fracP2 = self._getOffsetBondPts(p1, p2, offsetX, offsetY, lenFrac=lenFrac)
    return fracP1, fracP2
  def _getBondAttachmentCoordinates(self, p1, p2, labelSize):
    """Move the bond endpoint p1 toward p2 so the bond line starts at the
    edge of p1's atom label (labelSize from _drawLabel) instead of its
    center; returns p1 unchanged (copied) when there is no label."""
    newpos = [None, None]
    if labelSize != None:
      labelSizeOffset = [labelSize[0][0] / 2 + (cmp(p2[0], p1[0]) * labelSize[0][2]),
                         labelSize[0][1] / 2]
      if p1[1] == p2[1]:
        newpos[0] = p1[0] + cmp(p2[0], p1[0]) * labelSizeOffset[0]
      else:
        if abs(labelSizeOffset[1] * (p2[0] - p1[0]) / (p2[1] - p1[1])) < labelSizeOffset[0]:
          newpos[0] = p1[0] + cmp(p2[0], p1[0]) * abs(labelSizeOffset[1] * (p2[0] - p1[0]) /
                                                      (p2[1] - p1[1]))
        else:
          newpos[0] = p1[0] + cmp(p2[0], p1[0]) * labelSizeOffset[0]
      if p1[0] == p2[0]:
        newpos[1] = p1[1] + cmp(p2[1], p1[1]) * labelSizeOffset[1]
      else:
        if abs(labelSizeOffset[0] * (p1[1] - p2[1]) / (p2[0] - p1[0])) < labelSizeOffset[1]:
          newpos[1] = p1[1] + cmp(p2[1], p1[1]) * abs(labelSizeOffset[0] * (p1[1] - p2[1]) /
                                                      (p2[0] - p1[0]))
        else:
          newpos[1] = p1[1] + cmp(p2[1], p1[1]) * labelSizeOffset[1]
    else:
      newpos = copy.deepcopy(p1)
    return newpos
  def _drawWedgedBond(self, bond, pos, nbrPos, width=None, color=None, dash=None):
    """Draw a stereo wedge from pos to nbrPos: filled triangle for a solid
    wedge, dashed wedge (or wide dashed line) when `dash` is given."""
    if width is None:
      width = self.drawingOptions.bondLineWidth
    if color is None:
      color = self.drawingOptions.defaultColor
    perp, offsetX, offsetY = self._getBondOffset(pos, nbrPos)
    offsetX *= .75
    offsetY *= .75
    poly = ((pos[0], pos[1]), (nbrPos[0] + offsetX, nbrPos[1] + offsetY),
            (nbrPos[0] - offsetX, nbrPos[1] - offsetY))
    #canvas.drawPolygon(poly,edgeColor=color,edgeWidth=1,fillColor=color,closed=1)
    if not dash:
      self.canvas.addCanvasPolygon(poly, color=color)
    elif self.drawingOptions.wedgeDashedBonds and self.canvas.addCanvasDashedWedge:
      self.canvas.addCanvasDashedWedge(poly[0], poly[1], poly[2], color=color)
    else:
      self.canvas.addCanvasLine(pos, nbrPos, linewidth=width * 2, color=color, dashes=dash)
  def _drawBond(self, bond, atom, nbr, pos, nbrPos, conf, width=None, color=None, color2=None,
                labelSize1=None, labelSize2=None):
    """Draw one bond between atom (at pos) and nbr (at nbrPos), dispatching
    on bond type: single (incl. stereo wedges), double (incl. crossed
    'unknown' stereo), aromatic, triple, or a dashed fallback."""
    if width is None:
      width = self.drawingOptions.bondLineWidth
    if color is None:
      color = self.drawingOptions.defaultColor
    # shift endpoints so lines stop at the edge of any atom labels:
    p1_raw = copy.deepcopy(pos)
    p2_raw = copy.deepcopy(nbrPos)
    newpos = self._getBondAttachmentCoordinates(p1_raw, p2_raw, labelSize1)
    newnbrPos = self._getBondAttachmentCoordinates(p2_raw, p1_raw, labelSize2)
    bType = bond.GetBondType()
    if bType == Chem.BondType.SINGLE:
      bDir = bond.GetBondDir()
      if bDir in (Chem.BondDir.BEGINWEDGE, Chem.BondDir.BEGINDASH):
        # if the bond is "backwards", change the drawing direction:
        if bond.GetBeginAtom().GetChiralTag() in (Chem.ChiralType.CHI_TETRAHEDRAL_CW,
                                                  Chem.ChiralType.CHI_TETRAHEDRAL_CCW):
          p1, p2 = newpos, newnbrPos
          wcolor = color
        else:
          p2, p1 = newpos, newnbrPos
          if color2 is not None:
            wcolor = color2
          else:
            wcolor = self.drawingOptions.defaultColor
        if bDir == Chem.BondDir.BEGINWEDGE:
          self._drawWedgedBond(bond, p1, p2, color=wcolor, width=width)
        elif bDir == Chem.BondDir.BEGINDASH:
          self._drawWedgedBond(bond, p1, p2, color=wcolor, width=width,
                               dash=self.drawingOptions.dash)
      else:
        self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
    elif bType == Chem.BondType.DOUBLE:
      crossBond = (self.drawingOptions.showUnknownDoubleBonds and \
                   bond.GetStereo() == Chem.BondStereo.STEREOANY)
      if not crossBond and \
         ( bond.IsInRing() or (atom.GetDegree()!=1 and bond.GetOtherAtom(atom).GetDegree()!=1) ):
        # main line plus one offset line on the ring/interior side:
        self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
        fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
        self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
      else:
        # two symmetric half-offset lines; swapped endpoints draw the cross:
        fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=.5,
                                       lenFrac=1.0)
        fp3, fp4 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=-.5,
                                       lenFrac=1.0)
        if crossBond:
          fp2, fp4 = fp4, fp2
        self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
        self.canvas.addCanvasLine(fp3, fp4, linewidth=width, color=color, color2=color2)
    elif bType == Chem.BondType.AROMATIC:
      self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
      fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
      self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2,
                                dash=self.drawingOptions.dash)
    elif bType == Chem.BondType.TRIPLE:
      self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
      fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
      self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
      fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=-1)
      self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
    else:
      # unknown bond type: draw a finely dashed line
      self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2,
                                dash=(1, 2))
  def scaleAndCenter(self, mol, conf, coordCenter=False, canvasSize=None, ignoreHs=False):
    """Compute molTrans/drawingTrans and shrink currDotsPerAngstrom (and the
    label font size, down to atomLabelMinFontSize) so the molecule fits on
    the canvas.  coordCenter centers on the centroid instead of the
    bounding box; ignoreHs skips hydrogens when measuring."""
    if canvasSize is None:
      canvasSize = self.canvasSize
    xAccum = 0
    yAccum = 0
    minX = 1e8
    minY = 1e8
    maxX = -1e8
    maxY = -1e8
    nAts = mol.GetNumAtoms()
    for i in range(nAts):
      if ignoreHs and mol.GetAtomWithIdx(i).GetAtomicNum() == 1:
        continue
      pos = conf.GetAtomPosition(i) * self.drawingOptions.coordScale
      xAccum += pos[0]
      yAccum += pos[1]
      minX = min(minX, pos[0])
      minY = min(minY, pos[1])
      maxX = max(maxX, pos[0])
      maxY = max(maxY, pos[1])
    dx = abs(maxX - minX)
    dy = abs(maxY - minY)
    xSize = dx * self.currDotsPerAngstrom
    ySize = dy * self.currDotsPerAngstrom
    if coordCenter:
      molTrans = -xAccum / nAts, -yAccum / nAts
    else:
      molTrans = -(minX + (maxX - minX) / 2), -(minY + (maxY - minY) / 2)
    self.molTrans = molTrans
    # scale down (never up) if either dimension would overflow the canvas:
    if xSize >= .95 * canvasSize[0]:
      scale = .9 * canvasSize[0] / xSize
      xSize *= scale
      ySize *= scale
      self.currDotsPerAngstrom *= scale
      self.currAtomLabelFontSize = max(self.currAtomLabelFontSize * scale,
                                       self.drawingOptions.atomLabelMinFontSize)
    if ySize >= .95 * canvasSize[1]:
      scale = .9 * canvasSize[1] / ySize
      xSize *= scale
      ySize *= scale
      self.currDotsPerAngstrom *= scale
      self.currAtomLabelFontSize = max(self.currAtomLabelFontSize * scale,
                                       self.drawingOptions.atomLabelMinFontSize)
    drawingTrans = canvasSize[0] / 2, canvasSize[1] / 2
    self.drawingTrans = drawingTrans
  def _drawLabel(self, label, pos, baseOffset, font, color=None, **kwargs):
    """Draw an atom label at pos and return the size structure reported by
    the canvas (used later to trim bond lines)."""
    if color is None:
      color = self.drawingOptions.defaultColor
    x1 = pos[0]
    y1 = pos[1]
    labelP = x1, y1
    labelSize = self.canvas.addCanvasText(label, (x1, y1, baseOffset), font, color, **kwargs)
    return labelSize
  # NOTE(review): highlightAtoms/highlightBonds use mutable default args; they
  # are only read here so the shared default is harmless, but worth cleaning up.
  def AddMol(self, mol, centerIt=True, molTrans=None, drawingTrans=None, highlightAtoms=[],
             confId=-1, flagCloseContactsDist=2, highlightMap=None, ignoreHs=False,
             highlightBonds=[], **kwargs):
    """Draw the molecule on the canvas.
    Parameters:
      highlightAtoms -- list of atom indices to highlight (default [])
      highlightMap -- dictionary of (atom index, color) pairs (default None)
      highlightBonds -- list of bond indices to highlight (default [])
    Notes:
      - specifying centerIt will cause molTrans and drawingTrans to be ignored
    """
    conf = mol.GetConformer(confId)
    if 'coordScale' in kwargs:
      self.drawingOptions.coordScale = kwargs['coordScale']
    self.currDotsPerAngstrom = self.drawingOptions.dotsPerAngstrom
    self.currAtomLabelFontSize = self.drawingOptions.atomLabelFontSize
    if centerIt:
      self.scaleAndCenter(mol, conf, ignoreHs=ignoreHs)
    else:
      if molTrans is None:
        molTrans = (0, 0)
      self.molTrans = molTrans
      if drawingTrans is None:
        drawingTrans = (0, 0)
      self.drawingTrans = drawingTrans
    font = Font(face=self.drawingOptions.atomLabelFontFace, size=self.currAtomLabelFontSize)
    obds = None
    if not mol.HasProp('_drawingBondsWedged'):
      # this is going to modify the molecule, get ready to undo that
      obds = [x.GetBondDir() for x in mol.GetBonds()]
      Chem.WedgeMolBonds(mol, conf)
    includeAtomNumbers = kwargs.get('includeAtomNumbers', self.drawingOptions.includeAtomNumbers)
    self.atomPs[mol] = {}
    self.boundingBoxes[mol] = [0] * 4
    self.activeMol = mol
    self.bondRings = mol.GetRingInfo().BondRings()
    labelSizes = {}
    # first pass: place every atom, accumulate neighbor vectors, draw labels
    for atom in mol.GetAtoms():
      labelSizes[atom.GetIdx()] = None
      if ignoreHs and atom.GetAtomicNum() == 1:
        drawAtom = False
      else:
        drawAtom = True
      idx = atom.GetIdx()
      pos = self.atomPs[mol].get(idx, None)
      if pos is None:
        pos = self.transformPoint(conf.GetAtomPosition(idx) * self.drawingOptions.coordScale)
        self.atomPs[mol][idx] = pos
        if drawAtom:
          self.boundingBoxes[mol][0] = min(self.boundingBoxes[mol][0], pos[0])
          self.boundingBoxes[mol][1] = min(self.boundingBoxes[mol][1], pos[1])
          self.boundingBoxes[mol][2] = max(self.boundingBoxes[mol][2], pos[0])
          self.boundingBoxes[mol][3] = max(self.boundingBoxes[mol][3], pos[1])
      if not drawAtom:
        continue
      # nbrSum: summed direction to neighbors, used to orient the label
      nbrSum = [0, 0]
      for bond in atom.GetBonds():
        nbr = bond.GetOtherAtom(atom)
        if ignoreHs and nbr.GetAtomicNum() == 1:
          continue
        nbrIdx = nbr.GetIdx()
        if nbrIdx > idx:
          nbrPos = self.atomPs[mol].get(nbrIdx, None)
          if nbrPos is None:
            nbrPos = self.transformPoint(
              conf.GetAtomPosition(nbrIdx) * self.drawingOptions.coordScale)
            self.atomPs[mol][nbrIdx] = nbrPos
            self.boundingBoxes[mol][0] = min(self.boundingBoxes[mol][0], nbrPos[0])
            self.boundingBoxes[mol][1] = min(self.boundingBoxes[mol][1], nbrPos[1])
            self.boundingBoxes[mol][2] = max(self.boundingBoxes[mol][2], nbrPos[0])
            self.boundingBoxes[mol][3] = max(self.boundingBoxes[mol][3], nbrPos[1])
        else:
          nbrPos = self.atomPs[mol][nbrIdx]
        nbrSum[0] += nbrPos[0] - pos[0]
        nbrSum[1] += nbrPos[1] - pos[1]
      iso = atom.GetIsotope()
      labelIt= not self.drawingOptions.noCarbonSymbols or \
               atom.GetAtomicNum()!=6 or \
               atom.GetFormalCharge()!=0 or \
               atom.GetNumRadicalElectrons() or \
               includeAtomNumbers or \
               iso or \
               atom.HasProp('molAtomMapNumber') or \
               atom.GetDegree()==0
      orient = ''
      if labelIt:
        baseOffset = 0
        if includeAtomNumbers:
          symbol = str(atom.GetIdx())
          symbolLength = len(symbol)
        else:
          # assemble the HTML-ish label: isotope, symbol, Hs, charge, radical, map number
          base = atom.GetSymbol()
          if (base == 'H' and (iso == 2 or iso == 3) and
              self.drawingOptions.atomLabelDeuteriumTritium):
            if (iso == 2):
              base = 'D'
            else:
              base = 'T'
            iso = 0
          symbolLength = len(base)
          if not atom.HasQuery():
            nHs = atom.GetTotalNumHs()
          else:
            nHs = 0
          if nHs > 0:
            if nHs > 1:
              hs = 'H<sub>%d</sub>' % nHs
              symbolLength += 1 + len(str(nHs))
            else:
              hs = 'H'
              symbolLength += 1
          else:
            hs = ''
          chg = atom.GetFormalCharge()
          if chg != 0:
            if chg == 1:
              chg = '+'
            elif chg == -1:
              chg = '-'
            elif chg > 1:
              chg = '+%d' % chg
            elif chg < -1:
              chg = '-%d' % chg
            symbolLength += len(chg)
          else:
            chg = ''
          if chg:
            chg = '<sup>%s</sup>' % chg
          if atom.GetNumRadicalElectrons():
            rad = self.drawingOptions.radicalSymbol * atom.GetNumRadicalElectrons()
            rad = '<sup>%s</sup>' % rad
            symbolLength += atom.GetNumRadicalElectrons()
          else:
            rad = ''
          isotope = ''
          isotopeLength = 0
          if iso:
            isotope = '<sup>%d</sup>' % atom.GetIsotope()
            isotopeLength = len(str(atom.GetIsotope()))
            symbolLength += isotopeLength
          mapNum = ''
          mapNumLength = 0
          if atom.HasProp('molAtomMapNumber'):
            mapNum = ':' + atom.GetProp('molAtomMapNumber')
            mapNumLength = 1 + len(str(atom.GetProp('molAtomMapNumber')))
            symbolLength += mapNumLength
          deg = atom.GetDegree()
          # This should be done in a better way in the future:
          # 'baseOffset' should be determined by getting the size of 'isotope' and the size of 'base', or the size of 'mapNum' and the size of 'base'
          # (depending on 'deg' and 'nbrSum[0]') in order to determine the exact position of the base
          if deg == 0:
            if periodicTable.GetElementSymbol(atom.GetAtomicNum()) in ('O', 'S', 'Se', 'Te', 'F',
                                                                       'Cl', 'Br', 'I', 'At'):
              symbol = '%s%s%s%s%s%s' % (hs, isotope, base, chg, rad, mapNum)
            else:
              symbol = '%s%s%s%s%s%s' % (isotope, base, hs, chg, rad, mapNum)
          elif deg > 1 or nbrSum[0] < 1:
            symbol = '%s%s%s%s%s%s' % (isotope, base, hs, chg, rad, mapNum)
            baseOffset = 0.5 - (isotopeLength + len(base) / 2.) / symbolLength
          else:
            symbol = '%s%s%s%s%s%s' % (rad, chg, hs, isotope, base, mapNum)
            baseOffset = -0.5 + (mapNumLength + len(base) / 2.) / symbolLength
        # orient the label away from the (single) neighbor, or center it:
        if deg == 1:
          if abs(nbrSum[1]) > 1:
            islope = nbrSum[0] / abs(nbrSum[1])
          else:
            islope = nbrSum[0]
          if abs(islope) > .3:
            if islope > 0:
              orient = 'W'
            else:
              orient = 'E'
          elif abs(nbrSum[1]) > 10:
            if nbrSum[1] > 0:
              orient = 'N'
            else:
              orient = 'S'
        else:
          orient = 'C'
        if highlightMap and idx in highlightMap:
          color = highlightMap[idx]
        elif highlightAtoms and idx in highlightAtoms:
          color = self.drawingOptions.selectColor
        else:
          color = self.drawingOptions.elemDict.get(atom.GetAtomicNum(), (0, 0, 0))
        labelSize = self._drawLabel(symbol, pos, baseOffset, font, color=color, orientation=orient)
        labelSizes[atom.GetIdx()] = [labelSize, orient]
    # second pass: draw the bonds, with highlight widths/colors as requested
    for bond in mol.GetBonds():
      atom, idx = bond.GetBeginAtom(), bond.GetBeginAtomIdx()
      nbr, nbrIdx = bond.GetEndAtom(), bond.GetEndAtomIdx()
      pos = self.atomPs[mol].get(idx, None)
      nbrPos = self.atomPs[mol].get(nbrIdx, None)
      if highlightBonds and bond.GetIdx() in highlightBonds:
        width = 2.0 * self.drawingOptions.bondLineWidth
        color = self.drawingOptions.selectColor
        color2 = self.drawingOptions.selectColor
      elif highlightAtoms and idx in highlightAtoms and nbrIdx in highlightAtoms:
        width = 2.0 * self.drawingOptions.bondLineWidth
        color = self.drawingOptions.selectColor
        color2 = self.drawingOptions.selectColor
      elif highlightMap is not None and idx in highlightMap and nbrIdx in highlightMap:
        width = 2.0 * self.drawingOptions.bondLineWidth
        color = highlightMap[idx]
        color2 = highlightMap[nbrIdx]
      else:
        width = self.drawingOptions.bondLineWidth
        if self.drawingOptions.colorBonds:
          color = self.drawingOptions.elemDict.get(atom.GetAtomicNum(), (0, 0, 0))
          color2 = self.drawingOptions.elemDict.get(nbr.GetAtomicNum(), (0, 0, 0))
        else:
          color = self.drawingOptions.defaultColor
          color2 = color
      self._drawBond(bond, atom, nbr, pos, nbrPos, conf, color=color, width=width, color2=color2,
                     labelSize1=labelSizes[idx], labelSize2=labelSizes[nbrIdx])
    # if we modified the bond wedging state, undo those changes now
    if obds:
      for i, d in enumerate(obds):
        mol.GetBondWithIdx(i).SetBondDir(d)
    # flag pairs of atoms that ended up closer than flagCloseContactsDist
    # with a small red square:
    if flagCloseContactsDist > 0:
      tol = flagCloseContactsDist * flagCloseContactsDist
      for i, atomi in enumerate(mol.GetAtoms()):
        pi = numpy.array(self.atomPs[mol][i])
        for j in range(i + 1, mol.GetNumAtoms()):
          pj = numpy.array(self.atomPs[mol][j])
          d = pj - pi
          dist2 = d[0] * d[0] + d[1] * d[1]
          if dist2 <= tol:
            self.canvas.addCanvasPolygon(
              ((pi[0] - 2 * flagCloseContactsDist, pi[1] - 2 * flagCloseContactsDist),
               (pi[0] + 2 * flagCloseContactsDist, pi[1] - 2 * flagCloseContactsDist), (
                 pi[0] + 2 * flagCloseContactsDist, pi[1] + 2 * flagCloseContactsDist),
               (pi[0] - 2 * flagCloseContactsDist,
                pi[1] + 2 * flagCloseContactsDist)), color=(1., 0, 0), fill=False, stroke=True)
| [
"rdkit.Chem.GetPeriodicTable",
"rdkit.Chem.WedgeMolBonds",
"math.cos",
"numpy.array",
"rdkit.six.cmp",
"math.atan2",
"copy.deepcopy",
"math.sin"
] | [((420, 443), 'rdkit.Chem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (441, 443), False, 'from rdkit import Chem\n'), ((2912, 2930), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (2922, 2930), False, 'import math\n'), ((7678, 7696), 'copy.deepcopy', 'copy.deepcopy', (['pos'], {}), '(pos)\n', (7691, 7696), False, 'import copy\n'), ((7710, 7731), 'copy.deepcopy', 'copy.deepcopy', (['nbrPos'], {}), '(nbrPos)\n', (7723, 7731), False, 'import copy\n'), ((6471, 6488), 'copy.deepcopy', 'copy.deepcopy', (['p1'], {}), '(p1)\n', (6484, 6488), False, 'import copy\n'), ((14351, 14380), 'rdkit.Chem.WedgeMolBonds', 'Chem.WedgeMolBonds', (['mol', 'conf'], {}), '(mol, conf)\n', (14369, 14380), False, 'from rdkit import Chem\n'), ((3023, 3037), 'math.cos', 'math.cos', (['perp'], {}), '(perp)\n', (3031, 3037), False, 'import math\n'), ((3115, 3129), 'math.sin', 'math.sin', (['perp'], {}), '(perp)\n', (3123, 3129), False, 'import math\n'), ((22555, 22587), 'numpy.array', 'numpy.array', (['self.atomPs[mol][i]'], {}), '(self.atomPs[mol][i])\n', (22566, 22587), False, 'import numpy\n'), ((22653, 22685), 'numpy.array', 'numpy.array', (['self.atomPs[mol][j]'], {}), '(self.atomPs[mol][j])\n', (22664, 22685), False, 'import numpy\n'), ((5474, 5491), 'rdkit.six.cmp', 'cmp', (['p2[0]', 'p1[0]'], {}), '(p2[0], p1[0])\n', (5477, 5491), False, 'from rdkit.six import cmp\n'), ((5611, 5628), 'rdkit.six.cmp', 'cmp', (['p2[0]', 'p1[0]'], {}), '(p2[0], p1[0])\n', (5614, 5628), False, 'from rdkit.six import cmp\n'), ((6055, 6072), 'rdkit.six.cmp', 'cmp', (['p2[1]', 'p1[1]'], {}), '(p2[1], p1[1])\n', (6058, 6072), False, 'from rdkit.six import cmp\n'), ((5785, 5802), 'rdkit.six.cmp', 'cmp', (['p2[0]', 'p1[0]'], {}), '(p2[0], p1[0])\n', (5788, 5802), False, 'from rdkit.six import cmp\n'), ((5963, 5980), 'rdkit.six.cmp', 'cmp', (['p2[0]', 'p1[0]'], {}), '(p2[0], p1[0])\n', (5966, 5980), False, 'from rdkit.six import cmp\n'), ((6229, 6246), 'rdkit.six.cmp', 'cmp', 
(['p2[1]', 'p1[1]'], {}), '(p2[1], p1[1])\n', (6232, 6246), False, 'from rdkit.six import cmp\n'), ((6407, 6424), 'rdkit.six.cmp', 'cmp', (['p2[1]', 'p1[1]'], {}), '(p2[1], p1[1])\n', (6410, 6424), False, 'from rdkit.six import cmp\n'), ((4983, 4997), 'math.cos', 'math.cos', (['perp'], {}), '(perp)\n', (4991, 4997), False, 'import math\n'), ((5102, 5116), 'math.sin', 'math.sin', (['perp'], {}), '(perp)\n', (5110, 5116), False, 'import math\n')] |
#####################################################################
# #
# /labscript_devices/IMAQdxCamera/blacs_workers.py #
# #
# Copyright 2019, Monash University and contributors #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
# Original imaqdx_camera server by dt, with modifications by rpanderson and cbillington.
# Refactored as a BLACS worker by cbillington
import sys
from time import perf_counter
from blacs.tab_base_classes import Worker
import threading
import numpy as np
from labscript_utils import dedent
import labscript_utils.h5_lock
import h5py
import labscript_utils.properties
import zmq
from labscript_utils.ls_zprocess import Context
from labscript_utils.shared_drive import path_to_local
from labscript_utils.properties import set_attributes
# Don't import nv yet so as not to throw an error, allow worker to run as a dummy
# device, or for subclasses to import this module to inherit classes without requiring
# nivision
nv = None
def _monkeypatch_imaqdispose():
    """Monkeypatch a fix to a memory leak bug in pynivision. The pynivision project is
    no longer active, so we can't contribute this fix upstream. In the long run,
    hopefully someone (perhaps us) forks it so that bugs can be addressed in the
    normal way"""
    import nivision.core
    import ctypes
    # keep a reference to the original so the wrapper can delegate to it
    _imaqDispose = nivision.core._imaqDispose
    def imaqDispose(obj):
        # Dispose of a pointer's contents (and null it) before the pointer itself:
        if getattr(obj, "_contents", None) is not None:
            _imaqDispose(ctypes.byref(obj._contents))
            obj._contents = None
        if getattr(obj, "value", None) is not None:
            _imaqDispose(obj)
            obj.value = None
        # This is the bugfix: pointers as raw ints were not being disposed:
        if isinstance(obj, int):
            _imaqDispose(obj)
    # Install the patched version both in pynivision and on the module-global
    # alias `nv` (set when IMAQdx_Camera.__init__ imports nivision).
    nivision.core.imaqDispose = nv.imaqDispose = imaqDispose
class MockCamera(object):
    """Stand-in camera that serves synthetic image data instead of talking to hardware."""

    def __init__(self):
        print("Starting device worker as a mock device")
        self.attributes = {}

    def set_attributes(self, attributes):
        """Merge the given {name: value} dict into the stored attributes."""
        self.attributes.update(attributes)

    def get_attribute(self, name):
        """Return the stored value of a single attribute."""
        return self.attributes[name]

    def get_attribute_names(self, visibility_level=None):
        """Return the names of all stored attributes; visibility_level is ignored."""
        return [*self.attributes]

    def configure_acquisition(self, continuous=False, bufferCount=5):
        """No-op: the mock camera needs no acquisition setup."""

    def grab(self):
        return self.snap()

    def grab_multiple(self, n_images, images, waitForNextBuffer=True):
        """Append n_images synthetic frames to the supplied list."""
        print(f"Attempting to grab {n_images} (mock) images.")
        for i in range(n_images):
            images.append(self.grab())
            print(f"Got (mock) image {i+1} of {n_images}.")
        print(f"Got {len(images)} of {n_images} (mock) images.")

    def snap(self):
        """Return one synthetic 500x500 frame: a Gaussian dip plus Poisson noise,
        watermarked so it cannot be mistaken for real data."""
        N = 500
        A = 500
        axis = np.linspace(-5, 5, 500)
        grid = axis.reshape((N, 1))
        clean_image = A * (1 - 0.5 * np.exp(-(axis ** 2 + grid ** 2)))
        # Write text on the image that says "NOT REAL DATA"
        from PIL import Image, ImageDraw, ImageFont
        face = ImageFont.load_default()
        stamp = Image.new('L', [N // 5, N // 5], (0,))
        pen = ImageDraw.Draw(stamp)
        pen.text((10, 20), "NOT REAL DATA", font=face, fill=1)
        clean_image += 0.2 * A * np.asarray(stamp.resize((N, N)).rotate(20))
        return np.random.poisson(clean_image)

    def stop_acquisition(self):
        """No-op."""

    def abort_acquisition(self):
        """No-op."""

    def close(self):
        """No-op."""
class IMAQdx_Camera(object):
def __init__(self, serial_number):
global nv
import nivision as nv
_monkeypatch_imaqdispose()
# Find the camera:
print("Finding camera...")
for cam in nv.IMAQdxEnumerateCameras(True):
if serial_number == (cam.SerialNumberHi << 32) + cam.SerialNumberLo:
self.camera = cam
break
else:
msg = f"No connected camera with serial number {serial_number:X} found"
raise Exception(msg)
# Connect to the camera:
print("Connecting to camera...")
self.imaqdx = nv.IMAQdxOpenCamera(
self.camera.InterfaceName, nv.IMAQdxCameraControlModeController
)
# Keep an img attribute so we don't have to create it every time
self.img = nv.imaqCreateImage(nv.IMAQ_IMAGE_U16)
self._abort_acquisition = False
def set_attributes(self, attr_dict):
for k, v in attr_dict.items():
self.set_attribute(k, v)
def set_attribute(self, name, value):
"""Set the value of the attribute of the given name to the given value"""
_value = value # Keep the original for the sake of the error message
if isinstance(_value, str):
_value = _value.encode('utf8')
try:
nv.IMAQdxSetAttribute(self.imaqdx, name.encode('utf8'), _value)
except Exception as e:
# Add some info to the exception:
msg = f"failed to set attribute {name} to {value}"
raise Exception(msg) from e
def get_attribute_names(self, visibility_level, writeable_only=True):
"""Return a list of all attribute names of readable attributes, for the given
visibility level. Optionally return only writeable attributes"""
visibilities = {
'simple': nv.IMAQdxAttributeVisibilitySimple,
'intermediate': nv.IMAQdxAttributeVisibilityIntermediate,
'advanced': nv.IMAQdxAttributeVisibilityAdvanced,
}
visibility_level = visibilities[visibility_level.lower()]
attributes = []
for a in nv.IMAQdxEnumerateAttributes2(self.imaqdx, b'', visibility_level):
if writeable_only and not a.Writable:
continue
if not a.Readable:
continue
attributes.append(a.Name.decode('utf8'))
return sorted(attributes)
def get_attribute(self, name):
"""Return current value of attribute of the given name"""
try:
value = nv.IMAQdxGetAttribute(self.imaqdx, name.encode('utf8'))
if isinstance(value, nv.core.IMAQdxEnumItem):
value = value.Name
if isinstance(value, bytes):
value = value.decode('utf8')
return value
except Exception as e:
# Add some info to the exception:
raise Exception(f"Failed to get attribute {name}") from e
def snap(self):
"""Acquire a single image and return it"""
nv.IMAQdxSnap(self.imaqdx, self.img)
return self._decode_image_data(self.img)
def configure_acquisition(self, continuous=True, bufferCount=5):
nv.IMAQdxConfigureAcquisition(
self.imaqdx, continuous=continuous, bufferCount=bufferCount
)
nv.IMAQdxStartAcquisition(self.imaqdx)
def grab(self, waitForNextBuffer=True):
nv.IMAQdxGrab(self.imaqdx, self.img, waitForNextBuffer=waitForNextBuffer)
return self._decode_image_data(self.img)
def grab_multiple(self, n_images, images, waitForNextBuffer=True):
print(f"Attempting to grab {n_images} images.")
for i in range(n_images):
while True:
if self._abort_acquisition:
print("Abort during acquisition.")
self._abort_acquisition = False
return
try:
images.append(self.grab(waitForNextBuffer))
print(f"Got image {i+1} of {n_images}.")
break
except nv.ImaqDxError as e:
if e.code == nv.IMAQdxErrorTimeout.value:
print('.', end='')
continue
raise
print(f"Got {len(images)} of {n_images} images.")
    def stop_acquisition(self):
        """Stop and then unconfigure the current acquisition."""
        nv.IMAQdxStopAcquisition(self.imaqdx)
        nv.IMAQdxUnconfigureAcquisition(self.imaqdx)
    def abort_acquisition(self):
        # Signal grab_multiple() (possibly running in another thread) to return early.
        self._abort_acquisition = True
def _decode_image_data(self, img):
img_array = nv.imaqImageToArray(img)
img_array_shape = (img_array[2], img_array[1])
# bitdepth in bytes
bitdepth = len(img_array[0]) // (img_array[1] * img_array[2])
dtype = {1: np.uint8, 2: np.uint16, 4: np.uint32}[bitdepth]
data = np.frombuffer(img_array[0], dtype=dtype).reshape(img_array_shape)
return data.copy()
    def close(self):
        """Close the connection to the camera."""
        nv.IMAQdxCloseCamera(self.imaqdx)
class IMAQdxCameraWorker(Worker):
    """Worker that drives a camera through manual, continuous and
    hardware-triggered (buffered) acquisition, streams frames to the parent
    process over zmq, and saves buffered-shot images to the shot's HDF5 file.
    """
    # Subclasses may override this if their interface class takes only the serial number
    # as an instantiation argument, otherwise they may reimplement get_camera():
    interface_class = IMAQdx_Camera
    def init(self):
        """One-time setup: open the camera, program its initial attributes,
        and connect the zmq REQ socket used to send images to the parent."""
        self.camera = self.get_camera()
        print("Setting attributes...")
        self.smart_cache = {}
        self.set_attributes_smart(self.camera_attributes)
        self.set_attributes_smart(self.manual_mode_camera_attributes)
        print("Initialisation complete")
        # Per-shot state: populated in transition_to_buffered, cleared again
        # in transition_to_manual/abort:
        self.images = None
        self.n_images = None
        self.attributes_to_save = None
        self.exposures = None
        self.acquisition_thread = None
        self.h5_filepath = None
        self.stop_acquisition_timeout = None
        self.exception_on_failed_shot = None
        # Continuous-mode machinery:
        self.continuous_stop = threading.Event()
        self.continuous_thread = None
        self.continuous_dt = None
        self.image_socket = Context().socket(zmq.REQ)
        self.image_socket.connect(
            f'tcp://{self.parent_host}:{self.image_receiver_port}'
        )
    def get_camera(self):
        """Return an instance of the camera interface class. Subclasses may override
        this method to pass required arguments to their class if they require more
        than just the serial number."""
        if self.mock:
            return MockCamera()
        else:
            return self.interface_class(self.serial_number)
    def set_attributes_smart(self, attributes):
        """Call self.camera.set_attributes() to set the given attributes, only setting
        those that differ from their value in, or are absent from self.smart_cache.
        Update self.smart_cache with the newly-set values"""
        uncached_attributes = {}
        for name, value in attributes.items():
            if name not in self.smart_cache or self.smart_cache[name] != value:
                uncached_attributes[name] = value
                self.smart_cache[name] = value
        self.camera.set_attributes(uncached_attributes)
    def get_attributes_as_dict(self, visibility_level):
        """Return a dict of the attributes of the camera for the given visibility
        level"""
        names = self.camera.get_attribute_names(visibility_level)
        attributes_dict = {name: self.camera.get_attribute(name) for name in names}
        return attributes_dict
    def get_attributes_as_text(self, visibility_level):
        """Return a string representation of the attributes of the camera for
        the given visibility level"""
        attrs = self.get_attributes_as_dict(visibility_level)
        # Format it nicely:
        lines = [f'    {repr(key)}: {repr(value)},' for key, value in attrs.items()]
        dict_repr = '\n'.join(['{'] + lines + ['}'])
        return self.device_name + '_camera_attributes = ' + dict_repr
    def snap(self):
        """Acquire one frame in manual mode. Send it to the parent via
        self.image_socket. Wait for a response from the parent."""
        image = self.camera.snap()
        self._send_image_to_parent(image)
    def _send_image_to_parent(self, image):
        """Send the image to the GUI to display. This will block if the parent process
        is lagging behind in displaying frames, in order to avoid a backlog."""
        metadata = dict(dtype=str(image.dtype), shape=image.shape)
        self.image_socket.send_json(metadata, zmq.SNDMORE)
        self.image_socket.send(image, copy=False)
        response = self.image_socket.recv()
        assert response == b'ok', response
    def continuous_loop(self, dt):
        """Acquire continuously in a loop, with minimum repetition interval dt"""
        while True:
            if dt is not None:
                t = perf_counter()
            image = self.camera.grab()
            self._send_image_to_parent(image)
            if dt is None:
                timeout = 0
            else:
                timeout = t + dt - perf_counter()
            # wait() doubles as the stop check and the inter-frame delay:
            if self.continuous_stop.wait(timeout):
                self.continuous_stop.clear()
                break
    def start_continuous(self, dt):
        """Begin continuous acquisition in a thread with minimum repetition interval
        dt"""
        assert self.continuous_thread is None
        self.camera.configure_acquisition()
        self.continuous_thread = threading.Thread(
            target=self.continuous_loop, args=(dt,), daemon=True
        )
        self.continuous_thread.start()
        self.continuous_dt = dt
    def stop_continuous(self, pause=False):
        """Stop the continuous acquisition thread"""
        assert self.continuous_thread is not None
        self.continuous_stop.set()
        self.continuous_thread.join()
        self.continuous_thread = None
        self.camera.stop_acquisition()
        # If we're just 'pausing', then do not clear self.continuous_dt. That way
        # continuous acquisition can be resumed with the same interval by calling
        # start(self.continuous_dt), without having to get the interval from the parent
        # again, and the fact that self.continuous_dt is not None can be used to infer
        # that continuous acquisiton is paused and should be resumed after a buffered
        # run is complete:
        if not pause:
            self.continuous_dt = None
    def transition_to_buffered(self, device_name, h5_filepath, initial_values, fresh):
        """Prepare for a buffered (triggered) shot described by ``h5_filepath``.

        Reads the EXPOSURES table and device properties from the shot file,
        reprograms camera attributes (all of them if ``fresh``), and starts a
        background thread grabbing the expected number of images. Returns an
        empty dict (no final values to report)."""
        if getattr(self, 'is_remote', False):
            h5_filepath = path_to_local(h5_filepath)
        if self.continuous_thread is not None:
            # Pause continuous acquistion during transition_to_buffered:
            self.stop_continuous(pause=True)
        with h5py.File(h5_filepath, 'r') as f:
            group = f['devices'][self.device_name]
            if not 'EXPOSURES' in group:
                # No camera exposures in this shot: nothing to configure.
                return {}
            self.h5_filepath = h5_filepath
            self.exposures = group['EXPOSURES'][:]
            self.n_images = len(self.exposures)
            # Get the camera_attributes from the device_properties
            properties = labscript_utils.properties.get(
                f, self.device_name, 'device_properties'
            )
        camera_attributes = properties['camera_attributes']
        self.stop_acquisition_timeout = properties['stop_acquisition_timeout']
        self.exception_on_failed_shot = properties['exception_on_failed_shot']
        saved_attr_level = properties['saved_attribute_visibility_level']
        # Only reprogram attributes that differ from those last programmed in, or all of
        # them if a fresh reprogramming was requested:
        if fresh:
            self.smart_cache = {}
        self.set_attributes_smart(camera_attributes)
        # Get the camera attributes, so that we can save them to the H5 file:
        if saved_attr_level is not None:
            self.attributes_to_save = self.get_attributes_as_dict(saved_attr_level)
        else:
            self.attributes_to_save = None
        print(f"Configuring camera for {self.n_images} images.")
        self.camera.configure_acquisition(continuous=False, bufferCount=self.n_images)
        self.images = []
        self.acquisition_thread = threading.Thread(
            target=self.camera.grab_multiple,
            args=(self.n_images, self.images),
            daemon=True,
        )
        self.acquisition_thread.start()
        return {}
    def transition_to_manual(self):
        """Finish the shot: join the acquisition thread, save images and (if
        requested) camera attributes to the shot file, send a preview to the
        parent, restore manual-mode attributes, and resume continuous
        acquisition if it was paused for the shot."""
        if self.h5_filepath is None:
            print('No camera exposures in this shot.\n')
            return True
        assert self.acquisition_thread is not None
        self.acquisition_thread.join(timeout=self.stop_acquisition_timeout)
        if self.acquisition_thread.is_alive():
            msg = """Acquisition thread did not finish. Likely did not acquire expected
                number of images. Check triggering is connected/configured correctly"""
            if self.exception_on_failed_shot:
                self.abort()
                raise RuntimeError(dedent(msg))
            else:
                self.camera.abort_acquisition()
                self.acquisition_thread.join()
                print(dedent(msg), file=sys.stderr)
        self.acquisition_thread = None
        print("Stopping acquisition.")
        self.camera.stop_acquisition()
        print(f"Saving {len(self.images)}/{len(self.exposures)} images.")
        with h5py.File(self.h5_filepath, 'r+') as f:
            # Use orientation for image path, device_name if orientation unspecified
            if self.orientation is not None:
                image_path = 'images/' + self.orientation
            else:
                image_path = 'images/' + self.device_name
            image_group = f.require_group(image_path)
            image_group.attrs['camera'] = self.device_name
            # Save camera attributes to the HDF5 file:
            if self.attributes_to_save is not None:
                set_attributes(image_group, self.attributes_to_save)
            # Whether we failed to get all the expected exposures:
            image_group.attrs['failed_shot'] = len(self.images) != len(self.exposures)
            # key the images by name and frametype. Allow for the case of there being
            # multiple images with the same name and frametype. In this case we will
            # save an array of images in a single dataset.
            images = {
                (exposure['name'], exposure['frametype']): []
                for exposure in self.exposures
            }
            # Iterate over expected exposures, sorted by acquisition time, to match them
            # up with the acquired images:
            self.exposures.sort(order='t')
            for image, exposure in zip(self.images, self.exposures):
                images[(exposure['name'], exposure['frametype'])].append(image)
            # Save images to the HDF5 file:
            for (name, frametype), imagelist in images.items():
                data = imagelist[0] if len(imagelist) == 1 else np.array(imagelist)
                print(f"Saving frame(s) {name}/{frametype}.")
                group = image_group.require_group(name)
                dset = group.create_dataset(
                    frametype, data=data, dtype='uint16', compression='gzip'
                )
                # Specify this dataset should be viewed as an image
                dset.attrs['CLASS'] = np.string_('IMAGE')
                dset.attrs['IMAGE_VERSION'] = np.string_('1.2')
                dset.attrs['IMAGE_SUBCLASS'] = np.string_('IMAGE_GRAYSCALE')
                dset.attrs['IMAGE_WHITE_IS_ZERO'] = np.uint8(0)
        # If the images are all the same shape, send them to the GUI for display:
        try:
            image_block = np.stack(self.images)
        except ValueError:
            print("Cannot display images in the GUI, they are not all the same shape")
        else:
            self._send_image_to_parent(image_block)
        # Clear per-shot state:
        self.images = None
        self.n_images = None
        self.attributes_to_save = None
        self.exposures = None
        self.h5_filepath = None
        self.stop_acquisition_timeout = None
        self.exception_on_failed_shot = None
        print("Setting manual mode camera attributes.\n")
        self.set_attributes_smart(self.manual_mode_camera_attributes)
        if self.continuous_dt is not None:
            # If continuous manual mode acquisition was in progress before the bufferd
            # run, resume it:
            self.start_continuous(self.continuous_dt)
        return True
    def abort(self):
        """Stop any in-progress buffered acquisition, reset all per-shot
        state, and resume continuous acquisition if it was merely paused."""
        if self.acquisition_thread is not None:
            self.camera.abort_acquisition()
            self.acquisition_thread.join()
            self.acquisition_thread = None
            self.camera.stop_acquisition()
        self.camera._abort_acquisition = False
        self.images = None
        self.n_images = None
        self.attributes_to_save = None
        self.exposures = None
        self.acquisition_thread = None
        self.h5_filepath = None
        self.stop_acquisition_timeout = None
        self.exception_on_failed_shot = None
        # Resume continuous acquisition, if any:
        if self.continuous_dt is not None and self.continuous_thread is None:
            self.start_continuous(self.continuous_dt)
        return True
    def abort_buffered(self):
        """Abort a buffered run; delegates to abort()."""
        return self.abort()
    def abort_transition_to_buffered(self):
        """Abort mid-transition to buffered mode; delegates to abort()."""
        return self.abort()
    def program_manual(self, values):
        """No programmable manual outputs; return an empty dict."""
        return {}
    def shutdown(self):
        """Stop continuous acquisition (if running) and close the camera."""
        if self.continuous_thread is not None:
            self.stop_continuous()
        self.camera.close()
| [
"numpy.uint8",
"PIL.Image.new",
"numpy.array",
"PIL.ImageDraw.Draw",
"labscript_utils.properties.set_attributes",
"nivision.IMAQdxStartAcquisition",
"nivision.IMAQdxSnap",
"nivision.IMAQdxGrab",
"labscript_utils.shared_drive.path_to_local",
"numpy.random.poisson",
"PIL.ImageFont.load_default",
... | [((3432, 3455), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (3443, 3455), True, 'import numpy as np\n'), ((3680, 3704), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (3702, 3704), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3722, 3760), 'PIL.Image.new', 'Image.new', (['"""L"""', '[N // 5, N // 5]', '(0,)'], {}), "('L', [N // 5, N // 5], (0,))\n", (3731, 3760), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3776, 3798), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['canvas'], {}), '(canvas)\n', (3790, 3798), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3956, 3986), 'numpy.random.poisson', 'np.random.poisson', (['clean_image'], {}), '(clean_image)\n', (3973, 3986), True, 'import numpy as np\n'), ((4350, 4381), 'nivision.IMAQdxEnumerateCameras', 'nv.IMAQdxEnumerateCameras', (['(True)'], {}), '(True)\n', (4375, 4381), True, 'import nivision as nv\n'), ((4747, 4836), 'nivision.IMAQdxOpenCamera', 'nv.IMAQdxOpenCamera', (['self.camera.InterfaceName', 'nv.IMAQdxCameraControlModeController'], {}), '(self.camera.InterfaceName, nv.\n IMAQdxCameraControlModeController)\n', (4766, 4836), True, 'import nivision as nv\n'), ((4946, 4983), 'nivision.imaqCreateImage', 'nv.imaqCreateImage', (['nv.IMAQ_IMAGE_U16'], {}), '(nv.IMAQ_IMAGE_U16)\n', (4964, 4983), True, 'import nivision as nv\n'), ((6259, 6324), 'nivision.IMAQdxEnumerateAttributes2', 'nv.IMAQdxEnumerateAttributes2', (['self.imaqdx', "b''", 'visibility_level'], {}), "(self.imaqdx, b'', visibility_level)\n", (6288, 6324), True, 'import nivision as nv\n'), ((7166, 7202), 'nivision.IMAQdxSnap', 'nv.IMAQdxSnap', (['self.imaqdx', 'self.img'], {}), '(self.imaqdx, self.img)\n', (7179, 7202), True, 'import nivision as nv\n'), ((7330, 7424), 'nivision.IMAQdxConfigureAcquisition', 'nv.IMAQdxConfigureAcquisition', (['self.imaqdx'], {'continuous': 'continuous', 'bufferCount': 'bufferCount'}), '(self.imaqdx, 
continuous=continuous,\n bufferCount=bufferCount)\n', (7359, 7424), True, 'import nivision as nv\n'), ((7451, 7489), 'nivision.IMAQdxStartAcquisition', 'nv.IMAQdxStartAcquisition', (['self.imaqdx'], {}), '(self.imaqdx)\n', (7476, 7489), True, 'import nivision as nv\n'), ((7543, 7616), 'nivision.IMAQdxGrab', 'nv.IMAQdxGrab', (['self.imaqdx', 'self.img'], {'waitForNextBuffer': 'waitForNextBuffer'}), '(self.imaqdx, self.img, waitForNextBuffer=waitForNextBuffer)\n', (7556, 7616), True, 'import nivision as nv\n'), ((8509, 8546), 'nivision.IMAQdxStopAcquisition', 'nv.IMAQdxStopAcquisition', (['self.imaqdx'], {}), '(self.imaqdx)\n', (8533, 8546), True, 'import nivision as nv\n'), ((8555, 8599), 'nivision.IMAQdxUnconfigureAcquisition', 'nv.IMAQdxUnconfigureAcquisition', (['self.imaqdx'], {}), '(self.imaqdx)\n', (8586, 8599), True, 'import nivision as nv\n'), ((8733, 8757), 'nivision.imaqImageToArray', 'nv.imaqImageToArray', (['img'], {}), '(img)\n', (8752, 8757), True, 'import nivision as nv\n'), ((9117, 9150), 'nivision.IMAQdxCloseCamera', 'nv.IMAQdxCloseCamera', (['self.imaqdx'], {}), '(self.imaqdx)\n', (9137, 9150), True, 'import nivision as nv\n'), ((10009, 10026), 'threading.Event', 'threading.Event', ([], {}), '()\n', (10024, 10026), False, 'import threading\n'), ((13530, 13600), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.continuous_loop', 'args': '(dt,)', 'daemon': '(True)'}), '(target=self.continuous_loop, args=(dt,), daemon=True)\n', (13546, 13600), False, 'import threading\n'), ((16387, 16489), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.camera.grab_multiple', 'args': '(self.n_images, self.images)', 'daemon': '(True)'}), '(target=self.camera.grab_multiple, args=(self.n_images,\n self.images), daemon=True)\n', (16403, 16489), False, 'import threading\n'), ((14664, 14690), 'labscript_utils.shared_drive.path_to_local', 'path_to_local', (['h5_filepath'], {}), '(h5_filepath)\n', (14677, 14690), False, 'from 
labscript_utils.shared_drive import path_to_local\n'), ((14869, 14896), 'h5py.File', 'h5py.File', (['h5_filepath', '"""r"""'], {}), "(h5_filepath, 'r')\n", (14878, 14896), False, 'import h5py\n'), ((17591, 17624), 'h5py.File', 'h5py.File', (['self.h5_filepath', '"""r+"""'], {}), "(self.h5_filepath, 'r+')\n", (17600, 17624), False, 'import h5py\n'), ((19946, 19967), 'numpy.stack', 'np.stack', (['self.images'], {}), '(self.images)\n', (19954, 19967), True, 'import numpy as np\n'), ((2069, 2096), 'ctypes.byref', 'ctypes.byref', (['obj._contents'], {}), '(obj._contents)\n', (2081, 2096), False, 'import ctypes\n'), ((8994, 9034), 'numpy.frombuffer', 'np.frombuffer', (['img_array[0]'], {'dtype': 'dtype'}), '(img_array[0], dtype=dtype)\n', (9007, 9034), True, 'import numpy as np\n'), ((10127, 10136), 'labscript_utils.ls_zprocess.Context', 'Context', ([], {}), '()\n', (10134, 10136), False, 'from labscript_utils.ls_zprocess import Context\n'), ((12930, 12944), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (12942, 12944), False, 'from time import perf_counter\n'), ((18132, 18184), 'labscript_utils.properties.set_attributes', 'set_attributes', (['image_group', 'self.attributes_to_save'], {}), '(image_group, self.attributes_to_save)\n', (18146, 18184), False, 'from labscript_utils.properties import set_attributes\n'), ((19599, 19618), 'numpy.string_', 'np.string_', (['"""IMAGE"""'], {}), "('IMAGE')\n", (19609, 19618), True, 'import numpy as np\n'), ((19665, 19682), 'numpy.string_', 'np.string_', (['"""1.2"""'], {}), "('1.2')\n", (19675, 19682), True, 'import numpy as np\n'), ((19730, 19759), 'numpy.string_', 'np.string_', (['"""IMAGE_GRAYSCALE"""'], {}), "('IMAGE_GRAYSCALE')\n", (19740, 19759), True, 'import numpy as np\n'), ((19812, 19823), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (19820, 19823), True, 'import numpy as np\n'), ((3523, 3549), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2))'], {}), '(-(x ** 2 + y ** 2))\n', (3529, 3549), True, 'import numpy 
as np\n'), ((13138, 13152), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (13150, 13152), False, 'from time import perf_counter\n'), ((17206, 17217), 'labscript_utils.dedent', 'dedent', (['msg'], {}), '(msg)\n', (17212, 17217), False, 'from labscript_utils import dedent\n'), ((17354, 17365), 'labscript_utils.dedent', 'dedent', (['msg'], {}), '(msg)\n', (17360, 17365), False, 'from labscript_utils import dedent\n'), ((19215, 19234), 'numpy.array', 'np.array', (['imagelist'], {}), '(imagelist)\n', (19223, 19234), True, 'import numpy as np\n')] |
import collections
import json
import os
import warnings
import copy
import attr
import numpy as np
from functools import lru_cache
import torch
from transformers import AutoModel, AutoTokenizer, AutoConfig
from text2qdmr.model.modules import abstract_preproc
from text2qdmr.model.modules import encoder_modules
from text2qdmr.datasets.utils.spider_match_utils import (
compute_schema_linking,
compute_cell_value_linking
)
from text2qdmr.utils import corenlp
from text2qdmr.utils import registry
from text2qdmr.utils import serialization
from text2qdmr.utils.serialization import ComplexEncoder, ComplexDecoder
from text2qdmr.datasets.qdmr import QDMRStepArg
from qdmr2sparql.structures import QdmrInstance, GroundingKey
# Disable HuggingFace tokenizers' parallelism; presumably to avoid the
# fork-related warnings/deadlocks in multi-worker data loading — confirm.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@attr.s
class EncoderState:
    """Bundle of encoder outputs for a single example.

    Field contents are defined by the encoder that constructs this object;
    only ``words`` is consumed locally, by ``find_word_occurrences``.
    """
    state = attr.ib()
    memory = attr.ib()
    question_memory = attr.ib()
    schema_memory = attr.ib()
    words = attr.ib()  # sequence of question tokens (searched by find_word_occurrences)
    pointer_memories = attr.ib()
    pointer_maps = attr.ib()
    # m2c/m2t: presumably memory-to-column / memory-to-table alignment
    # matrices (inferred from the names) — confirm against the encoder.
    m2c_align_mat = attr.ib()
    m2t_align_mat = attr.ib()
    value_memories = attr.ib()
    values = attr.ib()
    value_emb = attr.ib()
    grnd_idx = attr.ib()
    def find_word_occurrences(self, word):
        """Return the indices of every position in ``self.words`` equal to ``word``."""
        return [i for i, w in enumerate(self.words) if w == word]
@attr.s
class PreprocessedSchema:
    """Tokenized schema items plus the structural maps built by
    ``preprocess_schema_uncached``."""
    column_names = attr.ib(factory=list)  # token list per column (name [+ table tokens] + type token)
    table_names = attr.ib(factory=list)  # token list per table
    table_bounds = attr.ib(factory=list)  # first column index of each table, then total column count
    column_to_table = attr.ib(factory=dict)  # str(column position) -> table id (or None)
    table_to_columns = attr.ib(factory=dict)  # str(table id) -> list of column positions
    foreign_keys = attr.ib(factory=dict)  # str(column id) -> referenced column id
    foreign_keys_tables = attr.ib(factory=lambda: collections.defaultdict(set))  # table id -> referenced table ids
    primary_keys = attr.ib(factory=list)  # column indices of primary-key columns
    # Bertokens/Robertatokens wrappers of the names, used for schema linking
    # (filled for bert, roberta and grappa alike):
    normalized_column_names = attr.ib(factory=list)
    normalized_table_names = attr.ib(factory=list)
def preprocess_schema_uncached(schema,
                               tokenize_func,
                               include_table_name_in_column,
                               fix_issue_16_primary_keys,
                               delete_first=False,
                               shift=False,
                               pretrained_type='bert'):
    """If it's bert, we also cache the normalized version of
    question/column/table for schema linking.

    Builds a PreprocessedSchema from ``schema``: tokenizes every column and
    table name with ``tokenize_func`` (which returns ``(tokens, index_map)``
    for roberta/grappa, plain tokens for bert), and records the structural
    maps (column<->table, foreign keys, primary keys, table bounds).

    ``delete_first`` skips the first column (typically '*');  ``shift``
    offsets all column indices by the number of tables, for encodings where
    tables precede columns in one flat sequence.
    """
    r = PreprocessedSchema()
    last_table_id = None
    start_idx = 1 if delete_first else 0
    col_shift = len(schema.tables) if shift else 0
    for i, column in enumerate(schema.columns[start_idx:]):
        if pretrained_type == 'bert':
            col_toks = tokenize_func(column.name, column.unsplit_name)
        elif pretrained_type == 'roberta' or pretrained_type == 'grappa':
            col_toks, col_idx_map = tokenize_func(column.name, column.unsplit_name)
        else:
            raise Exception(f'Unkonwn pretrained type {pretrained_type}')
        # assert column.type in ["text", "number", "time", "boolean", "others"]
        type_tok = f'<type: {column.type}>'
        # for bert, we take the representation of the first word
        column_name = col_toks
        if include_table_name_in_column:
            if column.table is None:
                table_name = ['<any-table>']
            else:
                table_name = tokenize_func(
                    column.table.name, column.table.unsplit_name)
            column_name += ['<table-sep>'] + table_name
        column_name += [type_tok]
        if pretrained_type == 'bert':
            r.normalized_column_names.append(Bertokens(col_toks))
        elif pretrained_type == 'roberta' or pretrained_type == 'grappa':
            r.normalized_column_names.append(Robertatokens(col_toks, col_idx_map))
        else:
            raise Exception(f'Unkonwn pretrained type {pretrained_type}')
        r.column_names.append(column_name)
        table_id = None if column.table is None else column.table.id
        r.column_to_table[str(i + col_shift)] = table_id
        if table_id is not None:
            columns = r.table_to_columns.setdefault(str(table_id), [])
            columns.append(i + col_shift)
        # Columns are assumed grouped by table; a change of table id marks a
        # new table's first column:
        if last_table_id != table_id:
            r.table_bounds.append(i + col_shift)
            last_table_id = table_id
        if column.foreign_key_for is not None:
            r.foreign_keys[str(column.id - start_idx + col_shift)] = column.foreign_key_for.id - start_idx + col_shift
            r.foreign_keys_tables[str(column.table.id)].add(column.foreign_key_for.table.id)
    r.table_bounds.append(len(schema.columns[start_idx:]))
    assert len(r.table_bounds) == len(schema.tables) + 1
    for i, table in enumerate(schema.tables):
        if pretrained_type == 'bert':
            table_toks = tokenize_func(table.name, table.unsplit_name)
            r.normalized_table_names.append(Bertokens(table_toks))
        elif pretrained_type == 'roberta' or pretrained_type == 'grappa':
            table_toks, table_idx_map = tokenize_func(table.name, table.unsplit_name)
            r.normalized_table_names.append(Robertatokens(table_toks, table_idx_map))
        else:
            raise Exception(f'Unkonwn pretrained type {pretrained_type}')
        r.table_names.append(table_toks)
    last_table = schema.tables[-1]
    r.foreign_keys_tables = serialization.to_dict_with_sorted_values(r.foreign_keys_tables)
    # NOTE(review): the non-fixed branch replicates each primary key of the
    # *last* table once per table — this reproduces the original "issue 16"
    # behaviour and is kept deliberately behind fix_issue_16_primary_keys.
    r.primary_keys = [
        column.id - start_idx + col_shift
        for table in schema.tables
        for column in table.primary_keys
    ] if fix_issue_16_primary_keys else [
        column.id - start_idx + col_shift
        for column in last_table.primary_keys
        for table in schema.tables
    ]
    return r
def get_value_unit_dict(item, tokenizer, pretrained_type='bert', shuffle_values=False):
    """Group the item's value units by their tokenization and deduplicate.

    Each value unit is tokenized with ``tokenizer``; units that tokenize
    identically are merged into one group, keeping either all non-text units
    or the preferred text unit as decided by ``compare_value_units``. The
    group's question-match span (intersection of member spans) is propagated
    onto qdmr-sourced units that lack one.

    Returns a dict mapping a tuple of the group's string representations to
    the list of surviving value units, each evolved with a group index
    ``idx`` and its tokenization.
    """
    assert len(item.values) == 1
    value_units = item.values[0]
    # group by tokens
    value_unit_by_toks = collections.defaultdict(list)
    for val_unit in value_units:
        # tokenize
        if pretrained_type == 'bert':
            val_tokens = tokenizer(str(val_unit))
            bert_tokens = Bertokens(val_tokens)
        elif pretrained_type == 'roberta' or pretrained_type == 'grappa':
            # Tokenize word by word, recording where each word starts in the
            # subword sequence:
            splited = str(val_unit).split()
            val_tokens = []
            val_idx_map = {}
            for i, token in enumerate(splited):
                toks = tokenizer(token)
                val_idx_map[i] = len(val_tokens)
                val_tokens += toks
            val_idx_map[len(splited)] = len(val_tokens)
            bert_tokens = Robertatokens(val_tokens, val_idx_map)
        else:
            raise Exception(f'Unkonwn pretrained type {pretrained_type}')
        value_unit_by_toks[repr(val_tokens)].append(attr.evolve(val_unit, tokenized_value=val_tokens, bert_tokens=bert_tokens))
    value_unit_by_toks_keys = list(value_unit_by_toks.keys())
    if shuffle_values:
        num_items = len(value_unit_by_toks_keys)
        item_shuffle_order = torch.randperm(num_items) # using random form torch to have worker seeds under control
        value_unit_by_toks_keys = [value_unit_by_toks_keys[i] for i in item_shuffle_order]
    value_unit_dict = {}
    for i, value_unit_key in enumerate(value_unit_by_toks_keys):
        value_units = value_unit_by_toks[value_unit_key]
        # cur_idx tracks positions of units that would be discarded if a
        # preferred text unit is found later:
        cur_idx = [0]
        cur_value_unit = attr.evolve(value_units[0], idx=i)
        str_value = str(cur_value_unit)
        assert str_value not in value_unit_dict, (value_unit_dict, cur_value_unit)
        values, str_values = [cur_value_unit], [str_value]
        idx_match = cur_value_unit.q_match['idx_question'] if cur_value_unit.q_match else None
        type_match = cur_value_unit.q_match['match'] if cur_value_unit.q_match else None
        for val_unit in value_units[1:]:
            assert cur_value_unit.tokenized_value == val_unit.tokenized_value, (val_unit, str_values, values)
            val_unit = attr.evolve(val_unit, idx=i)
            res = compare_value_units(cur_value_unit, val_unit)
            if res is None:
                # both are from schema or qdmr
                cur_idx.append(len(str_values))
                str_values.append(str(val_unit))
                values.append(val_unit)
            elif res is False:
                # val_unit is preferred: drop the provisional units and make
                # it the new representative.
                for idx in cur_idx[::-1]:
                    del str_values[idx]
                    del values[idx]
                cur_idx = [len(str_values)]
                cur_value_unit = val_unit
                str_values.append(str(cur_value_unit))
                values.append(cur_value_unit)
            if val_unit.q_match:
                if idx_match:
                    # Narrow the group's question span to the overlap of the
                    # member spans:
                    left = max(val_unit.q_match['idx_question'][0], idx_match[0])
                    right = min(val_unit.q_match['idx_question'][-1], idx_match[-1])
                    idx_match = tuple(range(left, right + 1))
                    type_match = 'VEM' if type_match == 'VEM' or val_unit.q_match['match'] == 'VEM' else 'VPM'
                else:
                    idx_match = val_unit.q_match['idx_question']
                    type_match = val_unit.q_match['match']
        if idx_match:
            # NOTE(review): the loop below reuses the name ``i`` from the
            # outer enumerate; harmless here since the outer loop reassigns
            # it, but worth renaming.
            for i, val_unit in enumerate(values):
                if val_unit.source == 'qdmr' and not val_unit.q_match:
                    values[i] = attr.evolve(val_unit, q_match={'idx_question': idx_match, 'match': type_match})
        value_unit_dict[tuple(str_values)] = values
    return value_unit_dict
def compare_value_units(value_unit1, value_unit2):
    """Decide a preference between two value units with identical tokens.

    Returns None when no preference applies (neither unit comes from the
    question text, or the lone text unit is a number), False when
    ``value_unit2`` is preferred, and True when ``value_unit1`` is.
    When both come from the text, the one with the shorter original string
    wins (ties favour the first).
    """
    first_is_text = value_unit1.source == 'text'
    second_is_text = value_unit2.source == 'text'
    if not first_is_text and not second_is_text:
        # filter only units from text that are not numbers
        return None
    if first_is_text and not second_is_text:
        return None if value_unit1.value_type == 'number' else False
    if not first_is_text and second_is_text:
        return None if value_unit2.value_type == 'number' else True
    # Both units originate from the question text.
    assert value_unit1.q_match and value_unit2.q_match, (value_unit1, value_unit2)
    assert value_unit1.source == value_unit2.source == 'text', (value_unit1, value_unit2)
    return len(value_unit1.orig_value) <= len(value_unit2.orig_value)
def get_tokenized_values(value_unit_dict):
    """Collect one ``tokenized_value`` per group of value units.

    Each value of ``value_unit_dict`` is a non-empty list of units sharing
    the same tokenization; only the first unit of each group is consulted.
    The result is wrapped in a single-element outer list.
    """
    representatives = (units[0] for units in value_unit_dict.values())
    return [[unit.tokenized_value for unit in representatives]]
class BreakFullEncoderBertPreproc(abstract_preproc.AbstractPreproc):
    def __init__(
            self,
            save_path,
            db_path,
            fix_issue_16_primary_keys=False,
            include_table_name_in_column=False,
            pretrained_version="bert",
            compute_sc_link=True,
            compute_cv_link=False,
            use_bert_unlimited_length=False,
            use_column_type=False,
            use_general_grounding=True,
            use_graph_relations= False,
            use_type_relations=False,
            merge_sc_link=False,
            add_cellmatch=False,
            construct_general_grounding=False,
            use_bert_masks=False):
        """Set up the preprocessor: store configuration flags and load the
        pretrained tokenizer/config for ``pretrained_version`` (one of
        'bert', 'roberta', 'grappa'), adding the special column-type tokens
        (and table-separator tokens when table names are appended to column
        names)."""
        self.data_dir = os.path.join(save_path, 'enc')
        self.db_path = db_path
        # Preprocessed items per dataset section (e.g. train/val):
        self.texts = collections.defaultdict(list)
        self.fix_issue_16_primary_keys = fix_issue_16_primary_keys
        self.include_table_name_in_column = include_table_name_in_column
        self.compute_sc_link = compute_sc_link
        self.compute_cv_link = compute_cv_link
        self.use_bert_unlimited_length = use_bert_unlimited_length
        self.use_column_type = use_column_type
        self.use_bert_masks = use_bert_masks
        self.pretrained_version = pretrained_version
        self.counted_db_ids = set()
        self.preprocessed_schemas = {}
        # NOTE(review): no else branch — an unrecognised pretrained_version
        # leaves pretrained_modelname unset and fails below with
        # AttributeError rather than a clear message.
        if self.pretrained_version == 'bert':
            self.pretrained_modelname = 'bert-large-uncased-whole-word-masking'
        elif self.pretrained_version == 'grappa':
            self.pretrained_modelname = 'Salesforce/grappa_large_jnt'
        elif self.pretrained_version == 'roberta':
            self.pretrained_modelname = 'roberta-large'
        self.config = AutoConfig.from_pretrained(self.pretrained_modelname)
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_modelname)
        except Exception as e:
            # Fall back to any locally cached copy (e.g. when offline):
            print("WARNING: could not run the tokenizer normally, seeing this error:", e)
            print("Trying to run with local_files_only=True")
            self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_modelname, local_files_only=True)
        # TODO: should get types from the data
        column_types = ["text", "number", "time", "boolean", "others"]
        new_tokens = [f"<type: {t}>" for t in column_types]
        if include_table_name_in_column:
            new_tokens += ['<table-sep>', '<any-table>']
        self.tokenizer.add_tokens(new_tokens)
        self.use_general_grounding = use_general_grounding
        self.merge_sc_link = merge_sc_link
        self.add_cellmatch = add_cellmatch
        self.construct_general_grounding = construct_general_grounding
        self.use_graph_relations = use_graph_relations
        self.use_type_relations = use_type_relations
    def _tokenize(self, presplit, unsplit, pretokenized=None):
        """Tokenize text into subword tokens.

        With ``pretokenized`` words (or, for non-bert models, ``presplit``
        words), each word is tokenized separately and an index map from word
        position to first-subword position is returned alongside the tokens.
        Otherwise the raw ``unsplit`` string is tokenized and only the token
        list is returned. If no tokenizer is configured, ``presplit`` is
        returned unchanged.
        """
        if self.tokenizer:
            if pretokenized:
                all_toks = []
                idx_map = {}
                for i, token in enumerate(pretokenized):
                    toks = self.tokenizer.tokenize(token)
                    idx_map[i] = len(all_toks)
                    all_toks += toks
                # Sentinel entry: maps the one-past-the-end word position to
                # the total subword count.
                idx_map[len(pretokenized)] = len(all_toks)
                return all_toks, idx_map
            elif presplit and self.pretrained_version != 'bert':
                all_toks = []
                idx_map = {}
                for i, token in enumerate(presplit):
                    toks = self.tokenizer.tokenize(token)
                    idx_map[i] = len(all_toks)
                    all_toks += toks
                idx_map[len(presplit)] = len(all_toks)
                return all_toks, idx_map
            toks = self.tokenizer.tokenize(unsplit)
            return toks
        return presplit
    def values_to_sc_link(self, sc_link, value_unit_dict, idx_map, schema):
        """Add value-based entries to the schema-linking dict ``sc_link``.

        For each value unit with a known source column, records a CELLMATCH
        edge "column_id,value_idx" in ``col_val_match`` (unless merged
        linking without cellmatch is configured). For units matched in the
        question, records per-subword-token "q_id,value_idx" edges in
        ``q_val_match`` (via ``idx_map`` from word to subword positions),
        never downgrading an exact match (VEM) to a partial one (VPM).
        Returns the mutated ``sc_link``.
        """
        for val_units in value_unit_dict.values():
            for val_unit in val_units:
                if (not self.merge_sc_link or self.merge_sc_link and self.add_cellmatch) and val_unit.column:
                    matched = False
                    for column in schema.columns:
                        if column.orig_name == val_unit.column and column.table.orig_name == val_unit.table:
                            # Exactly one schema column must match the unit's
                            # (table, column) pair:
                            assert not matched
                            sc_link['col_val_match'][f"{column.id},{val_unit.idx}"] = 'CELLMATCH'
                            matched = True
                    assert matched, (val_unit, schema.columns)
                if val_unit.q_match:
                    for idx in val_unit.q_match['idx_question']:
                        for q_id in range(idx_map[idx], idx_map[idx + 1]):
                            link = sc_link['q_val_match'].get(f"{q_id},{val_unit.idx}")
                            if link == 'VEM' and val_unit.q_match['match'] == 'VPM':
                                continue
                            else:
                                sc_link['q_val_match'][f"{q_id},{val_unit.idx}"] = val_unit.q_match['match']
        return sc_link
def recompute_sc_link(self, sc_link, m_type, shift_i=0, shift_j=0):
_match = {}
for ij_str in sc_link[m_type].keys():
i_str, j_str = ij_str.split(",")
i_str, j_str = int(i_str), int(j_str)
_match[f"{i_str + shift_i},{j_str + shift_j}"] = sc_link[m_type][ij_str]
sc_link[m_type] = _match
return sc_link
    def add_item(self, item, section, idx_to_add, validation_info):
        """Preprocess ``item`` and append the result to ``self.texts[section]``."""
        preprocessed = self.preprocess_item(item, idx_to_add, validation_info)
        self.texts[section].append(preprocessed)
    def clear_items(self):
        """Discard all previously added preprocessed items."""
        self.texts = collections.defaultdict(list)
def preprocess_item(self, item, idx_to_add, validation_info):
    """Turn one dataset item into the serializable dict the encoder consumes.

    Tokenizes the question, preprocesses the (cached) schema, computes the
    schema-linking (``sc_link``) and cell-value-linking (``cv_link``)
    tables, and assembles the flat "general grounding" sequence
    (table names + column names + value tokens) with per-type counts.

    Args:
        item: dataset example (text, token lists, schema, naming fields).
        idx_to_add: not referenced in this method.  # NOTE(review): confirm dead parameter
        validation_info: the value_unit_dict produced by ``validate_item``.

    Returns:
        dict of preprocessed fields; graph-relation fields are None unless
        a schema is present and ``self.use_graph_relations`` is set.
    """
    value_unit_dict = validation_info
    # Prefer the original Spider question string when available.
    if item.orig_spider_entry is not None:
        raw_question = item.orig_spider_entry['question']
    else:
        raw_question = item.text
    question, idx_map = self._tokenize(item.text_toks, raw_question, item.text_toks_for_val)
    all_tokenized_values = get_tokenized_values(value_unit_dict)
    has_schema = item.schema is not None
    if has_schema:
        preproc_schema = self._preprocess_schema(item.schema,
                                                 delete_first=self.construct_general_grounding or self.use_general_grounding,
                                                 shift=self.use_general_grounding)
        # Wrap sub-tokens in the helper matching the pretrained tokenizer family.
        if self.pretrained_version == 'bert':
            question_bert_tokens = Bertokens(question)
        else:
            question_bert_tokens = Robertatokens(question, idx_map)
        if self.compute_sc_link:
            sc_link = question_bert_tokens.bert_schema_linking(
                preproc_schema.normalized_column_names,
                preproc_schema.normalized_table_names,
                value_unit_dict
            )
            if value_unit_dict:
                # Layer value<->column and question<->value links on top.
                sc_link = self.values_to_sc_link(sc_link, value_unit_dict, idx_map, item.schema)
        else:
            sc_link = {"q_col_match": {}, "q_tab_match": {}, "q_val_match": {}, "col_val_match": {}}
        if self.compute_cv_link:
            cv_link = question_bert_tokens.bert_cv_linking(item.schema)
        else:
            cv_link = {"num_date_match": {}, "cell_match": {}}
    else:
        preproc_schema = None
        sc_link = {"q_col_match": {}, "q_tab_match": {}, "q_val_match": {}, "col_val_match": {}}
        cv_link = {"num_date_match": {}, "cell_match": {}}
        if self.compute_sc_link:
            if value_unit_dict:
                sc_link = self.values_to_sc_link(sc_link, value_unit_dict, idx_map, None)
    # Build the flat grounding sequence: [tables..., columns..., values...].
    general_grounding = []
    general_grounding_types = {}
    if has_schema:
        general_grounding += preproc_schema.table_names
        tab_len = len(general_grounding)
        general_grounding_types['table'] = tab_len
        # Link indices were computed per item type; shift them so they
        # address positions inside the concatenated grounding sequence.
        if self.compute_sc_link and not self.construct_general_grounding:
            sc_link = self.recompute_sc_link(sc_link, 'q_col_match', shift_j=tab_len)
        if not self.use_column_type:
            general_grounding += [c[:-1] for c in preproc_schema.column_names]
        else:
            general_grounding += preproc_schema.column_names  # without '*'
        general_grounding_types['column'] = len(preproc_schema.column_names)
        if self.compute_sc_link and not self.construct_general_grounding:
            sc_link = self.recompute_sc_link(sc_link, 'col_val_match', shift_i=tab_len - 1, shift_j=len(general_grounding))
            sc_link = self.recompute_sc_link(sc_link, 'q_val_match', shift_j=len(general_grounding))
        if self.compute_cv_link and self.construct_general_grounding:
            # shift_j=-1: column indices move down by one (schema was
            # preprocessed with delete_first, presumably dropping '*') —
            # NOTE(review): confirm against _preprocess_schema.
            cv_link = self.recompute_sc_link(cv_link, 'num_date_match', shift_j=-1)
            cv_link = self.recompute_sc_link(cv_link, 'cell_match', shift_j=-1)
        elif self.compute_cv_link:
            cv_link = self.recompute_sc_link(cv_link, 'num_date_match', shift_j=tab_len - 1)
            cv_link = self.recompute_sc_link(cv_link, 'cell_match', shift_j=tab_len - 1)
    assert len(all_tokenized_values) == 1, len(all_tokenized_values)
    if all_tokenized_values[0]:
        general_grounding += all_tokenized_values[0]  # one grounding
        general_grounding_types['value'] = len(all_tokenized_values[0])
    if self.merge_sc_link:
        # Collapse all question-side link types into q_col_match with coarse
        # exact ('...EM' -> CEM) vs partial (-> CPM) tags; keep col_val_match
        # only when cellmatch links are requested.
        new_sc_link = {"q_col_match": {}, "q_tab_match": {}, "q_val_match": {}, "col_val_match": {}}
        for m_type in sc_link.keys():
            for ij_str, match in sc_link[m_type].items():
                if m_type.find('q_') >= 0:
                    new_sc_link['q_col_match'][ij_str] = 'CEM' if sc_link[m_type][ij_str].find('EM') >= 0 else 'CPM'
                else:
                    assert m_type == 'col_val_match'
                    if self.add_cellmatch:
                        new_sc_link['col_val_match'][ij_str] = sc_link[m_type][ij_str]
        sc_link = new_sc_link
    # Sanity: every grounding entry is accounted for by a type count.
    assert len(general_grounding) == sum(list(general_grounding_types.values())), (general_grounding, general_grounding_types)
    return {
        'raw_question': raw_question,
        'question': question,
        'db_id': item.schema.db_id if item.schema is not None else None,
        'sc_link': sc_link,
        'cv_link': cv_link,
        'columns': preproc_schema.column_names if has_schema and self.use_graph_relations else None,
        'tables': preproc_schema.table_names if has_schema and self.use_graph_relations else None,
        'tokenized_values': all_tokenized_values if not self.use_general_grounding else None,
        'values': None,
        'table_bounds': preproc_schema.table_bounds if has_schema and self.use_graph_relations else None,
        'column_to_table': preproc_schema.column_to_table if has_schema and self.use_graph_relations else None,
        'table_to_columns': preproc_schema.table_to_columns if has_schema and self.use_graph_relations else None,
        'foreign_keys': preproc_schema.foreign_keys if has_schema and self.use_graph_relations else None,
        'foreign_keys_tables': preproc_schema.foreign_keys_tables if has_schema and self.use_graph_relations else None,
        'primary_keys': preproc_schema.primary_keys if has_schema and self.use_graph_relations else None,
        'general_grounding': general_grounding,
        'general_grounding_types': general_grounding_types if not self.merge_sc_link or \
            self.use_type_relations or self.use_graph_relations else None,
        'idx': item.subset_idx,
        'full_name': item.full_name,
        'subset_name': item.subset_name,
    }
def validate_item(self, item, section,
                  shuffle_tables=False, shuffle_columns=False, shuffle_values=False,
                  shuffle_sort_dir=False,
                  shuffle_compsup_op=False,
                  shuffle_qdmr_ordering=False):
    """Validate (and optionally augment, in place) one item before preprocessing.

    Applies the requested augmentations — schema table/column shuffling,
    random QDMR step reordering, sort-direction flips, and
    comparative/superlative operator flips — then rejects items whose
    padded token sequence would exceed 512 tokens.

    Returns:
        (all_results, value_unit_dict): per-choice keep flags (a
        single-element list; ``num_choices`` is asserted to be 1) and the
        recomputed value-unit dictionary.
    """
    has_schema = item.schema is not None
    if has_schema:
        if shuffle_tables or shuffle_columns:
            shuffled = self.shuffle_schema_inplace(item.schema, shuffle_tables, shuffle_columns)
            # The cached preprocessed schema is stale after an in-place shuffle.
            recompute_cache = shuffled
        else:
            recompute_cache = False
        preproc_schema = self._preprocess_schema(item.schema,
                                                 delete_first=self.construct_general_grounding or self.use_general_grounding,
                                                 shift=self.use_general_grounding,
                                                 recompute_cache=recompute_cache)
    if shuffle_qdmr_ordering:
        shuffled, item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding =\
            self.generate_random_topsort_qdmr(item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding)
    num_choices = len(item.values)
    assert num_choices == 1
    all_results = [True] * num_choices
    value_unit_dict = get_value_unit_dict(item, self.tokenizer.tokenize, pretrained_type=self.pretrained_version, shuffle_values=shuffle_values)
    if item.orig_spider_entry is not None:
        raw_question = item.orig_spider_entry['question']
    else:
        raw_question = item.text
    if shuffle_sort_dir:
        shuffled, item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding, raw_question, text_toks_for_val, value_unit_dict =\
            self.shuffle_qdmr_sort_dir(item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding, raw_question, item.text_toks_for_val, value_unit_dict)
        # The augmentation rewrote the question; drop the now-stale Spider entry.
        item.orig_spider_entry = None
        item.text = raw_question
        item.text_toks_for_val = text_toks_for_val
    if shuffle_compsup_op:
        shuffled, item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding, raw_question, text_toks_for_val, value_unit_dict =\
            self.shuffle_compsup_op(item.qdmr_code, item.qdmr_args, item.qdmr_ops, item.grounding, raw_question, item.text_toks_for_val, value_unit_dict)
        item.orig_spider_entry = None
        item.text = raw_question
        item.text_toks_for_val = text_toks_for_val
    if self.tokenizer and item.text_toks_for_val:
        question, idx_map = self._tokenize(item.text_toks, raw_question, item.text_toks_for_val)
    else:
        # NOTE(review): with a tokenizer set but empty text_toks_for_val,
        # _tokenize may still return a (tokens, idx_map) tuple for a
        # non-bert presplit, making len(question) == 2 below — confirm.
        question = self._tokenize(item.text_toks, raw_question, item.text_toks_for_val)
    tokenized_values = get_tokenized_values(value_unit_dict)
    for i in range(num_choices):
        tokenized_values = tokenized_values[0]
        if has_schema:
            table_names = preproc_schema.table_names
            column_names = preproc_schema.column_names  # without '*'
            if not self.use_column_type:
                column_names = [c[:-1] for c in column_names]
        else:
            column_names, table_names = [], []
        # +2 and +1 per item account for special/separator tokens —
        # presumably [CLS]/[SEP]; see pad_single_sentence_for_bert usage.
        num_words = len(question) + 2 + \
                    sum(len(c) + 1 for c in column_names) + \
                    sum(len(t) + 1 for t in table_names) + \
                    sum(len(v) + 1 for v in tokenized_values)
        if not self.use_bert_unlimited_length and num_words > 512:
            print('Skipping {}: too long sequence {}'.format(item.full_name, num_words))
            all_results[i] = False  # remove long sequences
    return all_results, value_unit_dict
def _preprocess_schema(self, schema, delete_first=False, shift=False, recompute_cache=False):
    """Return the preprocessed form of ``schema``, memoized by its db_id.

    The cached entry is reused unless ``recompute_cache`` forces a rebuild
    (e.g. after the schema was shuffled in place).
    """
    if schema.db_id in self.preprocessed_schemas and not recompute_cache:
        return self.preprocessed_schemas[schema.db_id]
    preprocessed = preprocess_schema_uncached(
        schema, self._tokenize,
        self.include_table_name_in_column,
        self.fix_issue_16_primary_keys,
        pretrained_type=self.pretrained_version,
        delete_first=delete_first, shift=shift)
    self.preprocessed_schemas[schema.db_id] = preprocessed
    return preprocessed
@classmethod
def shuffle_qdmr_sort_dir(cls, qdmr_code, qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict):
    """Randomly flip the sort direction of SORT steps (data augmentation).

    Looks for a direction keyword (or a "from ... to ..." span) in the
    question tokens, rewrites those tokens to express the opposite
    direction, remaps affected value-match indices, and updates the
    grounded sort direction in a deep copy of the QDMR program. Each
    eligible step is flipped with probability 1/2 (torch.randint).

    Returns an 8-tuple (shuffled, qdmr_code, qdmr_args, qdmr_ops,
    grounding, raw_question, text_toks_for_val, value_unit_dict); when a
    flip happened, qdmr_args/qdmr_ops/grounding/raw_question come back as
    None (deliberately not kept in sync — see tail comments).
    """
    shuffled = False
    qdmr_code_cur = qdmr_code[0]
    qdmr_code_new = copy.deepcopy(qdmr_code_cur)
    for i_step, step in enumerate(qdmr_code_cur):
        op = step[0]
        args = step[1]
        if op.lower() == "sort" and len(args) == 3:
            sort_dir_arg = args[2]
            if not isinstance(sort_dir_arg, QDMRStepArg):
                continue
            sort_dir_arg = sort_dir_arg.arg
            if not isinstance(sort_dir_arg, GroundingKey) or not sort_dir_arg.issortdir():
                continue
            sort_dir_arg = sort_dir_arg.keys[0]
            # pattern "from ... to ... "
            def match_fromto_pattern(text_toks):
                # Returns (prefix_pos, suffix_pos, replacement_tokens) or
                # (None, None, None) when no usable "from X to Y" span exists.
                none_output = None, None, None
                if "from" not in text_toks:
                    return none_output
                index_from = text_toks.index("from")
                if "to" not in text_toks[index_from:]:
                    return none_output
                index_to = text_toks[index_from:].index("to") + index_from
                if index_to == len(text_toks) - 1:
                    # 'to' is the last token - cannot do anything
                    return none_output
                # Swap endpoints: "from A to B" -> "from B to A".
                toks_replace_tgt = ["from"] + [text_toks[index_to + 1]] + ["to"] + text_toks[index_from+1 : index_to]
                tok_prefix_pos = index_from
                tok_suffix_pos = index_to + 2
                return tok_prefix_pos, tok_suffix_pos, toks_replace_tgt
            # direction keywords
            tok_replace_src = None
            toks_replace_tgt = None
            if sort_dir_arg == "ascending":
                if "ascending" in text_toks_for_val:
                    tok_replace_src = "ascending"
                    toks_replace_tgt = ["descending"]
                elif "increasing" in text_toks_for_val:
                    tok_replace_src = "increasing"
                    toks_replace_tgt = ["decreasing"]
                elif "alphabetical" in text_toks_for_val:
                    tok_replace_src = "alphabetical"
                    toks_replace_tgt = ["descending", "alphabetical"]
                elif "alphabetic" in text_toks_for_val:
                    tok_replace_src = "alphabetic"
                    toks_replace_tgt = ["descending", "alphabetic"]
                elif "lexicographically" in text_toks_for_val:
                    tok_replace_src = "lexicographically"
                    toks_replace_tgt = ["reverse", "lexicographically"]
            elif sort_dir_arg == "descending":
                if "descending" in text_toks_for_val:
                    tok_replace_src = "descending"
                    toks_replace_tgt = ["ascending"]
                elif "decreasing" in text_toks_for_val:
                    tok_replace_src = "decreasing"
                    toks_replace_tgt = ["increasing"]
            else:
                continue
            if tok_replace_src is not None:
                # replace only one token
                tok_prefix_pos = text_toks_for_val.index(tok_replace_src)
                tok_suffix_pos = tok_prefix_pos + 1
            else:
                tok_prefix_pos, tok_suffix_pos, toks_replace_tgt = match_fromto_pattern(text_toks_for_val)
            if toks_replace_tgt is not None and tok_prefix_pos is not None and tok_suffix_pos is not None:
                # can replace augmentation
                # get random number to decide
                shuffle_this = torch.randint(2, (1,)).item()
                if shuffle_this:
                    # fix value matching
                    for key, vals in value_unit_dict.items():
                        for val in vals:
                            if isinstance(val.q_match, dict) and "idx_question" in val.q_match:
                                idxs = val.q_match["idx_question"]
                                idxs_new = []
                                for idx in idxs:
                                    if idx < tok_prefix_pos:
                                        idxs_new.append(idx)
                                    elif idx >= tok_prefix_pos and idx < tok_suffix_pos:
                                        # the value was matched right inside the tokens being changed - try to match to the same token in the new setup
                                        matched_token = text_toks_for_val[idx]
                                        if matched_token in toks_replace_tgt:
                                            idxs_new.append(tok_prefix_pos + toks_replace_tgt.index(matched_token))
                                        else:
                                            idxs_new.append(idx - (tok_suffix_pos - tok_prefix_pos) + len(toks_replace_tgt))
                                    # NOTE(review): idx >= tok_suffix_pos falls through every
                                    # branch and is dropped — verify this is intended (an
                                    # else on the elif chain would shift those indices instead).
                                val.q_match["idx_question"] = idxs_new
                    text_toks_for_val = text_toks_for_val[:tok_prefix_pos] + toks_replace_tgt + text_toks_for_val[tok_suffix_pos:]
                    sort_dir_arg = "descending" if sort_dir_arg == "ascending" else "ascending"
                    qdmr_code_new[i_step][1][2] = attr.evolve(qdmr_code_new[i_step][1][2],
                                                              arg=GroundingKey.make_sortdir_grounding(ascending=(sort_dir_arg=="ascending")))
                    shuffled = True
    if shuffled:
        qdmr_args = None  # not updating grounding for now (seems to not be used)
        qdmr_ops = None  # not updating grounding for now (seems to not be used)
        grounding = None  # not updating grounding for now (seems to not be used)
        raw_question = None  # not updating grounding for now (seems to not be used)
        return True, [qdmr_code_new], qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict
    else:
        return False, qdmr_code, qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict
@classmethod
def shuffle_compsup_op(cls, qdmr_code, qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict):
    """Randomly swap the operator of COMPARATIVE/SUPERLATIVE steps.

    Finds the single question span expressing the step's operator (e.g.
    "larger" for '>', "at most" for '<='), replaces it with a randomly
    chosen keyword of a randomly chosen substitute operator, remaps
    affected value-match indices, and updates the grounding in a deep copy
    of the QDMR program. A step is only augmented when exactly one keyword
    occurrence is found (otherwise the rewrite would be ambiguous).

    Returns the same 8-tuple shape as shuffle_qdmr_sort_dir; on success,
    qdmr_args/qdmr_ops/grounding/raw_question come back as None.
    """
    shuffled = False
    qdmr_code_cur = qdmr_code[0]
    qdmr_code_new = copy.deepcopy(qdmr_code_cur)
    # Keyword patterns that can express each operator in the question text.
    op_kerwords_for_op = {}
    op_kerwords_for_op[">"] = ["larger", "bigger", "higher",
                               "greater", "better",
                               "more", "over", "after", "above"]
    op_kerwords_for_op["<"] = ["lower", "less", "smaller",
                               "fewer", "worse", "below",
                               "under", "before"]
    op_kerwords_for_op[">="] = ["larger than or equal".split(" "),
                                "bigger than or equal".split(" "),
                                "higher than or equal".split(" "),
                                "more than or equal".split(" "),
                                "at least".split(" ")]
    op_kerwords_for_op["<="] = ["smaller than or equal".split(" "),
                                "less than or equal".split(" "),
                                "fewer than or equal".split(" "),
                                "lower than or equal".split(" "),
                                "at most".split(" ")]
    op_kerwords_for_op["max"] = ["max", "maximum", "largest", "biggest", "highest", "most"]
    op_kerwords_for_op["min"] = ["min", "minimum", "smallest", "fewest", "lowest", "least"]
    # Which operators an operator may be swapped with.
    op_substitute_for_op = {}
    op_substitute_for_op[">"] = [">", "<", ">=", "<="]
    op_substitute_for_op["<"] = [">", "<", ">=", "<="]
    op_substitute_for_op[">="] = [">", "<", ">=", "<="]
    op_substitute_for_op["<="] = [">", "<", ">=", "<="]
    op_substitute_for_op["min"] = ["min", "max"]
    op_substitute_for_op["max"] = ["min", "max"]
    # Normalize every pattern to a tuple of tokens (in place).
    for op, patterns in op_kerwords_for_op.items():
        for i_p, pattern in enumerate(patterns):
            if isinstance(pattern, str):
                patterns[i_p] = (pattern,)
            elif isinstance(pattern, list):
                patterns[i_p] = tuple(pattern)
            elif isinstance(pattern, tuple):
                pass
            else:
                raise RuntimeError(f"Unknown pattern {pattern} for op {op} of type {type(pattern)}")
    for i_step, step in enumerate(qdmr_code_cur):
        op = step[0]
        args = step[1]
        if op.lower() == "comparative" and len(args) == 3:
            comparative_arg = args[2]
            if not isinstance(comparative_arg, QDMRStepArg):
                continue
            comparative_arg = comparative_arg.arg
            if not isinstance(comparative_arg, GroundingKey) or not comparative_arg.iscomp():
                continue
            comparative_op = comparative_arg.keys[0]
        elif op.lower() == "superlative" and len(args) == 3:
            comparative_arg = args[0]
            if not isinstance(comparative_arg, QDMRStepArg):
                continue
            comparative_arg = comparative_arg.arg
            if comparative_arg not in ["min", "max"]:
                continue
            comparative_op = comparative_arg
        else:
            continue
        if comparative_op not in op_kerwords_for_op:
            continue
        # searching for the pattern in question tokens
        patterns_found = {}
        for i_pos in range(len(text_toks_for_val)):
            patterns_to_search = op_kerwords_for_op[comparative_op]
            for p in patterns_to_search:
                if len(p) > len(text_toks_for_val) - i_pos:
                    # pattern won't fit at this position
                    continue
                this_pattern_found = True
                for i_tok, tok in enumerate(p):
                    if p[i_tok] != text_toks_for_val[i_pos + i_tok]:
                        this_pattern_found = False
                        break
                if this_pattern_found:
                    patterns_found[(p, i_pos)] = True
        # Require exactly one occurrence so the rewrite is unambiguous.
        if len(patterns_found) != 1:
            continue
        (pattern, tok_prefix_pos), _ = list(patterns_found.items())[0]
        tok_suffix_pos = tok_prefix_pos + len(pattern)
        # Pick a random substitute operator and a random keyword for it.
        new_op_options = op_substitute_for_op[comparative_op]
        new_op = new_op_options[torch.randint(len(new_op_options), (1,)).item()]
        new_pattern_indx = torch.randint(len(op_kerwords_for_op[new_op]), (1,)).item()
        toks_replace_tgt = op_kerwords_for_op[new_op][new_pattern_indx]
        if toks_replace_tgt is not None and tok_prefix_pos is not None and tok_suffix_pos is not None:
            # can replace augmentation
            # fix value matching
            for key, vals in value_unit_dict.items():
                for val in vals:
                    if isinstance(val.q_match, dict) and "idx_question" in val.q_match:
                        idxs = val.q_match["idx_question"]
                        idxs_new = []
                        for idx in idxs:
                            if idx < tok_prefix_pos:
                                idxs_new.append(idx)
                            elif idx >= tok_prefix_pos and idx < tok_suffix_pos:
                                # the value was matched right inside the tokens being changed - try to match to the same token in the new setup
                                matched_token = text_toks_for_val[idx]
                                if matched_token in toks_replace_tgt:
                                    idxs_new.append(tok_prefix_pos + toks_replace_tgt.index(matched_token))
                                else:
                                    idxs_new.append(idx - (tok_suffix_pos - tok_prefix_pos) + len(toks_replace_tgt))
                            # NOTE(review): idx >= tok_suffix_pos falls through every
                            # branch and is dropped — verify this is intended.
                        val.q_match["idx_question"] = idxs_new
            text_toks_for_val = text_toks_for_val[:tok_prefix_pos] + list(toks_replace_tgt) + text_toks_for_val[tok_suffix_pos:]
            if isinstance(comparative_arg, GroundingKey):
                if len(comparative_arg.keys) == 3:
                    new_grounding = GroundingKey.make_comparative_grounding(new_op, comparative_arg.keys[1], comparative_arg.keys[2])
                else:
                    new_grounding = GroundingKey.make_comparative_grounding(new_op, comparative_arg.keys[1])
            else:
                new_grounding = new_op
            if op == "comparative":
                qdmr_code_new[i_step][1][2] = attr.evolve(qdmr_code_new[i_step][1][2], arg=new_grounding)
            elif op == "superlative":
                qdmr_code_new[i_step][1][0] = attr.evolve(qdmr_code_new[i_step][1][0], arg=new_grounding)
            else:
                raise RuntimeError(f"Do not know how to augment op {op}")
            shuffled = True
    if shuffled:
        qdmr_args = None  # not updating grounding for now (seems to not be used)
        qdmr_ops = None  # not updating grounding for now (seems to not be used)
        grounding = None  # not updating grounding for now (seems to not be used)
        raw_question = None  # not updating grounding for now (seems to not be used)
        return True, [qdmr_code_new], qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict
    else:
        return False, qdmr_code, qdmr_args, qdmr_ops, grounding, raw_question, text_toks_for_val, value_unit_dict
@classmethod
def generate_random_topsort_qdmr(cls, qdmr_code, qdmr_args, qdmr_ops, grounding):
    """Reorder QDMR steps into a random valid topological order.

    Builds a step-dependency adjacency matrix from #ref arguments (plus
    the special ref hidden inside a comparative's grounding), samples a
    random topological order, and rewrites all refs accordingly. The
    shuffle is discarded if it would move the final step, since the last
    step must stay last.

    Returns (shuffled, qdmr_code, qdmr_args, qdmr_ops, grounding); on a
    successful shuffle the latter three are returned as None (not kept in
    sync — see tail comments).
    """
    qdmr_code_cur = qdmr_code[0]
    num_qdmr_steps = len(qdmr_code_cur)
    # adjacency[i, j] == 1 means step j depends on step i.
    qdmr_adjacency_matrix = torch.zeros((num_qdmr_steps, num_qdmr_steps), dtype=torch.long)
    for i_step, step in enumerate(qdmr_code_cur):
        args = step[1]
        for arg in args:
            assert isinstance(arg, QDMRStepArg), f"Have arg {arg} at step {i_step} of parsed QDMR {qdmr_code_cur}, should have an instance of QDMRStepArg"
            if arg.arg_type == "ref":
                assert len(arg.arg) == 1, f"Should have only one arg in QDMRStepArg of arg_type ref"
                ref = arg.arg[0]
                assert QdmrInstance.is_good_qdmr_ref(ref), f"QDMR ref should be a str starting with # but have '{ref}' of type {type(ref)}"
                i_ref = QdmrInstance.ref_to_index(ref)
                qdmr_adjacency_matrix[i_ref, i_step] = 1
        # add special case for a ref in the arg of comparative
        if step[0] == "comparative" and len(step[1]) == 3 and\
            step[1][2].arg is not None and step[1][2].arg.iscomp() and len(step[1][2].arg.keys) == 2\
            and QdmrInstance.is_good_qdmr_ref(step[1][2].arg.keys[1]):
            i_ref = QdmrInstance.ref_to_index(step[1][2].arg.keys[1])
            qdmr_adjacency_matrix[i_ref, i_step] = 1
    # select the order of elements
    new_node_id = {}  # old step index -> new position
    for i_step in range(num_qdmr_steps):
        num_in_edges = qdmr_adjacency_matrix.sum(0)
        # At least one unprocessed node must be dependency-free (DAG invariant).
        assert num_in_edges.min().item() == 0
        items_to_choose = (num_in_edges == 0).nonzero().view(-1)
        chosen_item = torch.randint(items_to_choose.numel(), (1,))
        chosen_item = items_to_choose[chosen_item]
        new_node_id[chosen_item.item()] = i_step
        # delete all edges from the chosen node
        qdmr_adjacency_matrix[chosen_item, :] = 0
        # block the chosen node from selection (self-edge keeps in-degree > 0)
        qdmr_adjacency_matrix[chosen_item, chosen_item] = num_qdmr_steps + 1
    shuffled = False
    for i_step in range(num_qdmr_steps):
        if new_node_id[i_step] != i_step:
            shuffled = True
            break
    if new_node_id[num_qdmr_steps - 1] != num_qdmr_steps - 1:
        warnings.warn(f"The last node should be last, otherwise smth is wrong, have permutation {new_node_id}, aborting this shuffle")
        shuffled = False
    if shuffled:
        # reorder steps
        qdmr_code_new = [None] * num_qdmr_steps
        for i_step in range(num_qdmr_steps):
            qdmr_code_new[new_node_id[i_step]] = copy.deepcopy(qdmr_code_cur[i_step])
        # fix qdmr refs
        for step in qdmr_code_new:
            args = step[1]
            for arg in args:
                assert isinstance(arg, QDMRStepArg), f"Have arg {arg} at step {i_step} of parsed QDMR {qdmr_code_new}, should have an instance of QDMRStepArg"
                if arg.arg_type == "ref":
                    assert len(arg.arg) == 1, f"Should have only one arg in QDMRStepArg of arg_type ref"
                    ref = arg.arg[0]
                    assert QdmrInstance.is_good_qdmr_ref(ref), f"QDMR ref should be a str starting with # but have '{ref}' of type {type(ref)}"
                    i_ref = QdmrInstance.ref_to_index(ref)
                    arg.arg[0] = QdmrInstance.index_to_ref(new_node_id[i_ref])
            # add special case for a ref in the arg of comparative
            if step[0] == "comparative" and len(step[1]) == 3 and\
                step[1][2].arg is not None and step[1][2].arg.iscomp() and len(step[1][2].arg.keys) == 2\
                and QdmrInstance.is_good_qdmr_ref(step[1][2].arg.keys[1]):
                i_ref = QdmrInstance.ref_to_index(step[1][2].arg.keys[1])
                key_list = list(step[1][2].arg.keys)
                key_list[1] = QdmrInstance.index_to_ref(new_node_id[i_ref])
                step[1][2].arg.keys = tuple(key_list)
        qdmr_args = None  # not updating grounding for now (seems to not be used)
        qdmr_ops = None  # not updating grounding for now (seems to not be used)
        grounding = None  # not updating grounding for now (seems to not be used)
        return True, [qdmr_code_new], qdmr_args, qdmr_ops, grounding
    else:
        return False, qdmr_code, qdmr_args, qdmr_ops, grounding
@classmethod
def shuffle_schema_inplace(cls, schema, shuffle_tables, shuffle_columns):
    """Randomly permute schema tables and/or per-table columns, IN PLACE.

    Returns True when anything actually moved. Table and column ids are
    rewritten to match the new order; ``schema.foreign_key_graph`` and
    ``schema.orig`` are invalidated (set to None) because they are not
    kept consistent with the shuffle.
    """
    # this function shuffles the schema inplace:
    # the shuffled version will stay in memory for next batches
    # delete items which we are not maintaining
    schema.foreign_key_graph = None
    schema.orig = None
    num_tables = len(schema.tables)
    table_order = torch.randperm(num_tables) if shuffle_tables else torch.arange(num_tables)
    # A permutation equal to the identity counts as "not shuffled".
    shuffled = (table_order != torch.arange(num_tables)).any().item()
    if shuffled:
        tables_new = []
        database_schema_table_names = []
        for i_table in range(num_tables):
            t = schema.tables[table_order[i_table]]
            t.id = i_table
            tables_new.append(t)
            database_schema_table_names.append(schema.database_schema.table_names[table_order[i_table]])
        schema.tables = tables_new
        schema.database_schema.table_names = database_schema_table_names
        # shuffle tables according to shuffled tables
        # first add columns with no tables
        columns_new = [col for col in schema.columns if col.table is None]
        for i_table in range(num_tables):
            for col in schema.columns:
                if col.table is not None and col.table.id == i_table:
                    columns_new.append(col)
        # make ids of columns correspond to the order
        for i_col, col in enumerate(columns_new):
            col.id = i_col
        schema.columns = columns_new
    if shuffle_columns:
        # shuffling column inside each table
        for i_table in range(num_tables):
            num_cols = len(schema.tables[i_table].columns)
            # NOTE(review): min() raises on a table with zero columns — confirm
            # tables always have at least one column here.
            min_id = min(col.id for col in schema.tables[i_table].columns)
            col_order = torch.randperm(num_cols)
            col_shuffled = (col_order != torch.arange(num_cols)).any().item()
            # sanity check for column ids
            col_ids = set(col.id for col in schema.tables[i_table].columns)
            assert col_ids == set(i_ + min_id for i_ in range(num_cols)), f"Columns of table {schema.tables[i_table].orig_name} have wrong ids: {col_ids}"
            if col_shuffled:
                shuffled = True
                for i_col, col in enumerate(schema.tables[i_table].columns):
                    col.id = min_id + col_order[i_col].item()
                schema.tables[i_table].columns = sorted(schema.tables[i_table].columns, key=lambda col: col.id)
    # sort all the columns by ids
    schema.columns = sorted(schema.columns, key=lambda col: col.id)
    return shuffled
def save(self, partition=None):
    """Write the preprocessed dataset to ``self.data_dir``.

    A full save (``partition is None``) also snapshots the tokenizer and
    model config; every section's items are written as one JSON object
    per line into ``<section>.jsonl``.
    """
    os.makedirs(self.data_dir, exist_ok=True)
    if partition is None:
        # Only a full (non-partition) save stores tokenizer/config.
        self.tokenizer.save_pretrained(self.data_dir)
        self.config.save_pretrained(self.data_dir)
    for section, texts in self.texts.items():
        out_path = os.path.join(self.data_dir, section + '.jsonl')
        with open(out_path, 'w') as f:
            f.writelines(
                json.dumps(text, cls=ComplexEncoder) + '\n' for text in texts)
def load(self):
    """Restore the tokenizer previously written to ``self.data_dir`` by ``save``."""
    self.tokenizer = AutoTokenizer.from_pretrained(self.data_dir)
def dataset(self, section):
    """Load the preprocessed items of ``section`` from its ``.jsonl`` file.

    Returns a list with one decoded entry per line.

    Fix: the original opened the file inside the list comprehension and
    never closed it (relying on GC); the handle is now closed
    deterministically via a context manager.
    """
    path = os.path.join(self.data_dir, section + '.jsonl')
    with open(path) as f:
        return [json.loads(line, cls=ComplexDecoder) for line in f]
@registry.register('encoder', 'text2qdmr')
class BreakFullEncoderBert(torch.nn.Module):
Preproc = BreakFullEncoderBertPreproc
batched = True
def __init__(
        self,
        device,
        preproc,
        update_config=None,
        bert_token_type=False,
        summarize_header="first",
        use_column_type=True,
        include_in_memory=('question', 'column', 'table'),
        use_relations=False):
    """Build the BERT-based question/schema encoder.

    Args:
        device: torch device handed to the relational-update module.
        preproc: preprocessor instance supplying the tokenizer, the
            pretrained model name, and preprocessing flags.
        update_config: dict with at least a 'name' key selecting the
            encoder-update module ('relational_transformer' or 'none');
            remaining keys are forwarded to it. Fix: the original used a
            mutable default ``{}`` (shared across calls); ``None`` now
            stands in for it, preserving behavior.
        bert_token_type: if True, feed token_type_ids to the BERT model.
        summarize_header: 'first' or 'avg' — how multi-token headers are
            summarized into one vector.
        use_column_type: accepted for interface compatibility but
            overridden by ``preproc.use_column_type`` below.
            # NOTE(review): confirm this parameter is intentionally unused
        include_in_memory: which encoding kinds to keep in decoder memory.
        use_relations: whether relation matrices are computed downstream.
    """
    super().__init__()
    if update_config is None:
        update_config = {}
    self._device = device
    self.preproc = preproc
    self.bert_token_type = bert_token_type
    self.base_enc_hidden_size = 1024
    assert summarize_header in ["first", "avg"]
    self.summarize_header = summarize_header
    self.enc_hidden_size = self.base_enc_hidden_size
    # The preprocessor's flag wins over the constructor argument.
    self.use_column_type = self.preproc.use_column_type
    self.use_relations = use_relations
    self.include_in_memory = set(include_in_memory)
    update_modules = {
        'relational_transformer':
            encoder_modules.RelationalTransformerUpdate,
        'none':
            encoder_modules.NoOpUpdate,
    }
    self.encs_update = registry.instantiate(
        update_modules[update_config['name']],
        update_config,
        unused_keys={"name"},
        device=self._device,
        hidden_size=self.enc_hidden_size,
        sc_link=True,
    )
    self.bert_model = AutoModel.from_pretrained(self.preproc.pretrained_modelname)
    self.tokenizer = self.preproc.tokenizer
    self.bert_model.resize_token_embeddings(len(self.tokenizer))  # several tokens added
    self.use_bert_masks = self.preproc.use_bert_masks
def forward(self, descs):
batch_token_lists = []
batch_id_to_retrieve_question = []
batch_id_to_retrieve_column = []
batch_id_to_retrieve_table = []
batch_id_to_retrieve_value = []
batch_id_to_retrieve_grnd = []
if self.summarize_header == "avg":
batch_id_to_retrieve_value_2 = []
batch_id_to_retrieve_column_2 = []
batch_id_to_retrieve_table_2 = []
batch_id_to_retrieve_grnd_2 = []
long_seq_set = set()
batch_id_map = {} # some long examples are not included
if not self.preproc.use_general_grounding:
# sample one grounding
all_grnd_idx = np.zeros(len(descs), dtype=np.int).tolist()
for batch_idx, desc in enumerate(descs):
qs = self.pad_single_sentence_for_bert(desc['question'], cls=True)
if not self.preproc.use_general_grounding:
if self.use_column_type:
cols = [self.pad_single_sentence_for_bert(c, cls=False) for c in desc['columns']]
else:
cols = [self.pad_single_sentence_for_bert(c[:-1], cls=False) for c in desc['columns']]
tabs = [self.pad_single_sentence_for_bert(t, cls=False) for t in desc['tables']]
vals = [self.pad_single_sentence_for_bert(v, cls=False) for v in desc['tokenized_values'][all_grnd_idx[batch_idx]]]
token_list = qs + [c for col in cols for c in col] + \
[t for tab in tabs for t in tab] + \
[v for val in vals for v in val]
else:
grnds = [self.pad_single_sentence_for_bert(g, cls=False) for g in desc['general_grounding']]
token_list = qs + [g for grnd in grnds for g in grnd]
assert self.check_bert_seq(token_list)
if not self.preproc.use_bert_unlimited_length and len(token_list) > 512:
print(f"{descs[batch_idx]['full_name']} is too long ({len(token_list)}) - skipping it")
long_seq_set.add(batch_idx)
continue
q_b = len(qs)
if not self.preproc.use_general_grounding:
col_b = q_b + sum(len(c) for c in cols)
tab_b = col_b + sum(len(t) for t in tabs)
else:
grnd_b = q_b + sum(len(g) for g in grnds)
# leave out [CLS] and [SEP]
question_indexes = list(range(q_b))[1:-1]
if not self.preproc.use_general_grounding:
# use the first representation for column/table
column_indexes = \
np.cumsum([q_b] + [len(token_list) for token_list in cols[:-1]]).tolist()
table_indexes = \
np.cumsum([col_b] + [len(token_list) for token_list in tabs[:-1]]).tolist()
value_indexes = \
np.cumsum([tab_b] + [len(token_list) for token_list in vals[:-1]]).tolist()
if self.summarize_header == "avg":
column_indexes_2 = \
np.cumsum([q_b - 2] + [len(token_list) for token_list in cols]).tolist()[1:]
table_indexes_2 = \
np.cumsum([col_b - 2] + [len(token_list) for token_list in tabs]).tolist()[1:]
value_indexes_2 = \
np.cumsum([tab_b - 2] + [len(token_list) for token_list in vals]).tolist()[1:]
else:
grnd_indexes = \
np.cumsum([q_b] + [len(token_list) for token_list in grnds[:-1]]).tolist()
if self.summarize_header == "avg":
grnd_indexes_2 = \
np.cumsum([q_b - 2] + [len(token_list) for token_list in grnds]).tolist()[1:]
indexed_token_list = self.tokenizer.convert_tokens_to_ids(token_list)
batch_token_lists.append(indexed_token_list)
question_rep_ids = torch.LongTensor(question_indexes).to(self._device)
batch_id_to_retrieve_question.append(question_rep_ids)
if not self.preproc.use_general_grounding:
column_rep_ids = torch.LongTensor(column_indexes).to(self._device)
batch_id_to_retrieve_column.append(column_rep_ids)
table_rep_ids = torch.LongTensor(table_indexes).to(self._device)
batch_id_to_retrieve_table.append(table_rep_ids)
value_rep_ids = torch.LongTensor(value_indexes).to(self._device)
batch_id_to_retrieve_value.append(value_rep_ids)
if self.summarize_header == "avg":
assert (all(i2 >= i1 for i1, i2 in zip(column_indexes, column_indexes_2)))
column_rep_ids_2 = torch.LongTensor(column_indexes_2).to(self._device)
batch_id_to_retrieve_column_2.append(column_rep_ids_2)
assert (all(i2 >= i1 for i1, i2 in zip(table_indexes, table_indexes_2)))
table_rep_ids_2 = torch.LongTensor(table_indexes_2).to(self._device)
batch_id_to_retrieve_table_2.append(table_rep_ids_2)
assert (all(i2 >= i1 for i1, i2 in zip(value_indexes, value_indexes_2)))
value_rep_ids_2 = torch.LongTensor(value_indexes_2).to(self._device)
batch_id_to_retrieve_value_2.append(value_rep_ids_2)
else:
grnd_rep_ids = torch.LongTensor(grnd_indexes).to(self._device)
batch_id_to_retrieve_grnd.append(grnd_rep_ids)
if self.summarize_header == "avg":
assert (all(i2 >= i1 for i1, i2 in zip(grnd_indexes, grnd_indexes_2))), descs[batch_idx]['full_name']
grnd_rep_ids_2 = torch.LongTensor(grnd_indexes_2).to(self._device)
batch_id_to_retrieve_grnd_2.append(grnd_rep_ids_2)
batch_id_map[batch_idx] = len(batch_id_map)
if len(long_seq_set) == len(descs):
# all batch elements al too long
return [None] * len(descs)
padded_token_lists, att_mask_lists, tok_type_lists, position_ids_lists = self.pad_sequence_for_bert_batch(batch_token_lists,
use_bert_unlimited_length=self.preproc.use_bert_unlimited_length,
use_bert_masks=self.use_bert_masks,
indexes=batch_id_to_retrieve_grnd, descs=descs)
tokens_tensor = torch.LongTensor(padded_token_lists).to(self._device)
att_masks_tensor = torch.LongTensor(att_mask_lists).to(self._device)
position_ids_tensor = torch.LongTensor(position_ids_lists).to(self._device)
if self.bert_token_type:
tok_type_tensor = torch.LongTensor(tok_type_lists).to(self._device)
bert_output = self.bert_model(tokens_tensor,
attention_mask=att_masks_tensor, token_type_ids=tok_type_tensor, position_ids=position_ids_tensor)[0]
else:
bert_output = self.bert_model(tokens_tensor,
attention_mask=att_masks_tensor, position_ids=position_ids_tensor)[0]
enc_output = bert_output
if not self.preproc.use_general_grounding:
has_vals = [len(desc['tokenized_values'][grnd_idx]) for desc, grnd_idx in zip(descs, all_grnd_idx)]
grnd_pointer_maps = [
{
i: [i]
for i in range(len(desc['general_grounding']))
}
for desc in descs
]
if self.preproc.use_general_grounding:
# the batched version of rat transformer
q_enc_batch = []
grnd_enc_batch = []
relations_batch = []
for batch_idx, desc in enumerate(descs):
if batch_idx in long_seq_set:
continue
bert_batch_idx = batch_id_map[batch_idx]
q_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_question[bert_batch_idx]]
grnd_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_grnd[bert_batch_idx]]
if self.summarize_header == "avg":
grnd_enc_2 = enc_output[bert_batch_idx][batch_id_to_retrieve_grnd_2[bert_batch_idx]]
grnd_enc = (grnd_enc + grnd_enc_2) / 2.0 # avg of first and last token
grnd_boundaries = list(range(len(desc["general_grounding"]) + 1))
assert q_enc.shape[0] == len(desc["question"])
assert grnd_enc.shape[0] == grnd_boundaries[-1]
total_enc_len = q_enc.shape[0] + grnd_enc.shape[0]
if self.use_relations:
general_grounding_types = desc.get('general_grounding_types')
input_types = general_grounding_types if not self.preproc.merge_sc_link else None
relation_map = self.encs_update.relation_map
# Catalogue which things are where
relations = self.encs_update.compute_relations(
desc,
enc_length=total_enc_len,
q_enc_length=q_enc.shape[0],
c_enc_length=grnd_enc.shape[0],
t_enc_length=None,
c_boundaries=grnd_boundaries,
t_boundaries=None,
input_types=input_types,
v_boundaries=None,
relation_map=relation_map)
else:
relations = np.zeros((total_enc_len, total_enc_len), dtype=np.int64)
relations = torch.as_tensor(relations)
q_enc_batch.append(q_enc)
grnd_enc_batch.append(grnd_enc)
relations_batch.append(relations)
q_enc_new_item_batch, grnd_enc_new_item_batch, _, align_mat_item_batch = \
self.encs_update.forward_batched(relations_batch, q_enc_batch, grnd_enc_batch)
result = []
num_processed_items = 0
for batch_idx, desc in enumerate(descs):
if batch_idx in long_seq_set:
result.append(None)
continue
else:
batch_idx = num_processed_items
num_processed_items = num_processed_items + 1
memory = []
q_enc_new_item = q_enc_new_item_batch[batch_idx].unsqueeze(0)
grnd_enc_new_item = grnd_enc_new_item_batch[batch_idx].unsqueeze(0)
if align_mat_item_batch:
align_mat_item = [align_mat_item_batch[0][batch_idx], align_mat_item_batch[1][batch_idx]]
else:
align_mat_item = (None, None)
if self.preproc.construct_general_grounding:
assert not self.preproc.use_general_grounding
if val_enc is not None:
grnd_enc_new_item = torch.cat((t_enc_new_item, c_enc_new_item, v_enc_new_item), dim=1)
else:
grnd_enc_new_item = torch.cat((t_enc_new_item, c_enc_new_item), dim=1)
if align_mat_item_batch and align_mat_item[2] is not None:
align_mat_item_grnd = torch.cat((align_mat_item[1], align_mat_item[0], align_mat_item[2]), dim=1)
elif align_mat_item_batch:
align_mat_item_grnd = torch.cat((align_mat_item[1], align_mat_item[0]), dim=1)
else:
align_mat_item_grnd = None
if 'question' in self.include_in_memory:
memory.append(q_enc_new_item)
if 'column' in self.include_in_memory:
memory.append(c_enc_new_item)
if 'table' in self.include_in_memory:
memory.append(t_enc_new_item)
if 'value' in self.include_in_memory and val_enc is not None:
memory.append(v_enc_new_item)
if 'grounding' in self.include_in_memory:
memory.append(grnd_enc_new_item)
memory = torch.cat(memory, dim=1)
result.append(EncoderState(
state=None,
memory=memory,
question_memory=None,
schema_memory=None,
# TODO: words should match memory
words=None,
pointer_memories={
'grounding': grnd_enc_new_item,
},
pointer_maps={
'grounding': grnd_pointer_maps[batch_idx],
},
m2c_align_mat=align_mat_item[0] if not self.preproc.construct_general_grounding else align_mat_item_grnd,
m2t_align_mat=align_mat_item[1],
value_memories=None,
values=None,
value_emb=None,
grnd_idx=0,
))
return result
assert len(long_seq_set) == 0 # remove them for now
result = []
for batch_idx, desc in enumerate(descs):
if not self.preproc.use_general_grounding:
c_boundary = list(range(len(desc["columns"]) + 1))
t_boundary = list(range(len(desc["tables"]) + 1))
v_boundary = list(range(len(desc["tokenized_values"][all_grnd_idx[batch_idx]]) + 1)) \
if 'tokenized_values' in desc else None
else:
g_boundary = list(range(len(desc["general_grounding"]) + 1))
if not self.preproc.use_general_grounding:
assert batch_idx not in long_seq_set
bert_batch_idx = batch_id_map[batch_idx]
q_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_question[bert_batch_idx]]
col_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_column[bert_batch_idx]]
tab_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_table[bert_batch_idx]]
if has_vals[batch_idx]:
val_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_value[bert_batch_idx]]
if self.summarize_header == "avg":
col_enc_2 = enc_output[bert_batch_idx][batch_id_to_retrieve_column_2[bert_batch_idx]]
tab_enc_2 = enc_output[bert_batch_idx][batch_id_to_retrieve_table_2[bert_batch_idx]]
if has_vals[batch_idx]:
val_enc_2 = enc_output[bert_batch_idx][batch_id_to_retrieve_value_2[bert_batch_idx]]
col_enc = (col_enc + col_enc_2) / 2.0 # avg of first and last token
tab_enc = (tab_enc + tab_enc_2) / 2.0 # avg of first and last token
if has_vals[batch_idx]:
val_enc = (val_enc + val_enc_2) / 2.0 # avg of first and last token
assert q_enc.size()[0] == len(desc["question"])
assert col_enc.size()[0] == c_boundary[-1]
assert tab_enc.size()[0] == t_boundary[-1]
if has_vals[batch_idx]:
assert val_enc.size()[0] == len(desc['tokenized_values'][all_grnd_idx[batch_idx]])
assert val_enc.size()[0] == v_boundary[-1]
else:
val_enc = None
q_enc_new_item, c_enc_new_item, t_enc_new_item, v_enc_new_item, align_mat_item = \
self.encs_update.forward_unbatched(
desc,
q_enc.unsqueeze(1),
col_enc.unsqueeze(1),
c_boundary,
tab_enc.unsqueeze(1),
t_boundary,
val_enc.unsqueeze(1) if val_enc is not None else None,
v_boundary)
else:
assert batch_idx not in long_seq_set
bert_batch_idx = batch_id_map[batch_idx]
q_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_question[bert_batch_idx]]
grnd_enc = enc_output[bert_batch_idx][batch_id_to_retrieve_grnd[bert_batch_idx]]
if self.summarize_header == "avg":
grnd_enc_2 = enc_output[bert_batch_idx][batch_id_to_retrieve_grnd_2[bert_batch_idx]]
grnd_enc = (grnd_enc + grnd_enc_2) / 2.0 # avg of first and last token
assert q_enc.size()[0] == len(desc["question"])
assert grnd_enc.size()[0] == g_boundary[-1]
general_grounding_types = desc.get('general_grounding_types')
q_enc_new_item, grnd_enc_new_item, _, align_mat_item = \
self.encs_update.forward_unbatched(
desc,
q_enc.unsqueeze(1),
grnd_enc.unsqueeze(1),
g_boundary,
use_relations=self.use_relations,
input_types=general_grounding_types if not self.preproc.merge_sc_link or \
self.preproc.use_type_relations or self.preproc.use_graph_relations else None)
memory = []
if self.preproc.construct_general_grounding:
assert not self.preproc.use_general_grounding
if val_enc is not None:
grnd_enc_new_item = torch.cat((t_enc_new_item, c_enc_new_item, v_enc_new_item), dim=1)
else:
grnd_enc_new_item = torch.cat((t_enc_new_item, c_enc_new_item), dim=1)
if align_mat_item[2] is not None:
align_mat_item_grnd = torch.cat((align_mat_item[1], align_mat_item[0], align_mat_item[2]), dim=1)
else:
align_mat_item_grnd = torch.cat((align_mat_item[1], align_mat_item[0]), dim=1)
if 'question' in self.include_in_memory:
memory.append(q_enc_new_item)
if 'column' in self.include_in_memory:
memory.append(c_enc_new_item)
if 'table' in self.include_in_memory:
memory.append(t_enc_new_item)
if 'value' in self.include_in_memory and val_enc is not None:
memory.append(v_enc_new_item)
if 'grounding' in self.include_in_memory:
memory.append(grnd_enc_new_item)
memory = torch.cat(memory, dim=1)
result.append(EncoderState(
state=None,
memory=memory,
question_memory=None,
schema_memory=None,
# TODO: words should match memory
words=None,
pointer_memories={
'grounding': grnd_enc_new_item,
},
pointer_maps={
'grounding': grnd_pointer_maps[batch_idx],
},
m2c_align_mat=align_mat_item[0] if not self.preproc.construct_general_grounding else align_mat_item_grnd,
m2t_align_mat=align_mat_item[1],
value_memories=None,
values=None,
value_emb=None,
grnd_idx=0,
))
return result
def check_bert_seq(self, toks):
if toks[0] == self.tokenizer.cls_token and toks[-1] == self.tokenizer.sep_token:
return True
else:
return False
def pad_single_sentence_for_bert(self, toks, cls=True):
if cls:
return [self.tokenizer.cls_token] + toks + [self.tokenizer.sep_token]
else:
return toks + [self.tokenizer.sep_token]
    def pad_sequence_for_bert_batch(self, tokens_lists, use_bert_unlimited_length=False, use_bert_masks=False, indexes=None, descs=None):
        """Pad a batch of BERT token-id sequences to the batch maximum length.

        Args:
            tokens_lists: list of token-id lists, one per batch item.
            use_bert_unlimited_length: if True, skip the 512-token limit and
                clamp every position after the first [SEP] to 511.
            use_bert_masks: if True, build a (1, max_len, max_len) block
                attention mask per item plus per-block position ids instead
                of a flat 0/1 mask.
            indexes: per-item segment boundary offsets, only used when
                use_bert_masks is True — assumes idx[0] is the end of the
                question segment (TODO confirm against caller).
            descs: zipped alongside the other arguments but not used in
                this method body.

        Returns:
            (toks_ids, att_masks, tok_type_lists, position_ids_lists);
            att_masks is a torch.int64 tensor of shape (B, max_len, max_len)
            when use_bert_masks, otherwise a list of flat 0/1 lists.
        """
        pad_id = self.tokenizer.pad_token_id
        max_len = max([len(it) for it in tokens_lists])
        if not (use_bert_unlimited_length or use_bert_masks):
            # Standard BERT cannot encode more than 512 positions.
            assert max_len <= 512
        toks_ids = []
        att_masks = []
        tok_type_lists = []
        position_ids_lists = []
        for item_toks, desc, idx in zip(tokens_lists, descs, indexes):
            # Right-pad each item with pad_id up to the batch maximum.
            padded_item_toks = item_toks + [pad_id] * (max_len - len(item_toks))
            toks_ids.append(padded_item_toks)
            if use_bert_masks:
                # Block attention: every position attends to the question
                # prefix (columns before sep_id[0]) and to its own block.
                _att_mask = torch.zeros((1, max_len, max_len), dtype=torch.int64)
                _position_ids_list = list(range(0, idx[0]))
                # Block boundaries: the given offsets plus the sequence end.
                sep_id = list(idx) + [len(item_toks)]
                # all depends on question
                _att_mask[:, :, :sep_id[0]] += 1
                for start_block, end_block in zip(sep_id, sep_id[1:]):
                    _att_mask[:, start_block:end_block, start_block:end_block] += 1
                    # Position ids inside each block restart at idx[0].
                    _position_ids_list += list(range(idx[0], idx[0] + end_block - start_block))
                # Padding positions are clamped to BERT's max position id.
                _position_ids_list += [511] * (max_len - len(item_toks))
            else:
                # Flat mask: 1 over real tokens, 0 over padding.
                _att_mask = [1] * len(item_toks) + [0] * (max_len - len(item_toks))
            att_masks.append(_att_mask)
            first_sep_id = padded_item_toks.index(self.tokenizer.sep_token_id)
            assert first_sep_id > 0
            assert not use_bert_masks or first_sep_id == sep_id[0] - 1, (first_sep_id, sep_id[0])
            # Token types: 0 up to and including the first [SEP], 1 after.
            _tok_type_list = [0] * (first_sep_id + 1) + [1] * (max_len - first_sep_id - 1)
            if use_bert_unlimited_length:
                _position_ids_list = list(range(0, first_sep_id + 1)) + [511] * (max_len - first_sep_id - 1) # 511 - maximum position Id for BERT
            elif use_bert_masks:
                # Position ids were already built above; just check the length.
                assert len(_position_ids_list) == len(_tok_type_list), (len(_position_ids_list), len(_tok_type_list))
            else:
                # Default: consecutive position ids 0..max_len-1.
                _position_ids_list = list(range(0, len(_tok_type_list)))
            tok_type_lists.append(_tok_type_list)
            position_ids_lists.append(_position_ids_list)
        if use_bert_masks:
            # Stack the per-item (1, L, L) masks into one (B, L, L) tensor.
            att_masks = torch.cat(att_masks)
        return toks_ids, att_masks, tok_type_lists, position_ids_lists
@lru_cache(maxsize=100000)
def annotate_bertokens_with_corenlp(tok):
    """Lemmatize *tok* with Stanford CoreNLP.

    Runs the tokenize/ssplit/lemma annotators and returns the space-joined,
    lower-cased lemmas of all resulting tokens. Memoized (bounded LRU cache)
    because the same tokens recur across many examples.
    """
    ann = corenlp.annotate(tok, annotators=['tokenize', 'ssplit', 'lemma'])
    # Fix: the original comprehension reused the name `tok` for its inner
    # loop variable, shadowing the function argument (the cache key).
    lemmas = [token.lemma.lower() for sent in ann.sentence for token in sent.token]
    return " ".join(lemmas)
class Bertokens:
    """Regroups BERT wordpieces into whole words and lemmatizes them.

    Given the raw wordpiece list (continuation pieces carry a "##" prefix),
    builds:
      - recovered_pieces: the reassembled surface-form words,
      - normalized_pieces: their CoreNLP lemmas,
      - idx_map: word index in the recovered list -> index of that word's
        first piece in the original ``pieces`` list.
    Used for schema/value linking.
    """
    def __init__(self, pieces):
        # pieces: list of BERT wordpiece strings.
        self.pieces = pieces
        # The three attributes below are populated by normalize_toks().
        self.normalized_pieces = None
        self.recovered_pieces = None
        self.idx_map = None
        self.normalize_toks()
    def normalize_toks(self):
        """
        If the token is not a word piece, then find its lemma
        If it is, combine pieces into a word, and then find its lemma
        E.g., a ##b ##c will be normalized as "abc", "", ""
        NOTE: this is only used for schema linking
        """
        # startidx2pieces: first-piece index -> index one past the group's
        #                  last "##" piece.
        # pieces2startidx: index of any piece belonging to a group (the "##"
        #                  pieces and the piece right before the first one)
        #                  -> the group's first-piece index.
        self.startidx2pieces = dict()
        self.pieces2startidx = dict()
        cache_start = None
        # The appended "" sentinel flushes a group that runs to the end.
        for i, piece in enumerate(self.pieces + [""]):
            if piece.startswith("##"):
                if cache_start is None:
                    # Group starts at the piece preceding the first "##".
                    cache_start = i - 1
                self.pieces2startidx[i] = cache_start
                self.pieces2startidx[i - 1] = cache_start
            else:
                if cache_start is not None:
                    # Group ended: record its exclusive end index.
                    self.startidx2pieces[cache_start] = i
                    cache_start = None
        assert cache_start is None
        # combine pieces, "abc", "", ""
        combined_word = {}
        for start, end in self.startidx2pieces.items():
            # Sanity bound on the length of a wordpiece group.
            assert end - start + 1 < 10
            pieces = [self.pieces[start]] + [self.pieces[_id].strip("##") for _id in range(start + 1, end)]
            word = "".join(pieces)
            combined_word[start] = word
        # remove "", only keep "abc"
        idx_map = {}
        new_toks = []
        for i, piece in enumerate(self.pieces):
            if i in combined_word:
                # First piece of a multi-piece word: emit the combined word.
                idx_map[len(new_toks)] = i
                new_toks.append(combined_word[i])
            elif i in self.pieces2startidx:
                # remove it
                pass
            else:
                # Stand-alone piece: keep it as-is.
                idx_map[len(new_toks)] = i
                new_toks.append(piece)
        self.idx_map = idx_map
        # lemmatize "abc"
        normalized_toks = []
        for i, tok in enumerate(new_toks):
            # Memoized CoreNLP lemmatization of the recovered word.
            lemma_word = annotate_bertokens_with_corenlp(tok)
            normalized_toks.append(lemma_word)
        self.normalized_pieces = normalized_toks
        self.recovered_pieces = new_toks
    def bert_schema_linking(self, columns, tables, value_unit_dict=None):
        """Compute question<->schema links on the lemmatized tokens and remap
        question indices back to original wordpiece positions via idx_map.

        Returns a dict of match-type -> {"q_id,col_tab_id": match} where
        q_id refers to the first-piece index of the matched question word.
        """
        question_tokens = self.normalized_pieces
        column_tokens = [c.normalized_pieces for c in columns]
        table_tokens = [t.normalized_pieces for t in tables]
        # Skip values when none were provided or when the only entry is
        # the ('<UNK>',) placeholder key.
        if value_unit_dict and not (len(value_unit_dict) == 1 and value_unit_dict.get(('<UNK>',))):
            value_tokens = [v[0].bert_tokens.normalized_pieces for v in value_unit_dict.values()]
        else:
            value_tokens = None
        sc_link = compute_schema_linking(question_tokens, column_tokens, table_tokens, value_tokens)
        new_sc_link = {}
        for m_type in sc_link:
            _match = {}
            for ij_str in sc_link[m_type]:
                # Keys are "question_idx,column_or_table_idx" strings.
                q_id_str, col_tab_id_str = ij_str.split(",")
                q_id, col_tab_id = int(q_id_str), int(col_tab_id_str)
                # Map the word index back to its original piece index.
                real_q_id = self.idx_map[q_id]
                _match[f"{real_q_id},{col_tab_id}"] = sc_link[m_type][ij_str]
            new_sc_link[m_type] = _match
        return new_sc_link
    def bert_cv_linking(self, schema):
        """Compute question<->cell-value links and remap question indices
        back to original wordpiece positions via idx_map."""
        question_tokens = self.recovered_pieces  # Not using normalized tokens here because values usually match exactly
        cv_link = compute_cell_value_linking(question_tokens, schema)
        new_cv_link = {}
        for m_type in cv_link:
            _match = {}
            for ij_str in cv_link[m_type]:
                # Keys are "question_idx,column_or_table_idx" strings.
                q_id_str, col_tab_id_str = ij_str.split(",")
                q_id, col_tab_id = int(q_id_str), int(col_tab_id_str)
                # Map the word index back to its original piece index.
                real_q_id = self.idx_map[q_id]
                _match[f"{real_q_id},{col_tab_id}"] = cv_link[m_type][ij_str]
            new_cv_link[m_type] = _match
        return new_cv_link
class Robertatokens:
    """Regroups RoBERTa BPE pieces into whole words and lemmatizes them.

    The caller supplies ``idx_map`` with piece offsets marking word
    boundaries. After construction, ``recovered_pieces`` holds the
    reassembled words and ``normalized_pieces`` their lemmatized forms.
    """
    def __init__(self, pieces, idx_map):
        self.pieces = pieces
        self.idx_map = idx_map
        self.normalized_pieces = None
        self.recovered_pieces = None
        self.normalize_toks()
    def normalize_toks(self):
        """Join the pieces between consecutive idx_map boundaries into words,
        then lemmatize each word."""
        boundary_count = len(self.idx_map)
        recovered = [
            ''.join(self.pieces[self.idx_map[k]:self.idx_map[k + 1]])
            for k in range(boundary_count - 1)
        ]
        # Memoized CoreNLP lemmatization of each recovered word.
        self.normalized_pieces = [annotate_bertokens_with_corenlp(word) for word in recovered]
        self.recovered_pieces = recovered
    def bert_schema_linking(self, columns, tables, value_unit_dict=None):
        """Compute question<->schema links on the lemmatized tokens and remap
        question indices through idx_map."""
        question_tokens = self.normalized_pieces
        column_tokens = [col.normalized_pieces for col in columns]
        table_tokens = [tab.normalized_pieces for tab in tables]
        # Skip values when none were provided, or when the only entry is
        # the ('<UNK>',) placeholder key.
        if not value_unit_dict or (len(value_unit_dict) == 1 and value_unit_dict.get(('<UNK>',))):
            value_tokens = None
        else:
            value_tokens = [v[0].bert_tokens.normalized_pieces for v in value_unit_dict.values()]
        sc_link = compute_schema_linking(question_tokens, column_tokens, table_tokens, value_tokens)
        remapped = {}
        for link_type, matches in sc_link.items():
            fixed = {}
            for ij_str, match_val in matches.items():
                # Keys are "question_idx,column_or_table_idx" strings.
                q_part, col_tab_part = ij_str.split(",")
                real_q_id = self.idx_map[int(q_part)]
                fixed[f"{real_q_id},{int(col_tab_part)}"] = match_val
            remapped[link_type] = fixed
        return remapped
    def bert_cv_linking(self, schema):
        """Compute question<->cell-value links and remap question indices
        through idx_map."""
        # Surface-form tokens here because values usually match exactly.
        question_tokens = self.recovered_pieces
        cv_link = compute_cell_value_linking(question_tokens, schema)
        remapped = {}
        for link_type, matches in cv_link.items():
            fixed = {}
            for ij_str, match_val in matches.items():
                # Keys are "question_idx,column_or_table_idx" strings.
                q_part, col_tab_part = ij_str.split(",")
                real_q_id = self.idx_map[int(q_part)]
                fixed[f"{real_q_id},{int(col_tab_part)}"] = match_val
            remapped[link_type] = fixed
        return remapped
| [
"torch.as_tensor",
"torch.randperm",
"torch.LongTensor",
"transformers.AutoTokenizer.from_pretrained",
"copy.deepcopy",
"torch.arange",
"transformers.AutoModel.from_pretrained",
"json.dumps",
"attr.evolve",
"torch.randint",
"text2qdmr.utils.registry.register",
"text2qdmr.utils.corenlp.annotate... | [((49496, 49537), 'text2qdmr.utils.registry.register', 'registry.register', (['"""encoder"""', '"""text2qdmr"""'], {}), "('encoder', 'text2qdmr')\n", (49513, 49537), False, 'from text2qdmr.utils import registry\n'), ((73648, 73673), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(100000)'}), '(maxsize=100000)\n', (73657, 73673), False, 'from functools import lru_cache\n'), ((822, 831), 'attr.ib', 'attr.ib', ([], {}), '()\n', (829, 831), False, 'import attr\n'), ((845, 854), 'attr.ib', 'attr.ib', ([], {}), '()\n', (852, 854), False, 'import attr\n'), ((877, 886), 'attr.ib', 'attr.ib', ([], {}), '()\n', (884, 886), False, 'import attr\n'), ((907, 916), 'attr.ib', 'attr.ib', ([], {}), '()\n', (914, 916), False, 'import attr\n'), ((929, 938), 'attr.ib', 'attr.ib', ([], {}), '()\n', (936, 938), False, 'import attr\n'), ((963, 972), 'attr.ib', 'attr.ib', ([], {}), '()\n', (970, 972), False, 'import attr\n'), ((992, 1001), 'attr.ib', 'attr.ib', ([], {}), '()\n', (999, 1001), False, 'import attr\n'), ((1023, 1032), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1030, 1032), False, 'import attr\n'), ((1053, 1062), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1060, 1062), False, 'import attr\n'), ((1085, 1094), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1092, 1094), False, 'import attr\n'), ((1108, 1117), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1115, 1117), False, 'import attr\n'), ((1134, 1143), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1141, 1143), False, 'import attr\n'), ((1160, 1169), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1167, 1169), False, 'import attr\n'), ((1335, 1356), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1342, 1356), False, 'import attr\n'), ((1375, 1396), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1382, 1396), False, 'import attr\n'), ((1416, 1437), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1423, 1437), False, 'import attr\n'), ((1460, 
1481), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (1467, 1481), False, 'import attr\n'), ((1505, 1526), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (1512, 1526), False, 'import attr\n'), ((1546, 1567), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (1553, 1567), False, 'import attr\n'), ((1667, 1688), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1674, 1688), False, 'import attr\n'), ((1748, 1769), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1755, 1769), False, 'import attr\n'), ((1799, 1820), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (1806, 1820), False, 'import attr\n'), ((5258, 5321), 'text2qdmr.utils.serialization.to_dict_with_sorted_values', 'serialization.to_dict_with_sorted_values', (['r.foreign_keys_tables'], {}), '(r.foreign_keys_tables)\n', (5298, 5321), False, 'from text2qdmr.utils import serialization\n'), ((5851, 5880), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5874, 5880), False, 'import collections\n'), ((73726, 73791), 'text2qdmr.utils.corenlp.annotate', 'corenlp.annotate', (['tok'], {'annotators': "['tokenize', 'ssplit', 'lemma']"}), "(tok, annotators=['tokenize', 'ssplit', 'lemma'])\n", (73742, 73791), False, 'from text2qdmr.utils import corenlp\n'), ((6917, 6942), 'torch.randperm', 'torch.randperm', (['num_items'], {}), '(num_items)\n', (6931, 6942), False, 'import torch\n'), ((7290, 7324), 'attr.evolve', 'attr.evolve', (['value_units[0]'], {'idx': 'i'}), '(value_units[0], idx=i)\n', (7301, 7324), False, 'import attr\n'), ((11176, 11206), 'os.path.join', 'os.path.join', (['save_path', '"""enc"""'], {}), "(save_path, 'enc')\n", (11188, 11206), False, 'import os\n'), ((11259, 11288), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (11282, 11288), False, 'import collections\n'), ((12209, 12262), 
'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['self.pretrained_modelname'], {}), '(self.pretrained_modelname)\n', (12235, 12262), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((16198, 16227), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (16221, 16227), False, 'import collections\n'), ((27248, 27276), 'copy.deepcopy', 'copy.deepcopy', (['qdmr_code_cur'], {}), '(qdmr_code_cur)\n', (27261, 27276), False, 'import copy\n'), ((33744, 33772), 'copy.deepcopy', 'copy.deepcopy', (['qdmr_code_cur'], {}), '(qdmr_code_cur)\n', (33757, 33772), False, 'import copy\n'), ((41539, 41602), 'torch.zeros', 'torch.zeros', (['(num_qdmr_steps, num_qdmr_steps)'], {'dtype': 'torch.long'}), '((num_qdmr_steps, num_qdmr_steps), dtype=torch.long)\n', (41550, 41602), False, 'import torch\n'), ((48802, 48843), 'os.makedirs', 'os.makedirs', (['self.data_dir'], {'exist_ok': '(True)'}), '(self.data_dir, exist_ok=True)\n', (48813, 48843), False, 'import os\n'), ((49271, 49315), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.data_dir'], {}), '(self.data_dir)\n', (49300, 49315), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((50679, 50853), 'text2qdmr.utils.registry.instantiate', 'registry.instantiate', (["update_modules[update_config['name']]", 'update_config'], {'unused_keys': "{'name'}", 'device': 'self._device', 'hidden_size': 'self.enc_hidden_size', 'sc_link': '(True)'}), "(update_modules[update_config['name']], update_config,\n unused_keys={'name'}, device=self._device, hidden_size=self.\n enc_hidden_size, sc_link=True)\n", (50699, 50853), False, 'from text2qdmr.utils import registry\n'), ((50955, 51015), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self.preproc.pretrained_modelname'], {}), '(self.preproc.pretrained_modelname)\n', (50980, 51015), False, 'from transformers import AutoModel, AutoTokenizer, 
AutoConfig\n'), ((76658, 76744), 'text2qdmr.datasets.utils.spider_match_utils.compute_schema_linking', 'compute_schema_linking', (['question_tokens', 'column_tokens', 'table_tokens', 'value_tokens'], {}), '(question_tokens, column_tokens, table_tokens,\n value_tokens)\n', (76680, 76744), False, 'from text2qdmr.datasets.utils.spider_match_utils import compute_schema_linking, compute_cell_value_linking\n'), ((77369, 77420), 'text2qdmr.datasets.utils.spider_match_utils.compute_cell_value_linking', 'compute_cell_value_linking', (['question_tokens', 'schema'], {}), '(question_tokens, schema)\n', (77395, 77420), False, 'from text2qdmr.datasets.utils.spider_match_utils import compute_schema_linking, compute_cell_value_linking\n'), ((79126, 79212), 'text2qdmr.datasets.utils.spider_match_utils.compute_schema_linking', 'compute_schema_linking', (['question_tokens', 'column_tokens', 'table_tokens', 'value_tokens'], {}), '(question_tokens, column_tokens, table_tokens,\n value_tokens)\n', (79148, 79212), False, 'from text2qdmr.datasets.utils.spider_match_utils import compute_schema_linking, compute_cell_value_linking\n'), ((79837, 79888), 'text2qdmr.datasets.utils.spider_match_utils.compute_cell_value_linking', 'compute_cell_value_linking', (['question_tokens', 'schema'], {}), '(question_tokens, schema)\n', (79863, 79888), False, 'from text2qdmr.datasets.utils.spider_match_utils import compute_schema_linking, compute_cell_value_linking\n'), ((6677, 6751), 'attr.evolve', 'attr.evolve', (['val_unit'], {'tokenized_value': 'val_tokens', 'bert_tokens': 'bert_tokens'}), '(val_unit, tokenized_value=val_tokens, bert_tokens=bert_tokens)\n', (6688, 6751), False, 'import attr\n'), ((7866, 7894), 'attr.evolve', 'attr.evolve', (['val_unit'], {'idx': 'i'}), '(val_unit, idx=i)\n', (7877, 7894), False, 'import attr\n'), ((12305, 12361), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.pretrained_modelname'], {}), '(self.pretrained_modelname)\n', (12334, 
12361), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((43718, 43854), 'warnings.warn', 'warnings.warn', (['f"""The last node should be last, otherwise smth is wrong, have permutation {new_node_id}, aborting this shuffle"""'], {}), "(\n f'The last node should be last, otherwise smth is wrong, have permutation {new_node_id}, aborting this shuffle'\n )\n", (43731, 43854), False, 'import warnings\n'), ((46356, 46382), 'torch.randperm', 'torch.randperm', (['num_tables'], {}), '(num_tables)\n', (46370, 46382), False, 'import torch\n'), ((46406, 46430), 'torch.arange', 'torch.arange', (['num_tables'], {}), '(num_tables)\n', (46418, 46430), False, 'import torch\n'), ((49378, 49414), 'json.loads', 'json.loads', (['line'], {'cls': 'ComplexDecoder'}), '(line, cls=ComplexDecoder)\n', (49388, 49414), False, 'import json\n'), ((69968, 69992), 'torch.cat', 'torch.cat', (['memory'], {'dim': '(1)'}), '(memory, dim=1)\n', (69977, 69992), False, 'import torch\n'), ((73553, 73573), 'torch.cat', 'torch.cat', (['att_masks'], {}), '(att_masks)\n', (73562, 73573), False, 'import torch\n'), ((1618, 1646), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (1641, 1646), False, 'import collections\n'), ((12574, 12653), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.pretrained_modelname'], {'local_files_only': '(True)'}), '(self.pretrained_modelname, local_files_only=True)\n', (12603, 12653), False, 'from transformers import AutoModel, AutoTokenizer, AutoConfig\n'), ((42580, 42633), 'qdmr2sparql.structures.QdmrInstance.is_good_qdmr_ref', 'QdmrInstance.is_good_qdmr_ref', (['step[1][2].arg.keys[1]'], {}), '(step[1][2].arg.keys[1])\n', (42609, 42633), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((42659, 42708), 'qdmr2sparql.structures.QdmrInstance.ref_to_index', 'QdmrInstance.ref_to_index', (['step[1][2].arg.keys[1]'], {}), '(step[1][2].arg.keys[1])\n', (42684, 
42708), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((44078, 44114), 'copy.deepcopy', 'copy.deepcopy', (['qdmr_code_cur[i_step]'], {}), '(qdmr_code_cur[i_step])\n', (44091, 44114), False, 'import copy\n'), ((47868, 47892), 'torch.randperm', 'torch.randperm', (['num_cols'], {}), '(num_cols)\n', (47882, 47892), False, 'import torch\n'), ((57850, 57886), 'torch.LongTensor', 'torch.LongTensor', (['padded_token_lists'], {}), '(padded_token_lists)\n', (57866, 57886), False, 'import torch\n'), ((57931, 57963), 'torch.LongTensor', 'torch.LongTensor', (['att_mask_lists'], {}), '(att_mask_lists)\n', (57947, 57963), False, 'import torch\n'), ((58011, 58047), 'torch.LongTensor', 'torch.LongTensor', (['position_ids_lists'], {}), '(position_ids_lists)\n', (58027, 58047), False, 'import torch\n'), ((61073, 61099), 'torch.as_tensor', 'torch.as_tensor', (['relations'], {}), '(relations)\n', (61088, 61099), False, 'import torch\n'), ((63620, 63644), 'torch.cat', 'torch.cat', (['memory'], {'dim': '(1)'}), '(memory, dim=1)\n', (63629, 63644), False, 'import torch\n'), ((71913, 71966), 'torch.zeros', 'torch.zeros', (['(1, max_len, max_len)'], {'dtype': 'torch.int64'}), '((1, max_len, max_len), dtype=torch.int64)\n', (71924, 71966), False, 'import torch\n'), ((9246, 9325), 'attr.evolve', 'attr.evolve', (['val_unit'], {'q_match': "{'idx_question': idx_match, 'match': type_match}"}), "(val_unit, q_match={'idx_question': idx_match, 'match': type_match})\n", (9257, 9325), False, 'import attr\n'), ((40337, 40396), 'attr.evolve', 'attr.evolve', (['qdmr_code_new[i_step][1][2]'], {'arg': 'new_grounding'}), '(qdmr_code_new[i_step][1][2], arg=new_grounding)\n', (40348, 40396), False, 'import attr\n'), ((42083, 42117), 'qdmr2sparql.structures.QdmrInstance.is_good_qdmr_ref', 'QdmrInstance.is_good_qdmr_ref', (['ref'], {}), '(ref)\n', (42112, 42117), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((42228, 42258), 
'qdmr2sparql.structures.QdmrInstance.ref_to_index', 'QdmrInstance.ref_to_index', (['ref'], {}), '(ref)\n', (42253, 42258), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((45189, 45242), 'qdmr2sparql.structures.QdmrInstance.is_good_qdmr_ref', 'QdmrInstance.is_good_qdmr_ref', (['step[1][2].arg.keys[1]'], {}), '(step[1][2].arg.keys[1])\n', (45218, 45242), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((45272, 45321), 'qdmr2sparql.structures.QdmrInstance.ref_to_index', 'QdmrInstance.ref_to_index', (['step[1][2].arg.keys[1]'], {}), '(step[1][2].arg.keys[1])\n', (45297, 45321), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((45413, 45458), 'qdmr2sparql.structures.QdmrInstance.index_to_ref', 'QdmrInstance.index_to_ref', (['new_node_id[i_ref]'], {}), '(new_node_id[i_ref])\n', (45438, 45458), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((49057, 49104), 'os.path.join', 'os.path.join', (['self.data_dir', "(section + '.jsonl')"], {}), "(self.data_dir, section + '.jsonl')\n", (49069, 49104), False, 'import os\n'), ((49444, 49491), 'os.path.join', 'os.path.join', (['self.data_dir', "(section + '.jsonl')"], {}), "(self.data_dir, section + '.jsonl')\n", (49456, 49491), False, 'import os\n'), ((55210, 55244), 'torch.LongTensor', 'torch.LongTensor', (['question_indexes'], {}), '(question_indexes)\n', (55226, 55244), False, 'import torch\n'), ((58129, 58161), 'torch.LongTensor', 'torch.LongTensor', (['tok_type_lists'], {}), '(tok_type_lists)\n', (58145, 58161), False, 'import torch\n'), ((60987, 61043), 'numpy.zeros', 'np.zeros', (['(total_enc_len, total_enc_len)'], {'dtype': 'np.int64'}), '((total_enc_len, total_enc_len), dtype=np.int64)\n', (60995, 61043), True, 'import numpy as np\n'), ((68961, 69027), 'torch.cat', 'torch.cat', (['(t_enc_new_item, c_enc_new_item, v_enc_new_item)'], {'dim': '(1)'}), '((t_enc_new_item, c_enc_new_item, v_enc_new_item), dim=1)\n', 
(68970, 69027), False, 'import torch\n'), ((69090, 69140), 'torch.cat', 'torch.cat', (['(t_enc_new_item, c_enc_new_item)'], {'dim': '(1)'}), '((t_enc_new_item, c_enc_new_item), dim=1)\n', (69099, 69140), False, 'import torch\n'), ((69234, 69309), 'torch.cat', 'torch.cat', (['(align_mat_item[1], align_mat_item[0], align_mat_item[2])'], {'dim': '(1)'}), '((align_mat_item[1], align_mat_item[0], align_mat_item[2]), dim=1)\n', (69243, 69309), False, 'import torch\n'), ((69374, 69430), 'torch.cat', 'torch.cat', (['(align_mat_item[1], align_mat_item[0])'], {'dim': '(1)'}), '((align_mat_item[1], align_mat_item[0]), dim=1)\n', (69383, 69430), False, 'import torch\n'), ((39944, 40045), 'qdmr2sparql.structures.GroundingKey.make_comparative_grounding', 'GroundingKey.make_comparative_grounding', (['new_op', 'comparative_arg.keys[1]', 'comparative_arg.keys[2]'], {}), '(new_op, comparative_arg.keys[1],\n comparative_arg.keys[2])\n', (39983, 40045), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((40108, 40180), 'qdmr2sparql.structures.GroundingKey.make_comparative_grounding', 'GroundingKey.make_comparative_grounding', (['new_op', 'comparative_arg.keys[1]'], {}), '(new_op, comparative_arg.keys[1])\n', (40147, 40180), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((40489, 40548), 'attr.evolve', 'attr.evolve', (['qdmr_code_new[i_step][1][0]'], {'arg': 'new_grounding'}), '(qdmr_code_new[i_step][1][0], arg=new_grounding)\n', (40500, 40548), False, 'import attr\n'), ((44649, 44683), 'qdmr2sparql.structures.QdmrInstance.is_good_qdmr_ref', 'QdmrInstance.is_good_qdmr_ref', (['ref'], {}), '(ref)\n', (44678, 44683), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((44798, 44828), 'qdmr2sparql.structures.QdmrInstance.ref_to_index', 'QdmrInstance.ref_to_index', (['ref'], {}), '(ref)\n', (44823, 44828), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((44866, 44911), 
'qdmr2sparql.structures.QdmrInstance.index_to_ref', 'QdmrInstance.index_to_ref', (['new_node_id[i_ref]'], {}), '(new_node_id[i_ref])\n', (44891, 44911), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((55417, 55449), 'torch.LongTensor', 'torch.LongTensor', (['column_indexes'], {}), '(column_indexes)\n', (55433, 55449), False, 'import torch\n'), ((55566, 55597), 'torch.LongTensor', 'torch.LongTensor', (['table_indexes'], {}), '(table_indexes)\n', (55582, 55597), False, 'import torch\n'), ((55712, 55743), 'torch.LongTensor', 'torch.LongTensor', (['value_indexes'], {}), '(value_indexes)\n', (55728, 55743), False, 'import torch\n'), ((56697, 56727), 'torch.LongTensor', 'torch.LongTensor', (['grnd_indexes'], {}), '(grnd_indexes)\n', (56713, 56727), False, 'import torch\n'), ((62422, 62488), 'torch.cat', 'torch.cat', (['(t_enc_new_item, c_enc_new_item, v_enc_new_item)'], {'dim': '(1)'}), '((t_enc_new_item, c_enc_new_item, v_enc_new_item), dim=1)\n', (62431, 62488), False, 'import torch\n'), ((62559, 62609), 'torch.cat', 'torch.cat', (['(t_enc_new_item, c_enc_new_item)'], {'dim': '(1)'}), '((t_enc_new_item, c_enc_new_item), dim=1)\n', (62568, 62609), False, 'import torch\n'), ((62736, 62811), 'torch.cat', 'torch.cat', (['(align_mat_item[1], align_mat_item[0], align_mat_item[2])'], {'dim': '(1)'}), '((align_mat_item[1], align_mat_item[0], align_mat_item[2]), dim=1)\n', (62745, 62811), False, 'import torch\n'), ((30956, 30978), 'torch.randint', 'torch.randint', (['(2)', '(1,)'], {}), '(2, (1,))\n', (30969, 30978), False, 'import torch\n'), ((46466, 46490), 'torch.arange', 'torch.arange', (['num_tables'], {}), '(num_tables)\n', (46478, 46490), False, 'import torch\n'), ((49180, 49216), 'json.dumps', 'json.dumps', (['text'], {'cls': 'ComplexEncoder'}), '(text, cls=ComplexEncoder)\n', (49190, 49216), False, 'import json\n'), ((56011, 56045), 'torch.LongTensor', 'torch.LongTensor', (['column_indexes_2'], {}), '(column_indexes_2)\n', (56027, 56045), 
False, 'import torch\n'), ((56269, 56302), 'torch.LongTensor', 'torch.LongTensor', (['table_indexes_2'], {}), '(table_indexes_2)\n', (56285, 56302), False, 'import torch\n'), ((56524, 56557), 'torch.LongTensor', 'torch.LongTensor', (['value_indexes_2'], {}), '(value_indexes_2)\n', (56540, 56557), False, 'import torch\n'), ((57018, 57050), 'torch.LongTensor', 'torch.LongTensor', (['grnd_indexes_2'], {}), '(grnd_indexes_2)\n', (57034, 57050), False, 'import torch\n'), ((62905, 62961), 'torch.cat', 'torch.cat', (['(align_mat_item[1], align_mat_item[0])'], {'dim': '(1)'}), '((align_mat_item[1], align_mat_item[0]), dim=1)\n', (62914, 62961), False, 'import torch\n'), ((32766, 32840), 'qdmr2sparql.structures.GroundingKey.make_sortdir_grounding', 'GroundingKey.make_sortdir_grounding', ([], {'ascending': "(sort_dir_arg == 'ascending')"}), "(ascending=sort_dir_arg == 'ascending')\n", (32801, 32840), False, 'from qdmr2sparql.structures import QdmrInstance, GroundingKey\n'), ((47938, 47960), 'torch.arange', 'torch.arange', (['num_cols'], {}), '(num_cols)\n', (47950, 47960), False, 'import torch\n')] |
# This script produces custom wrappers for XGBoost and Lasagne modules (to generate sklearn-like interface)
# The implementation of Lasagne (with some custom adjustments) is adapted from <NAME>'s solution
# to Kaggle's Otto Product Classification Challenge which can be found here https://github.com/ahara/kaggle_otto
# Description:
# 1. XGBoostRegressor (used in stacking procedure): gradient boosting model with least squares objective.
# It can be trained with either tree or linear booster
# The following two models were used in the second stage to obtain probabilities:
# 2. XGBoostClassifier: gradient boosting model with binary_logistic objective and tree booster
# 3. NeuralNetClassifier: deep pyramidal feed-forward network with categorical crossentropy objective.
# It employs three hidden layers and SGD update with Nesterov momentum for optimization. Dropout in
# both input and hidden layers is used for regularization
import copy
import itertools
import numpy as np
import lasagne
import math
import theano
from theano import tensor as T
import time
from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params
from lasagne.nonlinearities import rectify, softmax
from lasagne.objectives import categorical_crossentropy, Objective
from sklearn.base import BaseEstimator
import xgboost as xgb
from sklearn.utils import check_random_state
class XGBoostClassifier(BaseEstimator):
    """Sklearn-like wrapper around an XGBoost binary classifier.

    Trains a gradient-boosted tree model with the ``binary:logistic``
    objective via the low-level ``xgb.train`` API and exposes
    ``fit``/``predict_proba`` so it can be used like an sklearn estimator
    (e.g. in the second-stage stacking described at the top of this file).
    """

    def __init__(self, nthread, eta,
                 gamma, max_depth, min_child_weight, max_delta_step,
                 subsample, colsample_bytree, scale_pos_weight, silent, seed,
                 l2_reg, alpha, n_estimators):
        # Hyper-parameters are stored verbatim under their constructor names
        # so BaseEstimator's get_params()/set_params() keep working.
        self.silent = silent
        self.nthread = nthread
        self.eta = eta
        self.gamma = gamma
        self.max_depth = max_depth
        self.min_child_weight = min_child_weight
        self.max_delta_step = max_delta_step
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.scale_pos_weight = scale_pos_weight
        self.seed = seed
        self.l2_reg = l2_reg
        self.alpha = alpha
        self.n_estimators = n_estimators
        self.model = None  # trained Booster, set by fit()

    def fit(self, X, y):
        """Train the booster on ``X``/``y`` for ``n_estimators`` rounds.

        Returns ``self`` to allow sklearn-style chaining.
        """
        sf = xgb.DMatrix(X, y)
        params = {"objective": 'binary:logistic',
                  # BUG FIX: nthread was accepted in __init__ but never
                  # forwarded to the booster, so the setting had no effect.
                  "nthread": self.nthread,
                  "eta": self.eta,
                  "gamma": self.gamma,
                  "max_depth": self.max_depth,
                  "min_child_weight": self.min_child_weight,
                  "max_delta_step": self.max_delta_step,
                  "subsample": self.subsample,
                  "silent": self.silent,
                  "colsample_bytree": self.colsample_bytree,
                  "scale_pos_weight": self.scale_pos_weight,
                  "seed": self.seed,
                  "lambda": self.l2_reg,
                  "alpha": self.alpha}
        self.model = xgb.train(params, sf, self.n_estimators)
        return self

    def predict_proba(self, X):
        """Return the booster's predicted probabilities for ``X``."""
        X = xgb.DMatrix(X)
        preds = self.model.predict(X)
        return preds
class XGBoostRegressor(BaseEstimator):
    """Sklearn-like wrapper around an XGBoost least-squares regressor.

    Used in the stacking procedure (see module header). Depending on the
    ``booster`` argument it trains either a tree booster (``gbtree``) or a
    linear booster (``gblinear``) with the ``reg:linear`` objective via the
    low-level ``xgb.train`` API.
    """

    def __init__(self, booster, nthread, eta,
                 gamma, max_depth, min_child_weight, max_delta_step,
                 subsample, colsample_bytree, silent, seed,
                 l2_reg, alpha, n_estimators):
        # Hyper-parameters are stored verbatim under their constructor names
        # so BaseEstimator's get_params()/set_params() keep working.
        self.booster = booster
        self.silent = silent
        self.nthread = nthread
        self.eta = eta
        self.gamma = gamma
        self.max_depth = max_depth
        self.min_child_weight = min_child_weight
        self.max_delta_step = max_delta_step
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.seed = seed
        self.l2_reg = l2_reg
        self.alpha = alpha
        self.n_estimators = n_estimators
        self.model = None  # trained Booster, set by fit()

    def fit(self, X, y):
        """Train the booster on ``X``/``y`` for ``n_estimators`` rounds.

        Tree-specific parameters (depth, subsampling, ...) are only passed
        when the tree booster is selected; the linear booster only uses the
        regularization and learning-rate settings.
        Returns ``self`` to allow sklearn-style chaining.
        """
        sf = xgb.DMatrix(X, y)
        if (self.booster == 'gbtree'):
            params = {"objective": 'reg:linear',
                      # BUG FIX: nthread was accepted in __init__ but never
                      # forwarded to the booster, so the setting had no effect.
                      "nthread": self.nthread,
                      "eta": self.eta,
                      "gamma": self.gamma,
                      "max_depth": self.max_depth,
                      "min_child_weight": self.min_child_weight,
                      "max_delta_step": self.max_delta_step,
                      "subsample": self.subsample,
                      "silent": self.silent,
                      "colsample_bytree": self.colsample_bytree,
                      "seed": self.seed,
                      "lambda": self.l2_reg,
                      "alpha": self.alpha}
        else:
            params = {"booster": 'gblinear',
                      "objective": 'reg:linear',
                      "nthread": self.nthread,
                      "silent": self.silent,
                      "seed": self.seed,
                      "eta": self.eta,
                      "lambda": self.l2_reg,
                      "alpha": self.alpha}
        self.model = xgb.train(params, sf, self.n_estimators)
        return self

    def predict(self, X):
        """Return the booster's predictions for ``X``."""
        X = xgb.DMatrix(X)
        preds = self.model.predict(X)
        return preds
class NeuralNetClassifier(BaseEstimator):
    """Sklearn-like wrapper around a pyramidal feed-forward Lasagne network.

    Three dense hidden layers with rectifier nonlinearities and a softmax
    output, trained with SGD + Nesterov momentum on a categorical
    cross-entropy objective. Dropout is applied to the input layer and to
    every hidden layer for regularization (see the module header).
    """

    def __init__(self, n_hidden1=20, n_hidden2=20, n_hidden3=20, max_epochs=150, batch_size=200,
                 lr=0.01, momentum=0.9, dropout_input=0.2, dropout_hidden=0.5, valid_ratio=0.0,
                 use_valid=False, verbose=0, random_state=None):
        self.n_hidden1 = n_hidden1
        self.n_hidden2 = n_hidden2
        self.n_hidden3 = n_hidden3
        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.lr = lr
        self.momentum = momentum
        self.dropout_input = dropout_input
        self.dropout_hidden = dropout_hidden
        self.valid_ratio = valid_ratio
        self.use_valid = use_valid
        self.verbose = verbose
        self.random_state = random_state
        # State (populated by fit)
        self.score_ = None      # best validation error seen during training
        self.classes_ = None    # unique class labels, sklearn convention
        self.n_classes_ = None  # number of output units
        self.model = None       # deep copy of the best output layer

    def fit(self, data, targets, sample_weight=None):
        """Build the network and train it with early stopping on validation loss.

        ``sample_weight`` is accepted for sklearn API compatibility but is
        not used by this implementation. Training can be interrupted with
        Ctrl-C; the best model found so far is kept. Returns ``self``.
        """
        self.classes_, indices = np.unique(targets, return_inverse=True)
        self.n_classes_ = self.classes_.shape[0]
        random_state = check_random_state(self.random_state)
        # Shuffle data and eventually split on train and validation sets
        if self.valid_ratio > 0:
            # NOTE(review): StratifiedShuffleSplit is not among the visible
            # imports of this file; the `n_iter=` keyword suggests the old
            # sklearn.cross_validation API — confirm the import exists.
            strat_shuffled_split = StratifiedShuffleSplit(targets, test_size=self.valid_ratio,
                                                          n_iter=1, random_state=self.random_state)
            train_index, valid_index = [s for s in strat_shuffled_split][0]
            X_train, y_train = data[train_index], targets[train_index]
            X_valid, y_valid = data[valid_index], targets[valid_index]
        else:
            # No split requested: train on everything, empty validation set.
            X_train, y_train = data, targets
            X_valid, y_valid = np.array([]), np.array([])
        if self.verbose > 5:
            print ('X_train: %s, y_train: %s' % (X_train.shape, y_train.shape))
            if self.use_valid:
                print ('X_valid: %s, y_valid: %s' % (X_valid.shape, y_valid.shape))
        # Prepare theano variables (data lives in theano shared storage;
        # labels are cast to int32 as required by the crossentropy objective)
        dataset = dict(
            X_train=theano.shared(lasagne.utils.floatX(X_train)),
            y_train=T.cast(theano.shared(y_train), 'int32'),
            X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
            y_valid=T.cast(theano.shared(y_valid), 'int32'),
            num_examples_train=X_train.shape[0],
            num_examples_valid=X_valid.shape[0],
            input_dim=X_train.shape[1],
            output_dim=self.n_classes_,
        )
        if self.verbose > 0:
            print ("Building model and compiling functions...")
        output_layer = self.build_model(dataset['input_dim'])
        iter_funcs = self.create_iter_functions(dataset, output_layer)
        if self.verbose > 0:
            print ("Starting training...")
        now = time.time()
        results = []
        try:
            # self.train() is an infinite generator; we stop it explicitly
            # once max_epochs is reached (or the user hits Ctrl-C).
            for epoch in self.train(iter_funcs, dataset, output_layer):
                if self.verbose > 1:
                    print ("Epoch {} of {} took {:.3f}s".format(
                        epoch['number'], self.max_epochs, time.time() - now))
                    now = time.time()
                results.append([epoch['number'], epoch['train_loss'], epoch['valid_loss']])
                if self.verbose > 1:
                    print (" training loss:\t\t{:.6f}".format(epoch['train_loss']))
                    print (" validation loss:\t\t{:.6f}".format(epoch['valid_loss']))
                    print (" validation accuracy:\t\t{:.2f} %%".format(
                        epoch['valid_accuracy'] * 100))
                if epoch['number'] >= self.max_epochs:
                    break
            if self.verbose > 0:
                print ('Minimum validation error: %f (epoch %d)' % \
                    (epoch['best_val_error'], epoch['best_val_iter']))
        except KeyboardInterrupt:
            # Allow manual early stopping; the best model so far is kept.
            pass
        return self

    def predict(self, data):
        """Return hard class predictions for ``data``."""
        preds, _ = self.make_predictions(data)
        return preds

    def predict_proba(self, data):
        """Return per-class probabilities for ``data``."""
        _, proba = self.make_predictions(data)
        return proba

    def score(self):
        """Return the best validation error recorded during fit()."""
        return self.score_

    # Private methods
    def build_model(self, input_dim):
        """Assemble the input->3x(dense+dropout)->softmax layer stack."""
        l_in = InputLayer(shape=(self.batch_size, input_dim))
        l_in_dropout = DropoutLayer(l_in, p=self.dropout_input)
        l_hidden1 = DenseLayer(l_in_dropout, num_units=self.n_hidden1, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout_hidden)
        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden2, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout_hidden)
        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden3, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout_hidden)
        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
        return l_out

    def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
        """Compile the theano train/valid batch functions.

        Returns a dict with a 'train' function (runs one SGD step on batch
        ``b`` and returns the training loss) and a 'valid' function (loss,
        accuracy and probabilities on validation batch ``b``), or
        ``valid=None`` when no validation set is used.
        """
        batch_index = T.iscalar('batch_index')
        X_batch = X_tensor_type('x')
        y_batch = T.ivector('y')
        batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
        objective = Objective(output_layer, loss_function=categorical_crossentropy)
        # deterministic=True disables dropout for evaluation passes.
        loss_train = objective.get_loss(X_batch, target=y_batch)
        loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)
        pred = T.argmax(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), axis=1)
        proba = lasagne.layers.get_output(output_layer, X_batch, deterministic=True)
        accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
        all_params = get_all_params(output_layer)
        updates = lasagne.updates.nesterov_momentum(loss_train, all_params, self.lr, self.momentum)
        iter_train = theano.function(
            [batch_index], loss_train,
            updates=updates,
            givens={
                X_batch: dataset['X_train'][batch_slice],
                y_batch: dataset['y_train'][batch_slice],
            },
            on_unused_input='ignore',
        )
        iter_valid = None
        if self.use_valid:
            iter_valid = theano.function(
                [batch_index], [loss_eval, accuracy, proba],
                givens={
                    X_batch: dataset['X_valid'][batch_slice],
                    y_batch: dataset['y_valid'][batch_slice],
                },
            )
        return dict(train=iter_train, valid=iter_valid)

    def create_test_function(self, dataset, output_layer, X_tensor_type=T.matrix):
        """Compile a theano function returning (pred, proba) for test batch ``b``."""
        batch_index = T.iscalar('batch_index')
        X_batch = X_tensor_type('x')
        batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
        pred = T.argmax(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), axis=1)
        proba = lasagne.layers.get_output(output_layer, X_batch, deterministic=True)
        iter_test = theano.function(
            [batch_index], [pred, proba],
            givens={
                X_batch: dataset['X_test'][batch_slice],
            },
        )
        return dict(test=iter_test)

    def train(self, iter_funcs, dataset, output_layer):
        """Infinite epoch generator; yields per-epoch statistics.

        Snapshots the best model into self.model/self.score_ whenever the
        validation loss improves (or, without a validation set, on the last
        epoch). The caller (fit) is responsible for breaking out of the loop.
        """
        num_batches_train = dataset['num_examples_train'] // self.batch_size
        num_batches_valid = int(math.ceil(dataset['num_examples_valid'] / float(self.batch_size)))
        best_val_err = 100
        best_val_iter = -1
        for epoch in itertools.count(1):
            batch_train_losses = []
            for b in range(num_batches_train):
                batch_train_loss = iter_funcs['train'](b)
                batch_train_losses.append(batch_train_loss)
            avg_train_loss = np.mean(batch_train_losses)
            batch_valid_losses = []
            batch_valid_accuracies = []
            batch_valid_probas = []
            if self.use_valid:
                for b in range(num_batches_valid):
                    batch_valid_loss, batch_valid_accuracy, batch_valid_proba = iter_funcs['valid'](b)
                    batch_valid_losses.append(batch_valid_loss)
                    batch_valid_accuracies.append(batch_valid_accuracy)
                    batch_valid_probas.append(batch_valid_proba)
            # np.mean of an empty list yields nan when use_valid is False.
            avg_valid_loss = np.mean(batch_valid_losses)
            avg_valid_accuracy = np.mean(batch_valid_accuracies)
            if (best_val_err > avg_valid_loss and self.use_valid) or\
                    (epoch == self.max_epochs and not self.use_valid):
                best_val_err = avg_valid_loss
                best_val_iter = epoch
                # Save model
                self.score_ = best_val_err
                self.model = copy.deepcopy(output_layer)
            yield {
                'number': epoch,
                'train_loss': avg_train_loss,
                'valid_loss': avg_valid_loss,
                'valid_accuracy': avg_valid_accuracy,
                'best_val_error': best_val_err,
                'best_val_iter': best_val_iter,
            }

    def make_predictions(self, data):
        """Run the saved model over ``data`` in batches.

        Returns a tuple (predictions, probabilities).
        """
        dataset = dict(
            X_test=theano.shared(lasagne.utils.floatX(data)),
            num_examples_test=data.shape[0],
            input_dim=data.shape[1],
            output_dim=self.n_classes_,
        )
        iter_funcs = self.create_test_function(dataset, self.model)
        num_batches_test = int(math.ceil(dataset['num_examples_test'] / float(self.batch_size)))
        test_preds, test_probas = np.array([]), None
        for b in range(num_batches_test):
            batch_test_pred, batch_test_proba = iter_funcs['test'](b)
            test_preds = np.append(test_preds, batch_test_pred)
            # First batch initializes the proba matrix; later ones are stacked.
            test_probas = np.append(test_probas, batch_test_proba, axis=0) if test_probas is not None else batch_test_proba
        return test_preds, test_probas
| [
"theano.tensor.iscalar",
"lasagne.updates.nesterov_momentum",
"lasagne.utils.floatX",
"numpy.array",
"copy.deepcopy",
"xgboost.DMatrix",
"lasagne.layers.get_all_params",
"numpy.mean",
"theano.shared",
"lasagne.objectives.Objective",
"xgboost.train",
"theano.function",
"lasagne.layers.Dropout... | [((2244, 2261), 'xgboost.DMatrix', 'xgb.DMatrix', (['X', 'y'], {}), '(X, y)\n', (2255, 2261), True, 'import xgboost as xgb\n'), ((2803, 2843), 'xgboost.train', 'xgb.train', (['params', 'sf', 'self.n_estimators'], {}), '(params, sf, self.n_estimators)\n', (2812, 2843), True, 'import xgboost as xgb\n'), ((2907, 2921), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {}), '(X)\n', (2918, 2921), True, 'import xgboost as xgb\n'), ((3832, 3849), 'xgboost.DMatrix', 'xgb.DMatrix', (['X', 'y'], {}), '(X, y)\n', (3843, 3849), True, 'import xgboost as xgb\n'), ((4717, 4757), 'xgboost.train', 'xgb.train', (['params', 'sf', 'self.n_estimators'], {}), '(params, sf, self.n_estimators)\n', (4726, 4757), True, 'import xgboost as xgb\n'), ((4816, 4830), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {}), '(X)\n', (4827, 4830), True, 'import xgboost as xgb\n'), ((5883, 5922), 'numpy.unique', 'np.unique', (['targets'], {'return_inverse': '(True)'}), '(targets, return_inverse=True)\n', (5892, 5922), True, 'import numpy as np\n'), ((5996, 6033), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (6014, 6033), False, 'from sklearn.utils import check_random_state\n'), ((7712, 7723), 'time.time', 'time.time', ([], {}), '()\n', (7721, 7723), False, 'import time\n'), ((9130, 9176), 'lasagne.layers.InputLayer', 'InputLayer', ([], {'shape': '(self.batch_size, input_dim)'}), '(shape=(self.batch_size, input_dim))\n', (9140, 9176), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9198, 9238), 'lasagne.layers.DropoutLayer', 'DropoutLayer', (['l_in'], {'p': 'self.dropout_input'}), '(l_in, p=self.dropout_input)\n', (9210, 9238), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9260, 9332), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_in_dropout'], {'num_units': 'self.n_hidden1', 'nonlinearity': 'rectify'}), '(l_in_dropout, 
num_units=self.n_hidden1, nonlinearity=rectify)\n', (9270, 9332), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9361, 9407), 'lasagne.layers.DropoutLayer', 'DropoutLayer', (['l_hidden1'], {'p': 'self.dropout_hidden'}), '(l_hidden1, p=self.dropout_hidden)\n', (9373, 9407), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9429, 9506), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_hidden1_dropout'], {'num_units': 'self.n_hidden2', 'nonlinearity': 'rectify'}), '(l_hidden1_dropout, num_units=self.n_hidden2, nonlinearity=rectify)\n', (9439, 9506), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9535, 9581), 'lasagne.layers.DropoutLayer', 'DropoutLayer', (['l_hidden2'], {'p': 'self.dropout_hidden'}), '(l_hidden2, p=self.dropout_hidden)\n', (9547, 9581), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9611, 9688), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_hidden2_dropout'], {'num_units': 'self.n_hidden3', 'nonlinearity': 'rectify'}), '(l_hidden2_dropout, num_units=self.n_hidden3, nonlinearity=rectify)\n', (9621, 9688), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9717, 9763), 'lasagne.layers.DropoutLayer', 'DropoutLayer', (['l_hidden3'], {'p': 'self.dropout_hidden'}), '(l_hidden3, p=self.dropout_hidden)\n', (9729, 9763), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9789, 9867), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_hidden3_dropout'], {'num_units': 'self.n_classes_', 'nonlinearity': 'softmax'}), '(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)\n', (9799, 9867), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((9997, 10021), 'theano.tensor.iscalar', 'T.iscalar', (['"""batch_index"""'], {}), 
"('batch_index')\n", (10006, 10021), True, 'from theano import tensor as T\n'), ((10077, 10091), 'theano.tensor.ivector', 'T.ivector', (['"""y"""'], {}), "('y')\n", (10086, 10091), True, 'from theano import tensor as T\n'), ((10210, 10273), 'lasagne.objectives.Objective', 'Objective', (['output_layer'], {'loss_function': 'categorical_crossentropy'}), '(output_layer, loss_function=categorical_crossentropy)\n', (10219, 10273), False, 'from lasagne.objectives import categorical_crossentropy, Objective\n'), ((10543, 10611), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['output_layer', 'X_batch'], {'deterministic': '(True)'}), '(output_layer, X_batch, deterministic=True)\n', (10568, 10611), False, 'import lasagne\n'), ((10709, 10737), 'lasagne.layers.get_all_params', 'get_all_params', (['output_layer'], {}), '(output_layer)\n', (10723, 10737), False, 'from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params\n'), ((10756, 10842), 'lasagne.updates.nesterov_momentum', 'lasagne.updates.nesterov_momentum', (['loss_train', 'all_params', 'self.lr', 'self.momentum'], {}), '(loss_train, all_params, self.lr, self.\n momentum)\n', (10789, 10842), False, 'import lasagne\n'), ((10860, 11047), 'theano.function', 'theano.function', (['[batch_index]', 'loss_train'], {'updates': 'updates', 'givens': "{X_batch: dataset['X_train'][batch_slice], y_batch: dataset['y_train'][\n batch_slice]}", 'on_unused_input': '"""ignore"""'}), "([batch_index], loss_train, updates=updates, givens={X_batch:\n dataset['X_train'][batch_slice], y_batch: dataset['y_train'][\n batch_slice]}, on_unused_input='ignore')\n", (10875, 11047), False, 'import theano\n'), ((11647, 11671), 'theano.tensor.iscalar', 'T.iscalar', (['"""batch_index"""'], {}), "('batch_index')\n", (11656, 11671), True, 'from theano import tensor as T\n'), ((11925, 11993), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['output_layer', 'X_batch'], {'deterministic': '(True)'}), '(output_layer, 
X_batch, deterministic=True)\n', (11950, 11993), False, 'import lasagne\n'), ((12015, 12115), 'theano.function', 'theano.function', (['[batch_index]', '[pred, proba]'], {'givens': "{X_batch: dataset['X_test'][batch_slice]}"}), "([batch_index], [pred, proba], givens={X_batch: dataset[\n 'X_test'][batch_slice]})\n", (12030, 12115), False, 'import theano\n'), ((12524, 12542), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (12539, 12542), False, 'import itertools\n'), ((10449, 10517), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['output_layer', 'X_batch'], {'deterministic': '(True)'}), '(output_layer, X_batch, deterministic=True)\n', (10474, 10517), False, 'import lasagne\n'), ((10638, 10657), 'theano.tensor.eq', 'T.eq', (['pred', 'y_batch'], {}), '(pred, y_batch)\n', (10642, 10657), True, 'from theano import tensor as T\n'), ((11224, 11387), 'theano.function', 'theano.function', (['[batch_index]', '[loss_eval, accuracy, proba]'], {'givens': "{X_batch: dataset['X_valid'][batch_slice], y_batch: dataset['y_valid'][\n batch_slice]}"}), "([batch_index], [loss_eval, accuracy, proba], givens={\n X_batch: dataset['X_valid'][batch_slice], y_batch: dataset['y_valid'][\n batch_slice]})\n", (11239, 11387), False, 'import theano\n'), ((11831, 11899), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['output_layer', 'X_batch'], {'deterministic': '(True)'}), '(output_layer, X_batch, deterministic=True)\n', (11856, 11899), False, 'import lasagne\n'), ((12774, 12801), 'numpy.mean', 'np.mean', (['batch_train_losses'], {}), '(batch_train_losses)\n', (12781, 12801), True, 'import numpy as np\n'), ((13332, 13359), 'numpy.mean', 'np.mean', (['batch_valid_losses'], {}), '(batch_valid_losses)\n', (13339, 13359), True, 'import numpy as np\n'), ((13393, 13424), 'numpy.mean', 'np.mean', (['batch_valid_accuracies'], {}), '(batch_valid_accuracies)\n', (13400, 13424), True, 'import numpy as np\n'), ((14549, 14561), 'numpy.array', 'np.array', (['[]'], {}), 
'([])\n', (14557, 14561), True, 'import numpy as np\n'), ((14706, 14744), 'numpy.append', 'np.append', (['test_preds', 'batch_test_pred'], {}), '(test_preds, batch_test_pred)\n', (14715, 14744), True, 'import numpy as np\n'), ((6644, 6656), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6652, 6656), True, 'import numpy as np\n'), ((6658, 6670), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6666, 6670), True, 'import numpy as np\n'), ((8028, 8039), 'time.time', 'time.time', ([], {}), '()\n', (8037, 8039), False, 'import time\n'), ((13752, 13779), 'copy.deepcopy', 'copy.deepcopy', (['output_layer'], {}), '(output_layer)\n', (13765, 13779), False, 'import copy\n'), ((14771, 14819), 'numpy.append', 'np.append', (['test_probas', 'batch_test_proba'], {'axis': '(0)'}), '(test_probas, batch_test_proba, axis=0)\n', (14780, 14819), True, 'import numpy as np\n'), ((6990, 7019), 'lasagne.utils.floatX', 'lasagne.utils.floatX', (['X_train'], {}), '(X_train)\n', (7010, 7019), False, 'import lasagne\n'), ((7049, 7071), 'theano.shared', 'theano.shared', (['y_train'], {}), '(y_train)\n', (7062, 7071), False, 'import theano\n'), ((7117, 7146), 'lasagne.utils.floatX', 'lasagne.utils.floatX', (['X_valid'], {}), '(X_valid)\n', (7137, 7146), False, 'import lasagne\n'), ((7176, 7198), 'theano.shared', 'theano.shared', (['y_valid'], {}), '(y_valid)\n', (7189, 7198), False, 'import theano\n'), ((14187, 14213), 'lasagne.utils.floatX', 'lasagne.utils.floatX', (['data'], {}), '(data)\n', (14207, 14213), False, 'import lasagne\n'), ((7986, 7997), 'time.time', 'time.time', ([], {}), '()\n', (7995, 7997), False, 'import time\n')] |
import tkinter as tk
from pathlib import Path
import random
from numpy import array_equal, copy, zeros, any
_GRID_SIZE = 4  # the board is _GRID_SIZE x _GRID_SIZE tiles
class Application(tk.Tk):
    """A tkinter implementation of the 2048 tile-sliding game.

    The board is held in ``self.tab``, a _GRID_SIZE x _GRID_SIZE numpy int
    array (0 = empty cell). ``self.old_tab``/``self.old_score`` hold the
    previous state for the one-step Undo feature. Merged tiles are
    temporarily marked by decrementing their doubled value by 1 (making it
    odd) so they cannot merge twice in one move; display_tab() restores the
    even value before drawing.
    """

    def __init__(self):
        """Build the window, score bar, buttons and grid, then start a game."""
        tk.Tk.__init__(self)
        self.wm_iconbitmap('2048.ico')
        self.title("2048")
        self.minsize(272, 350)
        self.tab = []
        self.old_tab = []
        # Random base color: tile values are added to it to shade the tiles.
        self.random_color = random.randrange(16777215)
        # Score frame
        self.score_frame = tk.Frame(self)
        self.score_frame.pack()
        # Score label
        self.score = 0
        self.old_score = 0
        self.score_text = tk.StringVar()
        self.score_text.set('Score\n' + str(self.score))
        self.score_label = tk.Label(self.score_frame, textvariable=self.score_text, font=("Arial", 16),
                                    width=11, borderwidth=1, relief="ridge")
        self.score_label.pack(side=tk.LEFT)
        # Best score label (persisted in score.txt between runs)
        self.best = 0
        try:
            self.best = int(Path('score.txt').read_text())
        except FileNotFoundError:
            pass
        self.best_text = tk.StringVar()
        self.best_text.set('Best\n' + str(self.best))
        self.best_label = tk.Label(self.score_frame, textvariable=self.best_text, font=("Arial", 16),
                                   width=11, borderwidth=1, relief="ridge")
        self.best_label.pack(side=tk.RIGHT)
        # Button frame
        self.button_frame = tk.Frame(self)
        self.button_frame.pack()
        # Restart button
        restart_button = tk.Button(self.button_frame, text="Restart", command=self.restart, font=("Arial", 10),
                                   width=16, borderwidth=1, relief="ridge")
        restart_button.pack(side=tk.LEFT)
        # Undo button (disabled until a first move has been made)
        self.undo_button = tk.Button(self.button_frame, text="Undo", command=self.undo, font=("Arial", 10),
                                     width=16, borderwidth=1, relief="ridge", state=tk.DISABLED)
        self.undo_button.pack(side=tk.RIGHT)
        # Game grid
        self.grid = tk.Frame(self)
        self.grid.pack()
        # You win label (kept hidden until a 2048 tile appears)
        self.win_label = tk.Label(self, text="You win!", font=("Arial", 16))
        self.win_label.pack_forget()
        # Continue button
        self.continue_button = tk.Button(self, text="Continue", command=self.continue_game, font=("Arial", 10),
                                         width=33, borderwidth=1, relief="ridge")
        self.continue_button.pack_forget()
        # Game over label
        self.defeat_label = tk.Label(self, text="Game over!", font=("Arial", 16))
        self.defeat_label.pack_forget()
        self.start()

    def start(self):
        """Reset board and score, spawn two starting tiles, enable arrow keys."""
        self.tab = zeros((_GRID_SIZE, _GRID_SIZE), dtype=int)
        self.old_tab = zeros((_GRID_SIZE, _GRID_SIZE), dtype=int)
        self.score = 0
        self.old_score = 0
        self.update_score()
        self.random_tile()
        self.random_tile()
        self.bind_key()

    def random_tile(self):
        """Spawn a 2 (80%) or 4 (20%) on a random empty cell and redraw."""
        random_row = random.randrange(_GRID_SIZE)
        random_column = random.randrange(_GRID_SIZE)
        # Re-draw coordinates until an empty cell is found
        # (callers guarantee at least one empty cell exists).
        while self.tab[random_row][random_column] != 0:
            random_row = random.randrange(_GRID_SIZE)
            random_column = random.randrange(_GRID_SIZE)
        if random.randrange(10) < 8:
            self.tab[random_row][random_column] = 2  # 80% chance
        else:
            self.tab[random_row][random_column] = 4  # 20% chance
        self.display_tab()

    def bind_key(self):
        """Attach the four arrow-key handlers."""
        self.bind("<Up>", lambda e: self.key_up_pressed())
        self.bind("<Down>", lambda e: self.key_down_pressed())
        self.bind("<Right>", lambda e: self.key_right_pressed())
        self.bind("<Left>", lambda e: self.key_left_pressed())

    def unbind_key(self):
        """Detach the arrow-key handlers (used on win/defeat)."""
        self.unbind("<Up>")
        self.unbind("<Down>")
        self.unbind("<Right>")
        self.unbind("<Left>")

    def restart(self):
        """Start a fresh game with a new color scheme and recreate end-game widgets."""
        self.random_color = random.randrange(16777215)
        self.start()
        # You win label (recreated because continue_game/restart may have destroyed it)
        self.win_label.destroy()
        self.win_label = tk.Label(self, text="You win!", font=("Arial", 16))
        # Continue button
        self.continue_button.destroy()
        self.continue_button = tk.Button(self, text="Continue", command=self.continue_game, font=("Arial", 10),
                                         width=33, borderwidth=1, relief="ridge")
        # Game over label
        self.defeat_label.destroy()
        self.defeat_label = tk.Label(self, text="Game over!", font=("Arial", 16))
        self.undo_button['state'] = tk.DISABLED

    def undo(self):
        """Revert the board and score to the state before the last move (one step only)."""
        if any(self.old_tab) and not array_equal(self.tab, self.old_tab):
            self.tab = self.old_tab.copy()
            self.display_tab()
            self.score = self.old_score
            self.score_text.set('Score\n' + str(self.score))
        self.undo_button['state'] = tk.DISABLED

    def continue_game(self):
        """Resume play after reaching 2048 (dismiss win widgets, rebind keys)."""
        self.win_label.destroy()
        self.continue_button.destroy()
        self.bind_key()
        self.undo_button['state'] = tk.NORMAL

    def update_score(self):
        """Refresh the score label and persist a new best score to score.txt."""
        self.score_text.set('Score\n' + str(self.score))
        if self.best < self.score:
            self.best = self.score
            self.best_text.set('Best\n' + str(self.best))
            Path('score.txt').write_text(str(self.best))

    def display_tab(self):
        """Redraw the whole grid from self.tab, restoring merge-marked (odd) values."""
        for label in self.grid.winfo_children():
            label.destroy()
        for x in range(_GRID_SIZE):
            for y in range(_GRID_SIZE):
                if self.tab[x][y] == 0:
                    label = tk.Label(self.grid, text=None, font=("Arial", 20),
                                     width=4, height=2, borderwidth=1, relief="ridge")
                elif self.tab[x][y] % 2 != 0:
                    # Odd value = tile merged this move; restore the real (even) value.
                    self.tab[x][y] += 1
                    label = tk.Label(self.grid, text=self.tab[x][y], font=("Arial", 20),
                                     width=4, height=2, borderwidth=1, relief="ridge", underline=0)
                else:
                    label = tk.Label(self.grid, text=self.tab[x][y], font=("Arial", 20),
                                     width=4, height=2, borderwidth=1, relief="ridge")
                # Colors: shade derived from tile value plus the per-game base color.
                # NOTE(review): for large tiles the value may push the sum past
                # 0xFFFFFF, producing a 7-digit color string — verify tkinter input.
                if self.tab[x][y] != 0:
                    label.config(bg="#" + '{0:06X}'.format(self.tab[x][y] * 20 + self.random_color))
                label.grid(row=x, column=y)

    def update_tab(self):
        """Check win/defeat conditions after a move and update UI state."""
        self.undo_button['state'] = tk.NORMAL
        defeat = 1
        for x in range(_GRID_SIZE):
            for y in range(_GRID_SIZE):
                if self.win_label.winfo_exists():
                    # check win (only while the win label has not been dismissed)
                    if self.tab[x][y] == 2048:
                        self.win_label.pack()
                        self.continue_button.pack()
                        self.unbind_key()
                        self.undo_button['state'] = tk.DISABLED
                # defeat: the game continues if any cell is empty or any
                # neighbor pair (right / below) could still merge
                if self.tab[x][y] == 0:
                    defeat = 0
                else:
                    # check x
                    if 0 <= x < _GRID_SIZE - 1:
                        if self.tab[x][y] == self.tab[x + 1][y]:
                            defeat = 0
                    # check y
                    if 0 <= y < _GRID_SIZE - 1:
                        if self.tab[x][y] == self.tab[x][y + 1]:
                            defeat = 0
        if defeat == 1:
            self.defeat_label.pack()
            self.unbind_key()
            self.undo_button['state'] = tk.DISABLED

    def move_up(self):
        """Slide/merge all tiles upward; return 1 if anything moved, else 0."""
        move = 0
        # Snapshot pre-move state for Undo.
        self.old_tab = copy(self.tab)
        self.old_score = self.score
        for row in range(_GRID_SIZE):
            for column in range(_GRID_SIZE):
                # merge: find the next non-empty tile below and merge if equal;
                # the merged value is decremented (made odd) so it cannot merge
                # again this move — display_tab() restores it.
                if self.tab[column][row] != 0:
                    i = 1
                    while column + i < _GRID_SIZE and self.tab[column + i][row] != self.tab[column][row] \
                            and self.tab[column + i][row] == 0:
                        i += 1
                    if column + i < _GRID_SIZE and self.tab[column + i][row] == self.tab[column][row]:
                        self.tab[column][row] *= 2
                        self.score += self.tab[column][row]
                        self.update_score()
                        self.tab[column][row] -= 1
                        self.tab[column + i][row] = 0
                        move = 1
                # move: shift the tile into the first empty cell from the top
                if self.tab[column][row] != 0 and column != 0:
                    i = 0
                    while i < _GRID_SIZE and self.tab[i][row] != 0:
                        i += 1
                    if i < column and self.tab[i][row] == 0:
                        self.tab[i][row] = self.tab[column][row]
                        self.tab[column][row] = 0
                        move = 1
        return move

    def move_down(self):
        """Slide/merge all tiles downward; return 1 if anything moved, else 0."""
        move = 0
        # Snapshot pre-move state for Undo.
        self.old_tab = copy(self.tab)
        self.old_score = self.score
        for row in range(_GRID_SIZE - 1, -1, -1):
            for column in range(_GRID_SIZE - 1, -1, -1):
                # merge (see move_up for the odd-value merge-marker trick)
                if self.tab[column][row] != 0:
                    i = 1
                    while i < _GRID_SIZE and self.tab[column - i][row] != self.tab[column][row] \
                            and self.tab[column - i][row] == 0:
                        i += 1
                    if i <= column and self.tab[column - i][row] == self.tab[column][row]:
                        self.tab[column][row] *= 2
                        self.score += self.tab[column][row]
                        self.update_score()
                        self.tab[column][row] -= 1
                        self.tab[column - i][row] = 0
                        move = 1
                # move: shift the tile into the first empty cell from the bottom
                if self.tab[column][row] != 0 and column != _GRID_SIZE - 1:
                    i = _GRID_SIZE - 1
                    while i >= 0 and self.tab[i][row] != 0:
                        i -= 1
                    if i > column and self.tab[i][row] == 0:
                        self.tab[i][row] = self.tab[column][row]
                        self.tab[column][row] = 0
                        move = 1
        return move

    def move_right(self):
        """Slide/merge all tiles to the right; return 1 if anything moved, else 0."""
        move = 0
        # Snapshot pre-move state for Undo.
        self.old_tab = copy(self.tab)
        self.old_score = self.score
        for row in range(_GRID_SIZE - 1, -1, -1):
            for column in range(_GRID_SIZE - 1, -1, -1):
                # merge (see move_up for the odd-value merge-marker trick)
                if self.tab[column][row] != 0:
                    i = 1
                    while i < _GRID_SIZE and self.tab[column][row - i] != self.tab[column][row] \
                            and self.tab[column][row - i] == 0:
                        i += 1
                    if i <= row and self.tab[column][row - i] == self.tab[column][row]:
                        self.tab[column][row] *= 2
                        self.score += self.tab[column][row]
                        self.update_score()
                        self.tab[column][row] -= 1
                        self.tab[column][row - i] = 0
                        move = 1
                # move: shift the tile into the first empty cell from the right
                if self.tab[column][row] != 0 and row != _GRID_SIZE - 1:
                    i = _GRID_SIZE - 1
                    while i >= 0 and self.tab[column][i] != 0:
                        i -= 1
                    if i > row and self.tab[column][i] == 0:
                        self.tab[column][i] = self.tab[column][row]
                        self.tab[column][row] = 0
                        move = 1
        return move

    def move_left(self):
        """Slide/merge all tiles to the left; return 1 if anything moved, else 0."""
        move = 0
        # Snapshot pre-move state for Undo.
        self.old_tab = copy(self.tab)
        self.old_score = self.score
        for row in range(_GRID_SIZE):
            for column in range(_GRID_SIZE):
                # merge (see move_up for the odd-value merge-marker trick)
                if self.tab[column][row] != 0:
                    i = 1
                    while row + i < _GRID_SIZE and self.tab[column][row + i] != self.tab[column][row] \
                            and self.tab[column][row + i] == 0:
                        i += 1
                    if row + i < _GRID_SIZE and self.tab[column][row + i] == self.tab[column][row]:
                        self.tab[column][row] *= 2
                        self.score += self.tab[column][row]
                        self.update_score()
                        self.tab[column][row] -= 1
                        self.tab[column][row + i] = 0
                        move = 1
                # move: shift the tile into the first empty cell from the left
                if self.tab[column][row] != 0 and row != 0:
                    i = 0
                    while i < _GRID_SIZE and self.tab[column][i] != 0:
                        i += 1
                    if i < row and self.tab[column][i] == 0:
                        self.tab[column][i] = self.tab[column][row]
                        self.tab[column][row] = 0
                        move = 1
        return move

    def key_up_pressed(self):
        """Handle <Up>: move, then spawn a tile and re-check the board if anything moved."""
        if self.move_up() == 1:
            self.random_tile()
            self.update_tab()

    def key_down_pressed(self):
        """Handle <Down>: move, then spawn a tile and re-check the board if anything moved."""
        if self.move_down() == 1:
            self.random_tile()
            self.update_tab()

    def key_right_pressed(self):
        """Handle <Right>: move, then spawn a tile and re-check the board if anything moved."""
        if self.move_right() == 1:
            self.random_tile()
            self.update_tab()

    def key_left_pressed(self):
        """Handle <Left>: move, then spawn a tile and re-check the board if anything moved."""
        if self.move_left() == 1:
            self.random_tile()
            self.update_tab()
if __name__ == "__main__":
    # Launch the game only when run as a script (not on import).
    app = Application()
    app.mainloop()
| [
"numpy.copy",
"random.randrange",
"pathlib.Path",
"tkinter.Button",
"numpy.any",
"tkinter.StringVar",
"numpy.zeros",
"tkinter.Tk.__init__",
"numpy.array_equal",
"tkinter.Label",
"tkinter.Frame"
] | [((194, 214), 'tkinter.Tk.__init__', 'tk.Tk.__init__', (['self'], {}), '(self)\n', (208, 214), True, 'import tkinter as tk\n'), ((394, 420), 'random.randrange', 'random.randrange', (['(16777215)'], {}), '(16777215)\n', (410, 420), False, 'import random\n'), ((474, 488), 'tkinter.Frame', 'tk.Frame', (['self'], {}), '(self)\n', (482, 488), True, 'import tkinter as tk\n'), ((626, 640), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (638, 640), True, 'import tkinter as tk\n'), ((727, 848), 'tkinter.Label', 'tk.Label', (['self.score_frame'], {'textvariable': 'self.score_text', 'font': "('Arial', 16)", 'width': '(11)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self.score_frame, textvariable=self.score_text, font=('Arial', 16),\n width=11, borderwidth=1, relief='ridge')\n", (735, 848), True, 'import tkinter as tk\n'), ((1133, 1147), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1145, 1147), True, 'import tkinter as tk\n'), ((1230, 1350), 'tkinter.Label', 'tk.Label', (['self.score_frame'], {'textvariable': 'self.best_text', 'font': "('Arial', 16)", 'width': '(11)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self.score_frame, textvariable=self.best_text, font=('Arial', 16),\n width=11, borderwidth=1, relief='ridge')\n", (1238, 1350), True, 'import tkinter as tk\n'), ((1483, 1497), 'tkinter.Frame', 'tk.Frame', (['self'], {}), '(self)\n', (1491, 1497), True, 'import tkinter as tk\n'), ((1586, 1718), 'tkinter.Button', 'tk.Button', (['self.button_frame'], {'text': '"""Restart"""', 'command': 'self.restart', 'font': "('Arial', 10)", 'width': '(16)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self.button_frame, text='Restart', command=self.restart, font=(\n 'Arial', 10), width=16, borderwidth=1, relief='ridge')\n", (1595, 1718), True, 'import tkinter as tk\n'), ((1844, 1988), 'tkinter.Button', 'tk.Button', (['self.button_frame'], {'text': '"""Undo"""', 'command': 'self.undo', 'font': "('Arial', 10)", 'width': '(16)', 'borderwidth': 
'(1)', 'relief': '"""ridge"""', 'state': 'tk.DISABLED'}), "(self.button_frame, text='Undo', command=self.undo, font=('Arial',\n 10), width=16, borderwidth=1, relief='ridge', state=tk.DISABLED)\n", (1853, 1988), True, 'import tkinter as tk\n'), ((2113, 2127), 'tkinter.Frame', 'tk.Frame', (['self'], {}), '(self)\n', (2121, 2127), True, 'import tkinter as tk\n'), ((2207, 2258), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""You win!"""', 'font': "('Arial', 16)"}), "(self, text='You win!', font=('Arial', 16))\n", (2215, 2258), True, 'import tkinter as tk\n'), ((2356, 2481), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Continue"""', 'command': 'self.continue_game', 'font': "('Arial', 10)", 'width': '(33)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self, text='Continue', command=self.continue_game, font=('Arial',\n 10), width=33, borderwidth=1, relief='ridge')\n", (2365, 2481), True, 'import tkinter as tk\n'), ((2620, 2673), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Game over!"""', 'font': "('Arial', 16)"}), "(self, text='Game over!', font=('Arial', 16))\n", (2628, 2673), True, 'import tkinter as tk\n'), ((2783, 2825), 'numpy.zeros', 'zeros', (['(_GRID_SIZE, _GRID_SIZE)'], {'dtype': 'int'}), '((_GRID_SIZE, _GRID_SIZE), dtype=int)\n', (2788, 2825), False, 'from numpy import array_equal, copy, zeros, any\n'), ((2850, 2892), 'numpy.zeros', 'zeros', (['(_GRID_SIZE, _GRID_SIZE)'], {'dtype': 'int'}), '((_GRID_SIZE, _GRID_SIZE), dtype=int)\n', (2855, 2892), False, 'from numpy import array_equal, copy, zeros, any\n'), ((3113, 3141), 'random.randrange', 'random.randrange', (['_GRID_SIZE'], {}), '(_GRID_SIZE)\n', (3129, 3141), False, 'import random\n'), ((3167, 3195), 'random.randrange', 'random.randrange', (['_GRID_SIZE'], {}), '(_GRID_SIZE)\n', (3183, 3195), False, 'import random\n'), ((4071, 4097), 'random.randrange', 'random.randrange', (['(16777215)'], {}), '(16777215)\n', (4087, 4097), False, 'import random\n'), ((4205, 4256), 
'tkinter.Label', 'tk.Label', (['self'], {'text': '"""You win!"""', 'font': "('Arial', 16)"}), "(self, text='You win!', font=('Arial', 16))\n", (4213, 4256), True, 'import tkinter as tk\n'), ((4356, 4481), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Continue"""', 'command': 'self.continue_game', 'font': "('Arial', 10)", 'width': '(33)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self, text='Continue', command=self.continue_game, font=('Arial',\n 10), width=33, borderwidth=1, relief='ridge')\n", (4365, 4481), True, 'import tkinter as tk\n'), ((4613, 4666), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Game over!"""', 'font': "('Arial', 16)"}), "(self, text='Game over!', font=('Arial', 16))\n", (4621, 4666), True, 'import tkinter as tk\n'), ((7815, 7829), 'numpy.copy', 'copy', (['self.tab'], {}), '(self.tab)\n', (7819, 7829), False, 'from numpy import array_equal, copy, zeros, any\n'), ((9180, 9194), 'numpy.copy', 'copy', (['self.tab'], {}), '(self.tab)\n', (9184, 9194), False, 'from numpy import array_equal, copy, zeros, any\n'), ((10567, 10581), 'numpy.copy', 'copy', (['self.tab'], {}), '(self.tab)\n', (10571, 10581), False, 'from numpy import array_equal, copy, zeros, any\n'), ((11953, 11967), 'numpy.copy', 'copy', (['self.tab'], {}), '(self.tab)\n', (11957, 11967), False, 'from numpy import array_equal, copy, zeros, any\n'), ((3281, 3309), 'random.randrange', 'random.randrange', (['_GRID_SIZE'], {}), '(_GRID_SIZE)\n', (3297, 3309), False, 'import random\n'), ((3339, 3367), 'random.randrange', 'random.randrange', (['_GRID_SIZE'], {}), '(_GRID_SIZE)\n', (3355, 3367), False, 'import random\n'), ((3382, 3402), 'random.randrange', 'random.randrange', (['(10)'], {}), '(10)\n', (3398, 3402), False, 'import random\n'), ((4751, 4768), 'numpy.any', 'any', (['self.old_tab'], {}), '(self.old_tab)\n', (4754, 4768), False, 'from numpy import array_equal, copy, zeros, any\n'), ((4777, 4812), 'numpy.array_equal', 'array_equal', (['self.tab', 
'self.old_tab'], {}), '(self.tab, self.old_tab)\n', (4788, 4812), False, 'from numpy import array_equal, copy, zeros, any\n'), ((5457, 5474), 'pathlib.Path', 'Path', (['"""score.txt"""'], {}), "('score.txt')\n", (5461, 5474), False, 'from pathlib import Path\n'), ((5759, 5863), 'tkinter.Label', 'tk.Label', (['self.grid'], {'text': 'None', 'font': "('Arial', 20)", 'width': '(4)', 'height': '(2)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self.grid, text=None, font=('Arial', 20), width=4, height=2,\n borderwidth=1, relief='ridge')\n", (5767, 5863), True, 'import tkinter as tk\n'), ((1023, 1040), 'pathlib.Path', 'Path', (['"""score.txt"""'], {}), "('score.txt')\n", (1027, 1040), False, 'from pathlib import Path\n'), ((6015, 6142), 'tkinter.Label', 'tk.Label', (['self.grid'], {'text': 'self.tab[x][y]', 'font': "('Arial', 20)", 'width': '(4)', 'height': '(2)', 'borderwidth': '(1)', 'relief': '"""ridge"""', 'underline': '(0)'}), "(self.grid, text=self.tab[x][y], font=('Arial', 20), width=4,\n height=2, borderwidth=1, relief='ridge', underline=0)\n", (6023, 6142), True, 'import tkinter as tk\n'), ((6229, 6343), 'tkinter.Label', 'tk.Label', (['self.grid'], {'text': 'self.tab[x][y]', 'font': "('Arial', 20)", 'width': '(4)', 'height': '(2)', 'borderwidth': '(1)', 'relief': '"""ridge"""'}), "(self.grid, text=self.tab[x][y], font=('Arial', 20), width=4,\n height=2, borderwidth=1, relief='ridge')\n", (6237, 6343), True, 'import tkinter as tk\n')] |
import numpy as np
from fast3tree import fast3tree, get_distance, find_friends_of_friends
points = np.random.rand(1000, 3)
def find_sphere(c, points, r, box_size=-1):
    """Brute-force reference: indices of *points* within distance *r* of center *c*.

    box_size=-1 means non-periodic; a positive value wraps distances periodically
    (forwarded to fast3tree.get_distance).
    """
    distances = get_distance(c, points, box_size)
    return np.where(distances < r)[0]
def test_fast3tree():
    """query_radius must agree with the brute-force find_sphere reference."""
    center = np.array([0.5, 0.5, 0.5])
    radius = 0.1
    with fast3tree(points) as tree:
        found = np.sort(tree.query_radius(center, radius))
        expected = find_sphere(center, points, radius)
        assert found.size == expected.size
        assert (found == expected).all()
def test_fast3tree_periodic():
    """With periodic boundaries on [0, 1), a query at the origin must wrap around."""
    center = np.array([0, 0, 0])
    radius = 0.2
    with fast3tree(points) as tree:
        tree.set_boundaries(0, 1)
        found = np.sort(tree.query_radius(center, radius, periodic=True))
        expected = find_sphere(center, points, radius, box_size=1.0)
        assert found.size == expected.size
        assert (found == expected).all()
def test_fast3tree_index():
    """When the tree is built with a custom index array, query_radius returns
    the caller's labels rather than positional indices."""
    center = np.array([0.5, 0.5, 0.5])
    radius = 0.1
    labels = np.random.randint(0, 100000, size=len(points))
    with fast3tree(points, labels) as tree:
        found = np.sort(tree.query_radius(center, radius))
        expected = np.sort(labels[find_sphere(center, points, radius)])
        assert found.size == expected.size
        assert (found == expected).all()
def prepare_fof(n_points=50, n_groups=8, n_dim=2, scale=0.01, seed=0):
    """Build a shuffled set of clustered points plus their true group labels.

    Each of the *n_groups* clusters is a Gaussian blob of *n_points* points with
    spread *scale*, centered at a uniform-random location in the unit cube.
    Both the blob offsets and the center are drawn from a fresh
    RandomState(seed + i), so results are fully reproducible per seed.

    Parameters
    ----------
    n_points : int   -- points per cluster
    n_groups : int   -- number of clusters
    n_dim    : int   -- dimensionality of each point
    scale    : float -- standard deviation of each cluster
    seed     : int   -- base RNG seed (cluster i uses seed + i)

    Returns
    -------
    (points, answer) : (ndarray of shape (n_points*n_groups, n_dim),
                        int ndarray of shape (n_points*n_groups,))
        Shuffled points and the matching group label for each row.
    """
    n_total = n_points * n_groups
    # BUG FIX: np.vstack/np.hstack with a bare generator was deprecated in
    # NumPy 1.16 and is an error in modern NumPy -- pass a real sequence.
    points = np.vstack([
        np.random.RandomState(seed + i).randn(n_points, n_dim) * scale
        + np.random.RandomState(seed + i).rand(n_dim)
        for i in range(n_groups)
    ])
    # Equivalent to hstack-ing np.repeat(i, n_points) for each group.
    answer = np.repeat(np.arange(n_groups), n_points)
    # One deterministic permutation applied to points and labels alike.
    shuffle = np.random.RandomState(seed).choice(n_total, n_total, replace=False)
    points = points[shuffle]
    answer = answer[shuffle]
    return points, answer
def test_fof_d2():
    """FoF in 2-D: each recovered group must map one-to-one onto a true group."""
    scale = 0.01
    n_groups = 8
    pts, labels = prepare_fof(n_groups=n_groups, n_dim=2, scale=scale, seed=100)
    groups = find_friends_of_friends(pts, scale * 2)
    for g in range(n_groups):
        # All members of FoF group g share one true label, and vice versa.
        assert np.unique(labels[groups == g]).size == 1
        assert np.unique(groups[labels == g]).size == 1
def test_fof_d3():
    """FoF in 3-D: each recovered group must map one-to-one onto a true group."""
    scale = 0.01
    n_groups = 8
    pts, labels = prepare_fof(n_groups=n_groups, n_dim=3, scale=scale, seed=200)
    groups = find_friends_of_friends(pts, scale * 3)
    for g in range(n_groups):
        # All members of FoF group g share one true label, and vice versa.
        assert np.unique(labels[groups == g]).size == 1
        assert np.unique(groups[labels == g]).size == 1
| [
"fast3tree.get_distance",
"numpy.repeat",
"numpy.random.rand",
"numpy.unique",
"fast3tree.fast3tree",
"numpy.array",
"fast3tree.find_friends_of_friends",
"numpy.random.RandomState"
] | [((100, 123), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (114, 123), True, 'import numpy as np\n'), ((262, 287), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (270, 287), True, 'import numpy as np\n'), ((545, 564), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (553, 564), True, 'import numpy as np\n'), ((882, 907), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (890, 907), True, 'import numpy as np\n'), ((1865, 1907), 'fast3tree.find_friends_of_friends', 'find_friends_of_friends', (['points', '(scale * 2)'], {}), '(points, scale * 2)\n', (1888, 1907), False, 'from fast3tree import fast3tree, get_distance, find_friends_of_friends\n'), ((2191, 2233), 'fast3tree.find_friends_of_friends', 'find_friends_of_friends', (['points', '(scale * 3)'], {}), '(points, scale * 3)\n', (2214, 2233), False, 'from fast3tree import fast3tree, get_distance, find_friends_of_friends\n'), ((309, 326), 'fast3tree.fast3tree', 'fast3tree', (['points'], {}), '(points)\n', (318, 326), False, 'from fast3tree import fast3tree, get_distance, find_friends_of_friends\n'), ((586, 603), 'fast3tree.fast3tree', 'fast3tree', (['points'], {}), '(points)\n', (595, 603), False, 'from fast3tree import fast3tree, get_distance, find_friends_of_friends\n'), ((988, 1012), 'fast3tree.fast3tree', 'fast3tree', (['points', 'index'], {}), '(points, index)\n', (997, 1012), False, 'from fast3tree import fast3tree, get_distance, find_friends_of_friends\n'), ((1500, 1522), 'numpy.repeat', 'np.repeat', (['i', 'n_points'], {}), '(i, n_points)\n', (1509, 1522), True, 'import numpy as np\n'), ((1564, 1591), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1585, 1591), True, 'import numpy as np\n'), ((189, 222), 'fast3tree.get_distance', 'get_distance', (['c', 'points', 'box_size'], {}), '(c, points, box_size)\n', (201, 222), False, 'from fast3tree import fast3tree, 
get_distance, find_friends_of_friends\n'), ((1955, 1982), 'numpy.unique', 'np.unique', (['answer[fof == i]'], {}), '(answer[fof == i])\n', (1964, 1982), True, 'import numpy as np\n'), ((2008, 2035), 'numpy.unique', 'np.unique', (['fof[answer == i]'], {}), '(fof[answer == i])\n', (2017, 2035), True, 'import numpy as np\n'), ((2281, 2308), 'numpy.unique', 'np.unique', (['answer[fof == i]'], {}), '(answer[fof == i])\n', (2290, 2308), True, 'import numpy as np\n'), ((2334, 2361), 'numpy.unique', 'np.unique', (['fof[answer == i]'], {}), '(fof[answer == i])\n', (2343, 2361), True, 'import numpy as np\n'), ((1407, 1438), 'numpy.random.RandomState', 'np.random.RandomState', (['(seed + i)'], {}), '(seed + i)\n', (1428, 1438), True, 'import numpy as np\n'), ((1346, 1377), 'numpy.random.RandomState', 'np.random.RandomState', (['(seed + i)'], {}), '(seed + i)\n', (1367, 1377), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.