code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import division
import os
import numpy as np
import math
from PIL import Image
from skimage.metrics import structural_similarity as ssim
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
def save_img(image_tensor, filename):
    """Save a tensor scaled to [-1, 1] as an 8-bit PNG named '<filename>.png'.

    The leading (batch) dimension is squeezed away before conversion.
    """
    array = image_tensor.squeeze(0).float().numpy()
    # Map [-1, 1] -> [0, 255], clamp, then cast so PIL gets valid uint8 data.
    array = ((array + 1) / 2.0 * 255.0).clip(0, 255).astype(np.uint8)
    Image.fromarray(array).save(filename + '.png')
def psnr(img1, img2):
    """Peak signal-to-noise ratio between two arrays scaled to [0, 1].

    Returns the sentinel value 100 for identical inputs, where PSNR
    would otherwise be infinite.
    """
    mse = np.mean(np.square(img1 - img2))
    if mse == 0:
        return 100
    peak = 1.0
    return 20 * math.log10(peak / math.sqrt(mse))
def compute_metrics(real, pred):
    """Compute (PSNR, SSIM) between a reference and a predicted tensor.

    Parameters
    ----------
    real, pred : torch.Tensor
        Shaped (1, 1, H, W) with values in [-1, 1]; `pred` may carry gradients
        and is detached before conversion.

    Returns
    -------
    tuple : (psnr, ssim) computed on the [0, 1]-rescaled images.
    """
    real = real.squeeze(0).squeeze(0).cpu()
    pred = pred.detach().squeeze(0).squeeze(0).cpu()
    real = real.float().numpy()
    pred = pred.float().numpy()
    # Rescale from [-1, 1] to [0, 1] before metric evaluation.
    real = (real + 1.0) / 2.0
    pred = (pred + 1.0) / 2.0
    cur_psnr = psnr(real, pred)
    # data_range=1.0 matches the [0, 1] images above. Without it, skimage
    # infers the range from the float dtype (2.0, assuming [-1, 1]), which
    # skews the SSIM score; recent skimage versions reject the call outright.
    # `multichannel=False` was dropped: it equals the default and the kwarg
    # is deprecated (replaced by `channel_axis`) in skimage >= 0.19.
    cur_ssim = ssim(real, pred, data_range=1.0, gaussian_weights=True,
                    use_sample_covariance=False)
    return cur_psnr, cur_ssim
def find_latest_model(net_path):
    """Return the checkpoint path with the highest epoch number in `net_path`.

    Expects files named '<X>_model_epoch_<n>.pth'; which prefix is used
    ('G' or 'D') is inferred from the trailing characters of `net_path`.
    Returns False when the folder holds no '.pth' file.
    """
    # Characters 14:-4 of e.g. 'G_model_epoch_12.pth' are the epoch number.
    epochs = [int(name[14:-4]) for name in os.listdir(net_path) if ".pth" in name]
    if not epochs:
        return False
    latest = max(epochs)
    if net_path.endswith('G'):
        return os.path.join(net_path, "G_model_epoch_{}.pth".format(latest))
    if net_path[-2] == '_':
        return os.path.join(net_path, "D_model_epoch_{}.pth".format(latest))
class LambdaLR():
    """Linear learning-rate decay schedule.

    The multiplier returned by :meth:`step` stays at 1.0 until
    `decay_start_epoch`, then falls linearly to 0.0 at `n_epochs`.
    """
    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the LR multiplier for `epoch` (shifted by `self.offset`)."""
        decay_progress = max(0, epoch + self.offset - self.decay_start_epoch)
        return 1.0 - decay_progress / (self.n_epochs - self.decay_start_epoch)
def plot_losses():
    """Plot training/validation curves from record files in the working directory.

    Reads four whitespace-delimited logs written during training and saves
    each figure as a .jpg in the current directory:

    - loss_record.txt : per-iteration losses; columns are
      d_loss, g_loss, gan_loss, l2_loss, cycle_loss (see plot labels below)
    - psnr_record.txt : per-epoch average validation PSNR
    - ssim_record.txt : per-epoch average validation SSIM
    - ddg_record.txt  : per-epoch averages; columns are d_fake, d_real, gan
    """
    loss_record = "loss_record.txt"
    psnr_record = "psnr_record.txt"
    ddg_record = "ddg_record.txt"
    ssim_record = "ssim_record.txt"
    losses_dg = np.loadtxt(loss_record)
    psnr_ave = np.loadtxt(psnr_record)
    ssim_ave = np.loadtxt(ssim_record)
    ddg_ave = np.loadtxt(ddg_record)
    # Discriminator loss, subsampled to every 100th iteration.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 0], 'r-', label='d_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    #plt.xlim(xmin=-5, xmax=300) # xmax=300
    #plt.ylim(ymin=0, ymax=60) # ymax=60
    plt.title("Discriminator Loss")
    plt.savefig("plot_d_loss.jpg")
    # Generator loss, same subsampling.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 1], 'g-', label='g_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    #plt.xlim(xmin=-5, xmax=300)
    #plt.ylim(ymin=0, ymax=60)
    plt.title("Generator Loss")
    plt.savefig("plot_g_loss.jpg")
    # The three generator loss components on one figure.
    plt.figure()
    plt.plot(losses_dg[0:-1:100, 3], 'b--', label='l2_loss')
    plt.plot(losses_dg[0:-1:100, 4], 'y-', label='cycle_loss')
    plt.plot(losses_dg[0:-1:100, 2], 'k-', label='gan_loss')
    plt.xlabel("iteration*100")
    plt.ylabel("Error")
    plt.legend()
    # plt.xlim(xmin=-5, xmax=480)
    # plt.ylim(ymin=0, ymax=16)
    plt.title("L2_Grad_DarkChan Loss")
    plt.savefig("plot_4g_losses.jpg")
    # plt.show()
    # Per-epoch validation PSNR.
    plt.figure()
    plt.plot(psnr_ave, 'r-')
    plt.xlabel("epochs")
    plt.ylabel("Average PSNR")
    # plt.xlim(xmin=-5, xmax=300) # xmax=300
    # NOTE(review): the ymin/ymax kwargs were removed from plt.ylim in
    # matplotlib >= 3.0 (use bottom/top) -- confirm the pinned matplotlib version.
    plt.ylim(ymin=0, ymax=30.) # ymax=60
    plt.title("Validation PSNR")
    plt.savefig("plot_psnr_loss.jpg")
    # Per-epoch validation SSIM.
    plt.figure()
    plt.plot(ssim_ave, 'r-')
    plt.xlabel("epochs")
    plt.ylabel("Average SSIM")
    # plt.xlim(xmin=-5, xmax=300) # xmax=300
    # plt.ylim(ymin=0, ymax=30.) # ymax=60
    plt.title("Validation SSIM")
    plt.savefig("plot_ssim_loss.jpg")
    # Adversarial loss components; note the title says PSNR but these are losses.
    plt.figure()
    plt.plot(ddg_ave[:, 0], 'b-', label='d_fake')
    plt.plot(ddg_ave[:, 1], 'r-', label='d_real')
    plt.plot(ddg_ave[:, 2], 'g-', label='gan')
    plt.xlabel("epochs")
    plt.ylabel("Average loss")
    plt.legend()
    # plt.xlim(xmin=-5, xmax=300) # xmax=300
    plt.ylim(ymin=0, ymax=2.) # ymax=60
    plt.title("D1_D2_G PSNR")
    plt.savefig("plot_ddg_loss.jpg")
#plot_losses()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"math.sqrt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.use",
"skimage.metrics.structural_similarity",
"numpy.mean",
"numpy.loadtxt",
"PIL.Image.fromarray",
"matplotlib.pyplot.ylabel",
"ma... | [((178, 194), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (185, 194), True, 'import matplotlib as mpl\n'), ((481, 509), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (496, 509), False, 'from PIL import Image\n'), ((580, 607), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (587, 607), True, 'import numpy as np\n'), ((1022, 1114), 'skimage.metrics.structural_similarity', 'ssim', (['real', 'pred'], {'gaussian_weights': '(True)', 'multichannel': '(False)', 'use_sample_covariance': '(False)'}), '(real, pred, gaussian_weights=True, multichannel=False,\n use_sample_covariance=False)\n', (1026, 1114), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((1192, 1212), 'os.listdir', 'os.listdir', (['net_path'], {}), '(net_path)\n', (1202, 1212), False, 'import os\n'), ((2233, 2256), 'numpy.loadtxt', 'np.loadtxt', (['loss_record'], {}), '(loss_record)\n', (2243, 2256), True, 'import numpy as np\n'), ((2272, 2295), 'numpy.loadtxt', 'np.loadtxt', (['psnr_record'], {}), '(psnr_record)\n', (2282, 2295), True, 'import numpy as np\n'), ((2311, 2334), 'numpy.loadtxt', 'np.loadtxt', (['ssim_record'], {}), '(ssim_record)\n', (2321, 2334), True, 'import numpy as np\n'), ((2349, 2371), 'numpy.loadtxt', 'np.loadtxt', (['ddg_record'], {}), '(ddg_record)\n', (2359, 2371), True, 'import numpy as np\n'), ((2377, 2389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2387, 2389), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2448), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_dg[0:-1:100, 0]', '"""r-"""'], {'label': '"""d_loss"""'}), "(losses_dg[0:-1:100, 0], 'r-', label='d_loss')\n", (2402, 2448), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration*100"""'], {}), "('iteration*100')\n", (2463, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2504), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (2495, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2627), 'matplotlib.pyplot.title', 'plt.title', (['"""Discriminator Loss"""'], {}), "('Discriminator Loss')\n", (2605, 2627), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2662), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_d_loss.jpg"""'], {}), "('plot_d_loss.jpg')\n", (2643, 2662), True, 'import matplotlib.pyplot as plt\n'), ((2668, 2680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2678, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2685, 2739), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_dg[0:-1:100, 1]', '"""g-"""'], {'label': '"""g_loss"""'}), "(losses_dg[0:-1:100, 1], 'g-', label='g_loss')\n", (2693, 2739), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration*100"""'], {}), "('iteration*100')\n", (2754, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2795), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (2786, 2795), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2891), 'matplotlib.pyplot.title', 'plt.title', (['"""Generator Loss"""'], {}), "('Generator Loss')\n", (2873, 2891), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2926), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_g_loss.jpg"""'], {}), "('plot_g_loss.jpg')\n", (2907, 2926), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2944), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2942, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2949, 3005), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_dg[0:-1:100, 3]', '"""b--"""'], {'label': '"""l2_loss"""'}), "(losses_dg[0:-1:100, 3], 'b--', label='l2_loss')\n", (2957, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3068), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_dg[0:-1:100, 4]', '"""y-"""'], {'label': '"""cycle_loss"""'}), 
"(losses_dg[0:-1:100, 4], 'y-', label='cycle_loss')\n", (3018, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3073, 3129), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_dg[0:-1:100, 2]', '"""k-"""'], {'label': '"""gan_loss"""'}), "(losses_dg[0:-1:100, 2], 'k-', label='gan_loss')\n", (3081, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration*100"""'], {}), "('iteration*100')\n", (3144, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3185), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (3176, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3202), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3200, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3273, 3307), 'matplotlib.pyplot.title', 'plt.title', (['"""L2_Grad_DarkChan Loss"""'], {}), "('L2_Grad_DarkChan Loss')\n", (3282, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3345), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_4g_losses.jpg"""'], {}), "('plot_4g_losses.jpg')\n", (3323, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3378, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3409), 'matplotlib.pyplot.plot', 'plt.plot', (['psnr_ave', '"""r-"""'], {}), "(psnr_ave, 'r-')\n", (3393, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3424, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3465), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average PSNR"""'], {}), "('Average PSNR')\n", (3449, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3516, 3543), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)', 'ymax': '(30.0)'}), '(ymin=0, ymax=30.0)\n', (3524, 3543), True, 'import matplotlib.pyplot as plt\n'), ((3558, 3586), 
'matplotlib.pyplot.title', 'plt.title', (['"""Validation PSNR"""'], {}), "('Validation PSNR')\n", (3567, 3586), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3624), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_psnr_loss.jpg"""'], {}), "('plot_psnr_loss.jpg')\n", (3602, 3624), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3642), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3640, 3642), True, 'import matplotlib.pyplot as plt\n'), ((3647, 3671), 'matplotlib.pyplot.plot', 'plt.plot', (['ssim_ave', '"""r-"""'], {}), "(ssim_ave, 'r-')\n", (3655, 3671), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3686, 3696), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average SSIM"""'], {}), "('Average SSIM')\n", (3711, 3727), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3850), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation SSIM"""'], {}), "('Validation SSIM')\n", (3831, 3850), True, 'import matplotlib.pyplot as plt\n'), ((3855, 3888), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_ssim_loss.jpg"""'], {}), "('plot_ssim_loss.jpg')\n", (3866, 3888), True, 'import matplotlib.pyplot as plt\n'), ((3894, 3906), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3904, 3906), True, 'import matplotlib.pyplot as plt\n'), ((3911, 3956), 'matplotlib.pyplot.plot', 'plt.plot', (['ddg_ave[:, 0]', '"""b-"""'], {'label': '"""d_fake"""'}), "(ddg_ave[:, 0], 'b-', label='d_fake')\n", (3919, 3956), True, 'import matplotlib.pyplot as plt\n'), ((3961, 4006), 'matplotlib.pyplot.plot', 'plt.plot', (['ddg_ave[:, 1]', '"""r-"""'], {'label': '"""d_real"""'}), "(ddg_ave[:, 1], 'r-', label='d_real')\n", (3969, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4053), 'matplotlib.pyplot.plot', 'plt.plot', (['ddg_ave[:, 2]', '"""g-"""'], {'label': '"""gan"""'}), "(ddg_ave[:, 2], 
'g-', label='gan')\n", (4019, 4053), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4078), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (4068, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4109), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average loss"""'], {}), "('Average loss')\n", (4093, 4109), True, 'import matplotlib.pyplot as plt\n'), ((4114, 4126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4124, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4177, 4203), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)', 'ymax': '(2.0)'}), '(ymin=0, ymax=2.0)\n', (4185, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4218, 4243), 'matplotlib.pyplot.title', 'plt.title', (['"""D1_D2_G PSNR"""'], {}), "('D1_D2_G PSNR')\n", (4227, 4243), True, 'import matplotlib.pyplot as plt\n'), ((4248, 4280), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_ddg_loss.jpg"""'], {}), "('plot_ddg_loss.jpg')\n", (4259, 4280), True, 'import matplotlib.pyplot as plt\n'), ((703, 717), 'math.sqrt', 'math.sqrt', (['mse'], {}), '(mse)\n', (712, 717), False, 'import math\n')] |
"""
Implementation of circular fingerprint calculation. Class developed from a pre-existing class from <NAME>,
postdoc in Computational Chemistry (led by <NAME>) and under the supervision of Ola Engkvist and <NAME>.
"""
import numpy as np # Linear algebra
import pandas as pd # Data wrangling
# Chemistry packages
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit import DataStructs
class Fingerprint(object):
    """Computes circular (Morgan / ECFP-style) fingerprints for SMILES strings."""

    def __init__(self, smiles):
        """
        Initialiser.
        : smiles (pd.Series): contains canonical SMILES strings
        """
        self.smiles = smiles
        # Convert once up front so every fingerprint call reuses the molecules.
        self.smiles_converted = self.smiles_convert()

    def smiles_convert(self):
        """
        Converts SMILES strings into RDKit molecules suitable for fingerprint calculation.
        """
        return [Chem.MolFromSmiles(entry) for entry in self.smiles]

    def morgan(self, radius, size = None):
        """
        Calculates circular fingerprints and renders them as a DataFrame, so that it is easier to handle.
        : radius (int): radius = 2 ~ ECFP4, radius = 3 ~ ECFP6
        : size (int, optional): number of bits to generate. If None, it will be assigned
                                the standard value of 2048
        """
        if size is None:
            size = 2048
        rows = []
        for mol in self.smiles_converted:
            bit_vector = AllChem.GetMorganFingerprintAsBitVect(mol, radius, size)
            dense = np.zeros((1,))
            # RDKit fills `dense` in place with the fingerprint bits.
            DataStructs.ConvertToNumpyArray(bit_vector, dense)
            rows.append(dense)
        return pd.DataFrame(rows)
def calculate_circular_fp(data, radius = 3, size = 2048):
    """
    Generates circular fingerprints (defaults radius=3, 2048 bits, ~ECFP6) for SMILES strings.
    : data (pd.DataFrame): canonical SMILES strings to fingerprint
    : radius (int): Morgan radius; radius = 3 corresponds to ~ECFP6
    : size (int): number of bits per fingerprint
    """
    fp = Fingerprint(data)
    fps = fp.morgan(radius = radius, size = size)
return fps | [
"pandas.DataFrame",
"rdkit.DataStructs.ConvertToNumpyArray",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"numpy.zeros",
"rdkit.Chem.MolFromSmiles"
] | [((1803, 1823), 'pandas.DataFrame', 'pd.DataFrame', (['np_fps'], {}), '(np_fps)\n', (1815, 1823), True, 'import pandas as pd\n'), ((950, 976), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (968, 976), False, 'from rdkit import Chem\n'), ((1521, 1575), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['m', 'radius', 'size'], {}), '(m, radius, size)\n', (1558, 1575), False, 'from rdkit.Chem import AllChem\n'), ((1678, 1692), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1686, 1692), True, 'import numpy as np\n'), ((1705, 1745), 'rdkit.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp', 'arr'], {}), '(fp, arr)\n', (1736, 1745), False, 'from rdkit import DataStructs\n')] |
""" Contains Batch classes for images """
import os
import warnings
from numbers import Number
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageChops
import PIL.ImageFilter
import PIL.ImageEnhance
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from .batch import Batch
from .decorators import action, apply_parallel, inbatch_parallel
from .dsindex import FilesIndex
class BaseImagesBatch(Batch):
    """ Batch class for 2D images.

    Defines the components ('images', 'labels', 'masks') and the generic
    load/dump entry points; the actual per-image readers/writers
    (:meth:`._load_image`, :meth:`._dump_image`) are abstract here and are
    implemented by subclasses.

    Note, that if any class method is wrapped with `@apply_parallel` decorator
    than for inner calls (i.e. from other class methods) should be used version
    of desired method with underscores. (For example, if there is a decorated
    `method` than you need to call `_method_` from inside of `other_method`).
    Same is applicable for all child classes of :class:`batch.Batch`.
    """
    components = "images", "labels", "masks"
    # Class-specific defaults for :meth:`.Batch.apply_parallel`
    apply_defaults = dict(target='for',
                          post='_assemble',
                          src='images',
                          dst='images',
                          )
    def _make_path(self, ix, src=None):
        """ Compose path.
        Parameters
        ----------
        ix : str
            element's index (filename)
        src : str
            Path to folder with images. Used if `self.index` is not `FilesIndex`.
        Returns
        -------
        path : str
            Full path to an element.
        """
        # Prefer an explicit FilesIndex passed as `src`, then the batch's own
        # index; fall back to joining `src` with the stringified index.
        if isinstance(src, FilesIndex):
            path = src.get_fullpath(ix)
        elif isinstance(self.index, FilesIndex):
            path = self.index.get_fullpath(ix)
        else:
            path = os.path.join(src, str(ix))
        return path
    def _load_image(self, ix, src=None, fmt=None, dst="images"):
        """ Loads image.
        .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.
        Parameters
        ----------
        src : str, dataset.FilesIndex, None
            path to the folder with an image. If src is None then it is determined from the index.
        dst : str
            Component to write images to.
        fmt : str
            Format of the an image
        Raises
        ------
        NotImplementedError
            If this method is not defined in a child class
        """
        _ = self, ix, src, dst, fmt
        raise NotImplementedError("Must be implemented in a child class")
    @action
    def load(self, *args, src=None, fmt=None, dst=None, **kwargs):
        """ Load data.
        .. note:: if `fmt='images'` than ``components`` must be a single component (str).
        .. note:: All parameters must be named only.
        Parameters
        ----------
        src : str, None
            Path to the folder with data. If src is None then path is determined from the index.
        fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}
            Format of the file to download.
        dst : str, sequence
            components to download.
        """
        if fmt == 'image':
            # NOTE(review): `src` is passed as the first positional argument of
            # `_load_image`, whose raw signature expects `ix`. In subclasses the
            # `inbatch_parallel` wrapper supplies `ix` per element, shifting the
            # positionals -- confirm against the decorator's calling convention.
            return self._load_image(src, fmt=fmt, dst=dst)
        return super().load(src=src, fmt=fmt, dst=dst, *args, **kwargs)
    def _dump_image(self, ix, src='images', dst=None, fmt=None):
        """ Saves image to dst.
        .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.
        Parameters
        ----------
        src : str
            Component to get images from.
        dst : str
            Folder where to dump. If dst is None then it is determined from index.
        Raises
        ------
        NotImplementedError
            If this method is not defined in a child class
        """
        _ = self, ix, src, dst, fmt
        raise NotImplementedError("Must be implemented in a child class")
    @action
    def dump(self, *args, dst=None, fmt=None, components="images", **kwargs):
        """ Dump data.
        .. note:: If `fmt='images'` than ``dst`` must be a single component (str).
        .. note:: All parameters must be named only.
        Parameters
        ----------
        dst : str, None
            Path to the folder where to dump. If dst is None then path is determined from the index.
        fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}
            Format of the file to save.
        components : str, sequence
            Components to save.
        ext: str
            Format to save images to. Required (via kwargs) when fmt == 'image'.
        Returns
        -------
        self
        """
        if fmt == 'image':
            # `ext` must be supplied by the caller: pop() with no default
            # raises KeyError otherwise.
            return self._dump_image(components, dst, fmt=kwargs.pop('ext'))
        return super().dump(dst=dst, fmt=fmt, components=components, *args, **kwargs)
class ImagesBatch(BaseImagesBatch):
""" Batch class for 2D images.
Images are stored as numpy arrays of PIL.Image.
PIL.Image has the following system of coordinates::
X
0 -------------- >
|
|
| images's pixels
|
|
Y v
Pixel's position is defined as (x, y)
Note, that if any class method is wrapped with `@apply_parallel` decorator
than for inner calls (i.e. from other class methods) should be used version
of desired method with underscores. (For example, if there is a decorated
`method` than you need to call `_method_` from inside of `other_method`).
Same is applicable for all child classes of :class:`batch.Batch`.
"""
@classmethod
def _get_image_shape(cls, image):
if isinstance(image, PIL.Image.Image):
return image.size
return image.shape[:2]
    @property
    def image_shape(self):
        """: tuple - shape of the image; raises RuntimeError if batch images differ in shape"""
        # NOTE(review): shapes are gathered via `image.size`, which is
        # (width, height) for PIL images but the total element *count* for
        # ndarrays -- confirm this property is only used on PIL batches or
        # on ndarray batches with identical shapes.
        _, shapes_count = np.unique([image.size for image in self.images], return_counts=True, axis=0)
        if len(shapes_count) == 1:
            # Append the channel count for PIL images; ndarrays already
            # carry channels in .shape.
            if isinstance(self.images[0], PIL.Image.Image):
                return (*self.images[0].size, len(self.images[0].getbands()))
            return self.images[0].shape
        raise RuntimeError('Images have different shapes')
    @inbatch_parallel(init='indices', post='_assemble')
    def _load_image(self, ix, src=None, fmt=None, dst="images"):
        """ Loads image
        .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.
        Parameters
        ----------
        ix : str
            element's index; supplied per element by the `inbatch_parallel`
            wrapper (init='indices'), results gathered via '_assemble'.
        src : str, dataset.FilesIndex, None
            Path to the folder with an image. If src is None then it is determined from the index.
        dst : str
            Component to write images to.
        fmt : str
            Format of an image.  # currently unused: PIL infers the format from the file
        """
        return PIL.Image.open(self._make_path(ix, src))
@inbatch_parallel(init='indices')
def _dump_image(self, ix, src='images', dst=None, fmt=None):
""" Saves image to dst.
.. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.
Parameters
----------
src : str
Component to get images from.
dst : str
Folder where to dump.
fmt : str
Format of saved image.
"""
if dst is None:
raise RuntimeError('You must specify `dst`')
image = self.get(ix, src)
ix = str(ix) + '.' + fmt if fmt is not None else str(ix)
image.save(os.path.join(dst, ix))
def _assemble_component(self, result, *args, component='images', **kwargs):
""" Assemble one component after parallel execution.
Parameters
----------
result : sequence, array_like
Results after inbatch_parallel.
component : str
component to assemble
"""
_ = args, kwargs
if isinstance(result[0], PIL.Image.Image):
setattr(self, component, np.asarray(result, dtype=object))
else:
try:
setattr(self, component, np.stack(result))
except ValueError:
array_result = np.empty(len(result), dtype=object)
array_result[:] = result
setattr(self, component, array_result)
@apply_parallel
def to_pil(self, image, mode=None):
"""converts images in Batch to PIL format
Parameters
----------
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
"""
if isinstance(image, PIL.Image.Image):
return image
if mode is None:
if len(image.shape) == 2:
mode = 'L'
elif len(image.shape) == 3:
if image.shape[-1] == 3:
mode = 'RGB'
elif image.shape[-1] == 1:
mode = 'L'
image = image[:, :, 0]
elif image.shape[-1] == 2:
mode = 'LA'
elif image.shape[-1] == 4:
mode = 'RGBA'
else:
raise ValueError('Unknown image type as image has', image.shape[-1], 'channels')
elif mode == 'L' and len(image.shape) == 3:
image = image[..., 0]
return PIL.Image.fromarray(image, mode)
def _calc_origin(self, image_shape, origin, background_shape):
""" Calculate coordinate of the input image with respect to the background.
Parameters
----------
image_shape : sequence
shape of the input image.
origin : array_like, sequence, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}
Position of the input image with respect to the background. Can be one of:
- 'center' - place the center of the input image on the center of the background and crop
the input image accordingly.
- 'top_left' - place the upper-left corner of the input image on the upper-left of the background
and crop the input image accordingly.
- 'top_right' - crop an image such that upper-right corners of
an image and the cropping box coincide
- 'bottom_left' - crop an image such that lower-left corners of
an image and the cropping box coincide
- 'bottom_right' - crop an image such that lower-right corners of
an image and the cropping box coincide
- 'random' - place the upper-left corner of the input image on the randomly sampled position
in the background. Position is sampled uniformly such that there is no need for cropping.
- other - sequence of ints or sequence of floats in [0, 1) interval;
place the upper-left corner of the input image on the given position in the background.
If `origin` is a sequence of floats in [0, 1), it defines a relative position of
the origin in a valid region of image.
background_shape : sequence
shape of the background image.
Returns
-------
sequence : calculated origin in the form (column, row)
"""
if isinstance(origin, str):
if origin == 'top_left':
origin = 0, 0
elif origin == 'top_right':
origin = (background_shape[0]-image_shape[0]+1, 0)
elif origin == 'bottom_left':
origin = (0, background_shape[1]-image_shape[1]+1)
elif origin == 'bottom_right':
origin = (background_shape[0]-image_shape[0]+1,
background_shape[1]-image_shape[1]+1)
elif origin == 'center':
origin = np.maximum(0, np.asarray(background_shape) - image_shape) // 2
elif origin == 'random':
origin = (np.random.randint(background_shape[0]-image_shape[0]+1),
np.random.randint(background_shape[1]-image_shape[1]+1))
else:
raise ValueError("If string, origin should be one of ['center', 'top_left', 'top_right', "
"'bottom_left', 'bottom_right', 'random']. Got '{}'.".format(origin))
elif all(0 <= elem < 1 for elem in origin):
region = ((background_shape[0]-image_shape[0]+1),
(background_shape[1]-image_shape[1]+1))
origin = np.asarray(origin) * region
elif not all(isinstance(elem, int) for elem in origin):
raise ValueError('If not a string, origin should be either a sequence of ints or sequence of '
'floats in [0, 1) interval. Got {}'.format(origin))
return np.asarray(origin, dtype=np.int)
@apply_parallel
def scale(self, image, factor, preserve_shape=False, origin='center', resample=0):
""" Scale the content of each image in the batch.
Resulting shape is obtained as original_shape * factor.
Parameters
-----------
factor : float, sequence
resulting shape is obtained as original_shape * factor
- float - scale all axes with the given factor
- sequence (factor_1, factort_2, ...) - scale each axis with the given factor separately
preserve_shape : bool
whether to preserve the shape of the image after scaling
origin : array-like, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}
Relevant only if `preserve_shape` is True.
If `scale` < 1, defines position of the scaled image with respect to the original one's shape.
If `scale` > 1, defines position of cropping box.
Can be one of:
- 'center' - place the center of the input image on the center of the background and crop
the input image accordingly.
- 'top_left' - place the upper-left corner of the input image on the upper-left of the background
and crop the input image accordingly.
- 'top_right' - crop an image such that upper-right corners of
an image and the cropping box coincide
- 'bottom_left' - crop an image such that lower-left corners of
an image and the cropping box coincide
- 'bottom_right' - crop an image such that lower-right corners of
an image and the cropping box coincide
- 'random' - place the upper-left corner of the input image on the randomly sampled position
in the background. Position is sampled uniformly such that there is no need for cropping.
- array_like - sequence of ints or sequence of floats in [0, 1) interval;
place the upper-left corner of the input image on the given position in the background.
If `origin` is a sequence of floats in [0, 1), it defines a relative position
of the origin in a valid region of image.
resample: int
Parameter passed to PIL.Image.resize. Interpolation order
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
Notes
-----
Using 'random' option for origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
Returns
-------
self
"""
original_shape = self._get_image_shape(image)
rescaled_shape = list(np.int32(np.ceil(np.asarray(original_shape)*factor)))
rescaled_image = image.resize(rescaled_shape, resample=resample)
if preserve_shape:
rescaled_image = self._preserve_shape(original_shape, rescaled_image, origin)
return rescaled_image
@apply_parallel
def crop(self, image, origin, shape, crop_boundaries=False):
""" Crop an image.
Extract image data from the window of the size given by `shape` and placed at `origin`.
Parameters
----------
origin : sequence, str
Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
shape : sequence
crop size in the form of (rows, columns)
crop_boundaries : bool
If `True` then crop is got only from image's area. Shape of the crop might diverge with the passed one
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
Notes
-----
Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
"""
origin = self._calc_origin(shape, origin, image.size)
right_bottom = origin + shape
if crop_boundaries:
out_of_boundaries = origin < 0
origin[out_of_boundaries] = 0
image_shape = np.asarray(image.size)
out_of_boundaries = right_bottom > image_shape
right_bottom[out_of_boundaries] = image_shape[out_of_boundaries]
return image.crop((*origin, *right_bottom))
@apply_parallel
def put_on_background(self, image, background, origin, mask=None):
""" Put an image on a background at given origin
Parameters
----------
background : PIL.Image, np.ndarray of np.uint8
Blank background to put image on.
origin : sequence, str
Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
mask : None, PIL.Image, np.ndarray of np.uint8
mask passed to PIL.Image.paste
Notes
-----
Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
"""
if not isinstance(background, PIL.Image.Image):
background = PIL.Image.fromarray(background)
else:
background = background.copy()
if not isinstance(mask, PIL.Image.Image):
mask = PIL.Image.fromarray(mask) if mask is not None else None
origin = list(self._calc_origin(self._get_image_shape(image), origin,
self._get_image_shape(background)))
background.paste(image, origin, mask)
return background
def _preserve_shape(self, original_shape, transformed_image, origin='center'):
    """ Fit a transformed image back into the original shape.
    If the transformed image is smaller than the original along any axis, it is
    put onto a black (zero) background of the original shape; otherwise it is
    cropped down to the original shape.
    Parameters
    ----------
    original_shape : sequence
        Target (rows, columns) shape.
    transformed_image : PIL.Image
        Image to reshape.
    origin : {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'} or array-like
        Position of the image with respect to the background / cropping box.
        See :meth:`.ImagesBatch._calc_origin` for details.
    Returns
    -------
    np.ndarray : image after described actions
    """
    current_shape = self._get_image_shape(transformed_image)
    needs_padding = np.any(np.array(current_shape) < np.array(original_shape))
    if needs_padding:
        n_channels = len(transformed_image.getbands())
        # Grayscale backgrounds are 2-D; multi-channel ones carry a channel axis.
        bg_shape = original_shape if n_channels == 1 else (*original_shape, n_channels)
        background = np.zeros(bg_shape, dtype=np.uint8)
        return self._put_on_background_(transformed_image, background, origin)
    return self._crop_(transformed_image, origin, original_shape, True)
@apply_parallel
def filter(self, image, mode, *args, **kwargs):
    """ Filter an image with ``PIL.ImageFilter``.
    Equivalent to ``image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))``.
    For more details see `ImageFilter <http://pillow.readthedocs.io/en/stable/reference/ImageFilter.html>_`.
    Parameters
    ----------
    mode : str
        Name of the filter.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    filter_cls = getattr(PIL.ImageFilter, mode)
    return image.filter(filter_cls(*args, **kwargs))
@apply_parallel
def transform(self, image, *args, **kwargs):
    """ Apply ``image.transform(*args, **kwargs)``.
    The output size defaults to the current image shape unless ``size`` is passed.
    For more information see
    `<http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform>_`.
    Parameters
    ----------
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    if 'size' in kwargs:
        size = kwargs.pop('size')
    else:
        size = self._get_image_shape(image)
    return image.transform(*args, size=size, **kwargs)
@apply_parallel
def resize(self, image, size, *args, **kwargs):
    """ Resize an image via ``image.resize(*args, **kwargs)``.
    For more details see `<https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize>_`.
    Parameters
    ----------
    size : tuple
        the resulting size of the image. If one of the components of tuple is None,
        corresponding dimension will be proportionally resized.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    width, height = size
    if width is None and height is None:
        raise ValueError('At least one component of the parameter "size" must be a number.')
    # A missing component is derived so that the aspect ratio is preserved.
    if width is None:
        width = int(image.size[0] * height / image.size[1])
    elif height is None:
        height = int(image.size[1] * width / image.size[0])
    return image.resize((width, height), *args, **kwargs)
@apply_parallel
def shift(self, image, offset, mode='const'):
    """ Shift an image by the given offset.
    Parameters
    ----------
    offset : (Number, Number)
    mode : {'const', 'wrap'}
        How to fill borders
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    if mode == 'wrap':
        # Pixels shifted off one edge re-enter from the opposite edge.
        return PIL.ImageChops.offset(image, *offset)
    if mode == 'const':
        # Inverse affine translation: negative offsets map output to input coords.
        affine_data = (1, 0, -offset[0], 0, 1, -offset[1])
        return image.transform(size=image.size,
                               method=PIL.Image.AFFINE,
                               data=affine_data)
    raise ValueError("mode must be one of ['const', 'wrap']")
@apply_parallel
def pad(self, image, *args, **kwargs):
    """ Pad an image with a border via ``PIL.ImageOps.expand``.
    For more details see `<http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.expand>`_.
    Parameters
    ----------
    offset : sequence
        Size of the borders in pixels. The order is (left, top, right, bottom).
    mode : {'const', 'wrap'}
        Filling mode
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Thin delegation; all argument handling is done by Pillow.
    return PIL.ImageOps.expand(image, *args, **kwargs)
@apply_parallel
def rotate(self, image, *args, **kwargs):
    """ Rotate an image; arguments are forwarded to ``PIL.Image.rotate``.
    Parameters
    ----------
    angle: Number
        In degrees counter clockwise.
    resample: int
        Interpolation order
    expand: bool
        Whether to expand the output to hold the whole image. Default is False.
    center: (Number, Number)
        Center of rotation. Default is the center of the image.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Thin delegation to Pillow's rotate.
    return image.rotate(*args, **kwargs)
@apply_parallel
def flip(self, image, mode='lr'):
    """ Flip an image horizontally or vertically.
    Parameters
    ----------
    mode : {'lr', 'ud'}
        - 'lr' - apply the left/right flip
        - 'ud' - apply the upside/down flip
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Any mode other than 'lr' falls through to the vertical flip.
    flipper = PIL.ImageOps.mirror if mode == 'lr' else PIL.ImageOps.flip
    return flipper(image)
@apply_parallel
def invert(self, image, channels='all'):
    """ Invert the given channels of an image.
    Parameters
    ----------
    channels : int, sequence
        Indices of the channels to invert.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    if channels == 'all':
        return PIL.ImageChops.invert(image)
    bands = list(image.split())
    if isinstance(channels, Number):
        channels = (channels,)
    for channel_ix in channels:
        bands[channel_ix] = PIL.ImageChops.invert(bands[channel_ix])
    return PIL.Image.merge('RGB', bands)
@apply_parallel
def salt(self, image, p_noise=.015, color=255, size=(1, 1)):
    """ Set random pixels on image to a given value.
    Every pixel will be set to ``color`` value with probability ``p_noise``.
    Parameters
    ----------
    p_noise : float
        Probability of salting a pixel.
    color : float, int, sequence, callable
        Color's value.
        - int, float, sequence -- value of color
        - callable -- color is sampled for every chosen pixel (rules are the same as for int, float and sequence)
    size : int, sequence of int, callable
        Size of salt
        - int -- square salt with side ``size``
        - sequence -- rectangular salt in the form (rows, columns)
        - callable -- size is sampled for every chosen pixel (rules are the same as for int and sequence)
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Bernoulli mask: True marks pixels chosen for salting.
    mask_size = np.asarray(self._get_image_shape(image))
    mask_salt = np.random.binomial(1, p_noise, size=mask_size).astype(bool)
    image = np.array(image)
    if isinstance(size, (tuple, int)) and size in [1, (1, 1)] and not callable(color):
        # Fast path: single-pixel salt with a fixed color -- one vectorized assignment.
        image[mask_salt] = color
    else:
        # General path: size and/or color may be callables, sampled per salted pixel.
        size_lambda = size if callable(size) else lambda: size
        color_lambda = color if callable(color) else lambda: color
        mask_salt = np.where(mask_salt)
        for i in range(len(mask_salt[0])):
            current_size = size_lambda()
            current_size = (current_size, current_size) if isinstance(current_size, Number) else current_size
            left_top = np.asarray((mask_salt[0][i], mask_salt[1][i]))
            # Clip the salt rectangle so it does not run past the image border.
            right_bottom = np.minimum(left_top + current_size, self._get_image_shape(image))
            image[left_top[0]:right_bottom[0], left_top[1]:right_bottom[1]] = color_lambda()
    return PIL.Image.fromarray(image)
@apply_parallel
def clip(self, image, low=0, high=255):
    """ Truncate the image's pixel values to the [low, high] range.
    Parameters
    ----------
    low : int, float, sequence
        Actual pixel's value is equal max(value, low). If sequence is given, then its length must coincide
        with the number of channels in an image and each channel is thresholded separately
    high : int, float, sequence
        Actual pixel's value is equal min(value, high). If sequence is given, then its length must coincide
        with the number of channels in an image and each channel is thresholded separately
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Scalar bounds apply to all three channels alike.
    if isinstance(low, Number):
        low = (low,) * 3
    if isinstance(high, Number):
        high = (high,) * 3
    # Clipping via channel-wise min against `high` and max against `low`.
    upper_bound = PIL.Image.new('RGB', image.size, high)
    lower_bound = PIL.Image.new('RGB', image.size, low)
    capped = PIL.ImageChops.darker(image, upper_bound)
    return PIL.ImageChops.lighter(capped, lower_bound)
@apply_parallel
def enhance(self, image, layout='hcbs', factor=(1, 1, 1, 1)):
    """ Apply enhancements from PIL.ImageEnhance to the image.
    Parameters
    ----------
    layout : str
        defines layout of operations, default is `hcbs`:
        h - color
        c - contrast
        b - brightness
        s - sharpness
    factor : float or tuple of float
        factor of enhancement for each operation listed in `layout`.
    """
    enhancement_names = dict(h='Color', c='Contrast', b='Brightness', s='Sharpness')
    # A scalar factor is broadcast over every operation in the layout.
    if isinstance(factor, float):
        factor = (factor,) * len(layout)
    if len(factor) != len(layout):
        raise ValueError("'layout' and 'factor' should be of same length!")
    # Apply each enhancement in layout order, threading the image through.
    for alias, value in zip(layout, factor):
        name = enhancement_names.get(alias)
        if name is None:
            raise ValueError('Unknown enhancement alias: ', alias)
        image = getattr(PIL.ImageEnhance, name)(image).enhance(value)
    return image
@apply_parallel
def multiply(self, image, multiplier=1., clip=False, preserve_type=False):
    """ Multiply each pixel by the given multiplier.
    Parameters
    ----------
    multiplier : float, sequence
    clip : bool
        whether to force image's pixels to be in [0, 255] or [0, 1.]
    preserve_type : bool
        Whether to preserve ``dtype`` of transformed images.
        If ``False`` is given then the resulting type will be ``float``.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    multiplier = np.float32(multiplier)
    if isinstance(image, PIL.Image.Image):
        if preserve_type is False:
            warnings.warn("Note that some info might be lost during `multiply` transformation since PIL.image "
                          "stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or "
                          "consider using `to_array` action before multiplication.")
        # PIL images are always uint8, so clip and cast back unconditionally.
        return PIL.Image.fromarray(np.clip(multiplier*np.asarray(image), 0, 255).astype(np.uint8))
    # FIX: `np.float` was removed in NumPy 1.24 (deprecated since 1.20); the
    # builtin `float` is the exact alias it used to be, so float64 results are unchanged.
    dtype = image.dtype if preserve_type else float
    if clip:
        # uint8 images clip to [0, 255]; float images clip to [0, 1].
        image = np.clip(multiplier*image, 0, 255 if dtype == np.uint8 else 1.)
    else:
        image = multiplier * image
    return image.astype(dtype)
@apply_parallel
def add(self, image, term=1., clip=False, preserve_type=False):
    """ Add term to each pixel.
    Parameters
    ----------
    term : float, sequence
    clip : bool
        whether to force image's pixels to be in [0, 255] or [0, 1.]
    preserve_type : bool
        Whether to preserve ``dtype`` of transformed images.
        If ``False`` is given then the resulting type will be ``float``.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    term = np.float32(term)
    if isinstance(image, PIL.Image.Image):
        # PIL images are always uint8, so clip and cast back unconditionally.
        return PIL.Image.fromarray(np.clip(term+np.asarray(image), 0, 255).astype(np.uint8))
    # FIX: `np.float` was removed in NumPy 1.24 (deprecated since 1.20); the
    # builtin `float` is the exact alias it used to be, so float64 results are unchanged.
    dtype = image.dtype if preserve_type else float
    if clip:
        # uint8 images clip to [0, 255]; float images clip to [0, 1].
        image = np.clip(term+image, 0, 255 if dtype == np.uint8 else 1.)
    else:
        image = term + image
    return image.astype(dtype)
@apply_parallel
def pil_convert(self, image, mode="L"):
    """ Convert an image's mode via ``image.convert(mode)``.
    Parameters
    ----------
    mode : str
        Pass 'L' to convert to grayscale
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Thin delegation to Pillow's mode conversion.
    return image.convert(mode)
@apply_parallel
def posterize(self, image, bits=4):
    """ Posterize an image.
    More concretely, it quantizes pixels' values so that they have ``2^bits`` colors
    Parameters
    ----------
    bits : int
        Number of bits used to store a color's component.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Thin delegation to Pillow's posterize.
    return PIL.ImageOps.posterize(image, bits)
@apply_parallel
def cutout(self, image, origin, shape, color):
    """ Fill a box of the given shape and color at the given origin.
    .. note:: It is assumed that ``origins``, ``shapes`` and ``colors`` have the same length.
    Parameters
    ----------
    origin : sequence, str
        Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
    shape : sequence, int
        Shape of a filled box. Can be one of:
        - sequence - crop size in the form of (rows, columns)
        - int - shape has squared form
    color : sequence, number
        Color of a filled box. Can be one of:
        - sequence - (r,g,b) form
        - number - grayscale
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    Notes
    -----
    Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
    element, as origin will be sampled independently for each `src` element.
    To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
    """
    # Work on a copy so the source image is left untouched.
    image = image.copy()
    if isinstance(shape, Number):
        shape = (shape, shape)
    if isinstance(color, Number):
        color = (color, color, color)
    origin = self._calc_origin(shape, origin, self._get_image_shape(image))
    patch = PIL.Image.new('RGB', tuple(shape), tuple(color))
    image.paste(patch, tuple(origin))
    return image
def _assemble_patches(self, patches, *args, dst, **kwargs):
    """ Gather per-item patches into one array after parallel execution.
    Parameters
    ----------
    patches : sequence
        Patches to gather. patches.shape must be like (batch.size, patches_i, patch_height, patch_width, n_channels)
    dst : str
        Component to put patches in.
    Returns
    -------
    self
    """
    _ = args, kwargs
    # Flatten the per-item groups into a single sequence of patches.
    setattr(self, dst, np.concatenate(patches))
    return self
@action
@inbatch_parallel(init='indices', post='_assemble_patches')
def split_to_patches(self, ix, patch_shape, stride=1, drop_last=False, src='images', dst=None):
    """ Splits image to patches.
    Small images with the same shape (``patch_shape``) are cropped from the original one with stride ``stride``.
    Parameters
    ----------
    patch_shape : int, sequence
        Patch's shape in the from (rows, columns). If int is given then patches have square shape.
    stride : int, square
        Step of the moving window from which patches are cropped. If int is given then the window has square shape.
    drop_last : bool
        Whether to drop patches whose window covers area out of the image.
        If False is passed then these patches are cropped from the edge of an image. See more in tutorials.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    _ = dst
    image = self.get(ix, src)
    image_shape = self._get_image_shape(image)
    image = np.array(image)
    # Normalize scalar arguments to (rows, columns) pairs.
    stride = (stride, stride) if isinstance(stride, Number) else stride
    patch_shape = (patch_shape, patch_shape) if isinstance(patch_shape, Number) else patch_shape
    patches = []
    def _iterate_columns(row_from, row_to):
        # NOTE(review): `column` indexes axis 0 while `row_from:row_to` slices axis 1,
        # which looks transposed relative to the (rows, columns) convention -- confirm intended.
        column = 0
        while column < image_shape[1]-patch_shape[1]+1:
            patches.append(PIL.Image.fromarray(image[column:column+patch_shape[1], row_from:row_to]))
            column += stride[1]
        if not drop_last and column + patch_shape[1] != image_shape[1]:
            # One extra patch flush with the far edge so the whole image is covered.
            patches.append(PIL.Image.fromarray(image[image_shape[1]-patch_shape[1]:image_shape[1],
                                                      row_from:row_to]))
    row = 0
    while row < image_shape[0]-patch_shape[0]+1:
        _iterate_columns(row, row+patch_shape[0])
        row += stride[0]
    if not drop_last and row + patch_shape[0] != image_shape[0]:
        _iterate_columns(image_shape[0]-patch_shape[0], image_shape[0])
    # Object array keeps the PIL patches without stacking them into one ndarray.
    array = np.empty(len(patches), dtype=object)
    for i, patch in enumerate(patches):
        array[i] = patch
    return array
@apply_parallel
def additive_noise(self, image, noise, clip=False, preserve_type=False):
    """ Add additive noise to an image.
    Parameters
    ----------
    noise : callable
        Distribution. Must have ``size`` parameter.
    clip : bool
        whether to force image's pixels to be in [0, 255] or [0, 1.]
    preserve_type : bool
        Whether to preserve ``dtype`` of transformed images.
        If ``False`` is given then the resulting type will be ``np.float``.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Sample a noise array matching the image's (width, height, channels) or ndarray shape.
    if isinstance(image, PIL.Image.Image):
        sample_shape = (*image.size, len(image.getbands()))
    else:
        sample_shape = image.shape
    return self._add_(image, noise(size=sample_shape), clip, preserve_type)
@apply_parallel
def multiplicative_noise(self, image, noise, clip=False, preserve_type=False):
    """ Add multiplicative noise to an image.
    Parameters
    ----------
    noise : callable
        Distribution. Must have ``size`` parameter.
    clip : bool
        whether to force image's pixels to be in [0, 255] or [0, 1.]
    preserve_type : bool
        Whether to preserve ``dtype`` of transformed images.
        If ``False`` is given then the resulting type will be ``np.float``.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    # Sample a noise array matching the image's (width, height, channels) or ndarray shape.
    if isinstance(image, PIL.Image.Image):
        sample_shape = (*image.size, len(image.getbands()))
    else:
        sample_shape = image.shape
    return self._multiply_(image, noise(size=sample_shape), clip, preserve_type)
@apply_parallel
def elastic_transform(self, image, alpha, sigma, **kwargs):
    """ Deformation of images as described by Simard, Steinkraus and Platt, `Best Practices for Convolutional
    Neural Networks applied to Visual Document Analysis <http://cognitivemedium.com/assets/rmnist/Simard.pdf>_`.
    Code slightly differs from `<https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`_.
    Parameters
    ----------
    alpha : number
        maximum of vectors' norms.
    sigma : number
        Smooth factor.
    src : str
        Component to get images from. Default is 'images'.
    dst : str
        Component to write images to. Default is 'images'.
    p : float
        Probability of applying the transform. Default is 1.
    """
    image = np.array(image)
    # full shape is needed
    shape = image.shape
    if len(shape) == 2:
        # Promote grayscale to a single-channel 3-D array so channel handling is uniform.
        image = image[..., None]
        shape = image.shape
    kwargs.setdefault('mode', 'constant')
    kwargs.setdefault('cval', 0)
    # Random displacement fields in [-1, 1], Gaussian-smoothed and scaled by `alpha`.
    column_shift = gaussian_filter(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha
    row_shift = gaussian_filter(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha
    # np.meshgrid's default 'xy' indexing swaps the first two axes, hence
    # `column` pairs with axis 0 and `row` with axis 1 in `indices` below.
    row, column, channel = np.meshgrid(range(shape[0]), range(shape[1]), range(shape[2]))
    indices = (column + column_shift, row + row_shift, channel)
    # Bilinear resampling of the image at the displaced coordinates.
    distored_image = map_coordinates(image, indices, order=1, mode='reflect')
    if shape[-1] == 1:
        # Drop the channel axis that was added for grayscale input.
        return PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape))[..., 0])
    return PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape)))
| [
"PIL.Image.new",
"numpy.clip",
"PIL.ImageChops.invert",
"numpy.random.randint",
"os.path.join",
"PIL.Image.merge",
"numpy.unique",
"numpy.stack",
"numpy.random.binomial",
"numpy.asarray",
"PIL.ImageOps.expand",
"scipy.ndimage.interpolation.map_coordinates",
"PIL.ImageOps.mirror",
"PIL.Imag... | [((5854, 5930), 'numpy.unique', 'np.unique', (['[image.size for image in self.images]'], {'return_counts': '(True)', 'axis': '(0)'}), '([image.size for image in self.images], return_counts=True, axis=0)\n', (5863, 5930), True, 'import numpy as np\n'), ((9325, 9357), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image', 'mode'], {}), '(image, mode)\n', (9344, 9357), False, 'import PIL\n'), ((12847, 12879), 'numpy.asarray', 'np.asarray', (['origin'], {'dtype': 'np.int'}), '(origin, dtype=np.int)\n', (12857, 12879), True, 'import numpy as np\n'), ((26079, 26122), 'PIL.ImageOps.expand', 'PIL.ImageOps.expand', (['image', '*args'], {}), '(image, *args, **kwargs)\n', (26098, 26122), False, 'import PIL\n'), ((27526, 27550), 'PIL.ImageOps.flip', 'PIL.ImageOps.flip', (['image'], {}), '(image)\n', (27543, 27550), False, 'import PIL\n'), ((29712, 29727), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (29720, 29727), True, 'import numpy as np\n'), ((30542, 30568), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image'], {}), '(image)\n', (30561, 30568), False, 'import PIL\n'), ((31608, 31646), 'PIL.Image.new', 'PIL.Image.new', (['"""RGB"""', 'image.size', 'high'], {}), "('RGB', image.size, high)\n", (31621, 31646), False, 'import PIL\n'), ((31661, 31698), 'PIL.Image.new', 'PIL.Image.new', (['"""RGB"""', 'image.size', 'low'], {}), "('RGB', image.size, low)\n", (31674, 31698), False, 'import PIL\n'), ((33731, 33753), 'numpy.float32', 'np.float32', (['multiplier'], {}), '(multiplier)\n', (33741, 33753), True, 'import numpy as np\n'), ((35243, 35259), 'numpy.float32', 'np.float32', (['term'], {}), '(term)\n', (35253, 35259), True, 'import numpy as np\n'), ((36738, 36773), 'PIL.ImageOps.posterize', 'PIL.ImageOps.posterize', (['image', 'bits'], {}), '(image, bits)\n', (36760, 36773), False, 'import PIL\n'), ((38882, 38905), 'numpy.concatenate', 'np.concatenate', (['patches'], {}), '(patches)\n', (38896, 38905), True, 'import numpy as np\n'), ((40213, 
40228), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (40221, 40228), True, 'import numpy as np\n'), ((44201, 44216), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (44209, 44216), True, 'import numpy as np\n'), ((44849, 44905), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['image', 'indices'], {'order': '(1)', 'mode': '"""reflect"""'}), "(image, indices, order=1, mode='reflect')\n", (44864, 44905), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((7452, 7473), 'os.path.join', 'os.path.join', (['dst', 'ix'], {}), '(dst, ix)\n', (7464, 7473), False, 'import os\n'), ((17642, 17664), 'numpy.asarray', 'np.asarray', (['image.size'], {}), '(image.size)\n', (17652, 17664), True, 'import numpy as np\n'), ((18793, 18824), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['background'], {}), '(background)\n', (18812, 18824), False, 'import PIL\n'), ((27484, 27510), 'PIL.ImageOps.mirror', 'PIL.ImageOps.mirror', (['image'], {}), '(image)\n', (27503, 27510), False, 'import PIL\n'), ((28077, 28105), 'PIL.ImageChops.invert', 'PIL.ImageChops.invert', (['image'], {}), '(image)\n', (28098, 28105), False, 'import PIL\n'), ((28369, 28398), 'PIL.Image.merge', 'PIL.Image.merge', (['"""RGB"""', 'bands'], {}), "('RGB', bands)\n", (28384, 28398), False, 'import PIL\n'), ((30032, 30051), 'numpy.where', 'np.where', (['mask_salt'], {}), '(mask_salt)\n', (30040, 30051), True, 'import numpy as np\n'), ((31737, 31771), 'PIL.ImageChops.darker', 'PIL.ImageChops.darker', (['image', 'high'], {}), '(image, high)\n', (31758, 31771), False, 'import PIL\n'), ((34358, 34423), 'numpy.clip', 'np.clip', (['(multiplier * image)', '(0)', '(255 if dtype == np.uint8 else 1.0)'], {}), '(multiplier * image, 0, 255 if dtype == np.uint8 else 1.0)\n', (34365, 34423), True, 'import numpy as np\n'), ((35500, 35559), 'numpy.clip', 'np.clip', (['(term + image)', '(0)', '(255 if dtype == np.uint8 else 1.0)'], {}), '(term + image, 0, 255 if dtype == np.uint8 
else 1.0)\n', (35507, 35559), True, 'import numpy as np\n'), ((7921, 7953), 'numpy.asarray', 'np.asarray', (['result'], {'dtype': 'object'}), '(result, dtype=object)\n', (7931, 7953), True, 'import numpy as np\n'), ((18952, 18977), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['mask'], {}), '(mask)\n', (18971, 18977), False, 'import PIL\n'), ((21415, 21442), 'numpy.array', 'np.array', (['transformed_shape'], {}), '(transformed_shape)\n', (21423, 21442), True, 'import numpy as np\n'), ((21445, 21469), 'numpy.array', 'np.array', (['original_shape'], {}), '(original_shape)\n', (21453, 21469), True, 'import numpy as np\n'), ((21592, 21632), 'numpy.zeros', 'np.zeros', (['original_shape'], {'dtype': 'np.uint8'}), '(original_shape, dtype=np.uint8)\n', (21600, 21632), True, 'import numpy as np\n'), ((21680, 21735), 'numpy.zeros', 'np.zeros', (['(*original_shape, n_channels)'], {'dtype': 'np.uint8'}), '((*original_shape, n_channels), dtype=np.uint8)\n', (21688, 21735), True, 'import numpy as np\n'), ((25231, 25268), 'PIL.ImageChops.offset', 'PIL.ImageChops.offset', (['image', '*offset'], {}), '(image, *offset)\n', (25252, 25268), False, 'import PIL\n'), ((28311, 28348), 'PIL.ImageChops.invert', 'PIL.ImageChops.invert', (['bands[channel]'], {}), '(bands[channel])\n', (28332, 28348), False, 'import PIL\n'), ((29636, 29682), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p_noise'], {'size': 'mask_size'}), '(1, p_noise, size=mask_size)\n', (29654, 29682), True, 'import numpy as np\n'), ((30285, 30331), 'numpy.asarray', 'np.asarray', (['(mask_salt[0][i], mask_salt[1][i])'], {}), '((mask_salt[0][i], mask_salt[1][i]))\n', (30295, 30331), True, 'import numpy as np\n'), ((33856, 34102), 'warnings.warn', 'warnings.warn', (['"""Note that some info might be lost during `multiply` transformation since PIL.image stores data as `np.uint8`. 
To suppress this warning, use `preserve_type=True` or consider using `to_array` action before multiplication."""'], {}), "(\n 'Note that some info might be lost during `multiply` transformation since PIL.image stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or consider using `to_array` action before multiplication.'\n )\n", (33869, 34102), False, 'import warnings\n'), ((44497, 44533), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'shape'}), '(-1, 1, size=shape)\n', (44514, 44533), True, 'import numpy as np\n'), ((44596, 44632), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'shape'}), '(-1, 1, size=shape)\n', (44613, 44632), True, 'import numpy as np\n'), ((8027, 8043), 'numpy.stack', 'np.stack', (['result'], {}), '(result)\n', (8035, 8043), True, 'import numpy as np\n'), ((12551, 12569), 'numpy.asarray', 'np.asarray', (['origin'], {}), '(origin)\n', (12561, 12569), True, 'import numpy as np\n'), ((40590, 40665), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image[column:column + patch_shape[1], row_from:row_to]'], {}), '(image[column:column + patch_shape[1], row_from:row_to])\n', (40609, 40665), False, 'import PIL\n'), ((40808, 40903), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image[image_shape[1] - patch_shape[1]:image_shape[1], row_from:row_to]'], {}), '(image[image_shape[1] - patch_shape[1]:image_shape[1],\n row_from:row_to])\n', (40827, 40903), False, 'import PIL\n'), ((15955, 15981), 'numpy.asarray', 'np.asarray', (['original_shape'], {}), '(original_shape)\n', (15965, 15981), True, 'import numpy as np\n'), ((34217, 34234), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (34227, 34234), True, 'import numpy as np\n'), ((35359, 35376), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (35369, 35376), True, 'import numpy as np\n'), ((11986, 12045), 'numpy.random.randint', 'np.random.randint', (['(background_shape[0] - image_shape[0] + 1)'], {}), 
'(background_shape[0] - image_shape[0] + 1)\n', (12003, 12045), True, 'import numpy as np\n'), ((12069, 12128), 'numpy.random.randint', 'np.random.randint', (['(background_shape[1] - image_shape[1] + 1)'], {}), '(background_shape[1] - image_shape[1] + 1)\n', (12086, 12128), True, 'import numpy as np\n'), ((11874, 11902), 'numpy.asarray', 'np.asarray', (['background_shape'], {}), '(background_shape)\n', (11884, 11902), True, 'import numpy as np\n')] |
import glob
import scipy.io as ff
import pandas as pd
import yt
import numpy as np
import os as os
props=['M','x','y','z','vx','vy','vz','jx_g','jy_g','jz_g','dMB','dME','dM','rho','cs','dv','Esave','jx_bh','jy_bh','jz_bh','spinmag','eps_sink', 'rho_stars', 'rho_dm', 'vx_stars', 'vy_stars', 'vz_stars', 'vx_dm', 'vy_dm', 'vz_dm', 'n_stars', 'n_dm', 'rho_lowspeed_stars', 'rho_lowspeed_dm', 'fact_fast_stars', 'fact_fast_dm']
os.system('mkdir sinks')
os.system('mv sink_* ./sinks')
files=glob.glob('output_*/info*')
ds=yt.load(files[-1])
df={tmpprop:[] for tmpprop in props}
df=pd.DataFrame(data=df)
if ds.cosmological_simulation==1:
df=pd.concat([pd.DataFrame(data={'a':[]}),df],axis=1)
else:
df=pd.concat([pd.DataFrame(data={'t':[]}),df],axis=1)
files=glob.glob('./sinks/sink*')
files.sort()
d=ds.all_data()
dx=float(d[('index','dx')].min().in_units('pc'))
dx_dm=float(ds.length_unit.in_units('pc')/2**ds.max_level*(1+ds.current_redshift))
for f in files:
p=ff.FortranFile(f)
p.read_ints(); p.read_ints()
a=list(p.read_reals('d'))
scale_l=p.read_reals('d')
scale_d=p.read_reals('d')
scale_t=p.read_reals('d')
bhid=p.read_ints()
d={tmpprop:p.read_reals('d') for tmpprop in props[:30]}
d=pd.DataFrame(data=d, index=bhid)
d = pd.concat([d, pd.DataFrame(data={tmpprop:p.read_ints() for tmpprop in props[30:32]}, index=bhid)], axis=1)
d = pd.concat([d, pd.DataFrame(data={tmpprop:p.read_reals('d') for tmpprop in props[32:]}, index=bhid)], axis=1)
t=list(p.read_reals('d'))
d['M']*=scale_d*scale_l**3/2e33
d['vx']*=scale_l/1e5/scale_t
d['vy']*=scale_l/1e5/scale_t
d['vz']*=scale_l/1e5/scale_t
d['dMB']*=scale_d*scale_l**3/2e33 /scale_t * 3600*24*365
d['dME']*=scale_d*scale_l**3/2e33 /scale_t * 3600*24*365
d['dM']*=scale_d*scale_l**3/2e33
d['rho']*=scale_d/1.67e-24
d['cs']*=scale_l/1e5/scale_t
d['dv']*=scale_l/1e5/scale_t
d['Esave']*=scale_l/1e5/scale_t
d['vx_stars']*=scale_l/1e5/scale_t
d['vy_stars']*=scale_l/1e5/scale_t
d['vz_stars']*=scale_l/1e5/scale_t
d['vx_dm']*=scale_l/1e5/scale_t
d['vy_dm']*=scale_l/1e5/scale_t
d['vz_dm']*=scale_l/1e5/scale_t
d['rho_stars']*=scale_d/1.67e-24
d['rho_dm']*=scale_d/1.67e-24
d['rho_lowspeed_stars']*=scale_d/1.67e-24
d['rho_lowspeed_dm']*=scale_d/1.67e-24
d['fact_fast_stars']*=scale_d/1.67e-24
d['fact_fast_dm']*=scale_d/1.67e-24
for tmpbhid in bhid:
if tmpbhid not in df.index:
df.loc[tmpbhid]=[[]]+[[] for tmpprop in props]
bh=df.loc[tmpbhid]
dd=d.loc[tmpbhid]
if ds.cosmological_simulation==1:
bh['a']+=a
else:
bh['t']+=t
for tmpprop in props:
bh[tmpprop]+=[dd[tmpprop]]
for bhid in df.index:
tmp={tmpprop:df.loc[bhid][tmpprop] for tmpprop in props}
if ds.cosmological_simulation==1:
tmp.update({'a':df.loc[bhid]['a']})
tmp=pd.DataFrame(data=tmp)
tmp=pd.concat([tmp, pd.DataFrame({'t':np.copy(ds.cosmology.t_from_z(1/np.copy(tmp.a)-1).in_units('Gyr'))})], axis=1)
else:
tmp.update({'t':df.loc[bhid]['t']})
tmp=pd.DataFrame(data=tmp)
tmp.t*=scale_t/(1e9*365*24*3600)
dMdt=tmp.dM[1:]/np.diff(tmp.t)/1e9
dMdt.index-=1
dMdt.loc[dMdt.index.max()+1]=0
tmp['x']/=ds['boxlen']
tmp['y']/=ds['boxlen']
tmp['z']/=ds['boxlen']
tmp['dM']=dMdt
tmp['vsink_rel_stars'] = np.sqrt((tmp['vx_stars']-tmp['vx'])**2+(tmp['vy_stars']-tmp['vy'])**2+(tmp['vz_stars']-tmp['vz'])**2)
tmp['vsink_rel_dm'] = np.sqrt((tmp['vx_dm']-tmp['vx'])**2+(tmp['vy_dm']-tmp['vy'])**2+(tmp['vz_dm']-tmp['vz'])**2)
tmp['rinf_stars'] = (tmp.M / 1e7) / (tmp.vsink_rel_stars / 200)**2
tmp['rinf_dm'] = (tmp.M / 1e7) / (tmp.vsink_rel_dm / 200)**2
CoulombLog = np.maximum(np.zeros(len(tmp.t)), np.log(4*dx/tmp.rinf_stars))
tmp['a_stars_slow']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho_lowspeed_stars*1.67e-24*CoulombLog/(tmp.vsink_rel_stars*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_stars-4*dx) / (tmp.rinf_stars - 4*dx)
tmp['a_stars_fast']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.fact_fast_stars*1.67e-24*CoulombLog/(tmp.vsink_rel_stars*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.maximum(np.zeros(len(tmp.t)), np.log(4*dx/tmp.rinf_dm))
tmp['a_dm_slow']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho_lowspeed_dm*1.67e-24*CoulombLog/(tmp.vsink_rel_dm*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_dm-4*dx) / (tmp.rinf_dm - 4*dx)
tmp['a_dm_fast']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.fact_fast_dm*1.67e-24*CoulombLog/(tmp.vsink_rel_dm*1e5)**2*3600*24*365*1e6/1e5
M=tmp.dv / tmp.cs
tmp['rinf_gas'] = (tmp.M / 1e7) / (tmp.dv**2 + tmp.cs**2)/200**2
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_gas-4*dx) / (tmp.rinf_gas - 4*dx)
fudge=M
fudge.loc[M < 0.95] = 1/M**2*(0.5*np.log((1+M)/(1-M)) - M)
fudge.loc[(M >= 0.95) & (M <= 1.007)] = 1
fudge.loc[M > 1.007] = 1/M**2*(0.5*np.log(M**2-1) + 3.2)
tmp['a_gas']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho*1.67e-24/(tmp.cs*1e5)**2*fudge*(3600*24*365*1e6)/1e5*CoulombLog
if os.path.exists('./sinks/BH00001.csv'):
os.system('rm ./sinks/BH{:05}'.format(bhid)+'.csv')
tmp.to_csv('./sinks/BH{:05}'.format(bhid)+'.csv', index=False)
tmp={tmpprop:[] for tmpprop in props}
tmp.update({'a':[],'t':[]})
tmp=pd.DataFrame(data=tmp)
if os.path.exists('./sinks/BH00001.csv'):
os.system('rm ./sinks/BH00000.csv')
tmp.to_csv('./sinks/BH00000.csv', index=False)
| [
"pandas.DataFrame",
"numpy.log",
"numpy.copy",
"os.path.exists",
"os.system",
"numpy.diff",
"glob.glob",
"yt.load",
"scipy.io.FortranFile",
"numpy.sqrt"
] | [((429, 453), 'os.system', 'os.system', (['"""mkdir sinks"""'], {}), "('mkdir sinks')\n", (438, 453), True, 'import os as os\n'), ((454, 484), 'os.system', 'os.system', (['"""mv sink_* ./sinks"""'], {}), "('mv sink_* ./sinks')\n", (463, 484), True, 'import os as os\n'), ((492, 519), 'glob.glob', 'glob.glob', (['"""output_*/info*"""'], {}), "('output_*/info*')\n", (501, 519), False, 'import glob\n'), ((523, 541), 'yt.load', 'yt.load', (['files[-1]'], {}), '(files[-1])\n', (530, 541), False, 'import yt\n'), ((583, 604), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df'}), '(data=df)\n', (595, 604), True, 'import pandas as pd\n'), ((768, 794), 'glob.glob', 'glob.glob', (['"""./sinks/sink*"""'], {}), "('./sinks/sink*')\n", (777, 794), False, 'import glob\n'), ((5487, 5509), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'tmp'}), '(data=tmp)\n', (5499, 5509), True, 'import pandas as pd\n'), ((5513, 5550), 'os.path.exists', 'os.path.exists', (['"""./sinks/BH00001.csv"""'], {}), "('./sinks/BH00001.csv')\n", (5527, 5550), True, 'import os as os\n'), ((980, 997), 'scipy.io.FortranFile', 'ff.FortranFile', (['f'], {}), '(f)\n', (994, 997), True, 'import scipy.io as ff\n'), ((1240, 1272), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd', 'index': 'bhid'}), '(data=d, index=bhid)\n', (1252, 1272), True, 'import pandas as pd\n'), ((3477, 3598), 'numpy.sqrt', 'np.sqrt', (["((tmp['vx_stars'] - tmp['vx']) ** 2 + (tmp['vy_stars'] - tmp['vy']) ** 2 + \n (tmp['vz_stars'] - tmp['vz']) ** 2)"], {}), "((tmp['vx_stars'] - tmp['vx']) ** 2 + (tmp['vy_stars'] - tmp['vy']) **\n 2 + (tmp['vz_stars'] - tmp['vz']) ** 2)\n", (3484, 3598), True, 'import numpy as np\n'), ((3605, 3717), 'numpy.sqrt', 'np.sqrt', (["((tmp['vx_dm'] - tmp['vx']) ** 2 + (tmp['vy_dm'] - tmp['vy']) ** 2 + (tmp[\n 'vz_dm'] - tmp['vz']) ** 2)"], {}), "((tmp['vx_dm'] - tmp['vx']) ** 2 + (tmp['vy_dm'] - tmp['vy']) ** 2 +\n (tmp['vz_dm'] - tmp['vz']) ** 2)\n", (3612, 3717), True, 'import numpy as np\n'), ((5250, 
5287), 'os.path.exists', 'os.path.exists', (['"""./sinks/BH00001.csv"""'], {}), "('./sinks/BH00001.csv')\n", (5264, 5287), True, 'import os as os\n'), ((5556, 5591), 'os.system', 'os.system', (['"""rm ./sinks/BH00000.csv"""'], {}), "('rm ./sinks/BH00000.csv')\n", (5565, 5591), True, 'import os as os\n'), ((2974, 2996), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'tmp'}), '(data=tmp)\n', (2986, 2996), True, 'import pandas as pd\n'), ((3188, 3210), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'tmp'}), '(data=tmp)\n', (3200, 3210), True, 'import pandas as pd\n'), ((3889, 3920), 'numpy.log', 'np.log', (['(4 * dx / tmp.rinf_stars)'], {}), '(4 * dx / tmp.rinf_stars)\n', (3895, 3920), True, 'import numpy as np\n'), ((4360, 4388), 'numpy.log', 'np.log', (['(4 * dx / tmp.rinf_dm)'], {}), '(4 * dx / tmp.rinf_dm)\n', (4366, 4388), True, 'import numpy as np\n'), ((657, 685), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'a': []}"}), "(data={'a': []})\n", (669, 685), True, 'import pandas as pd\n'), ((721, 749), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'t': []}"}), "(data={'t': []})\n", (733, 749), True, 'import pandas as pd\n'), ((3273, 3287), 'numpy.diff', 'np.diff', (['tmp.t'], {}), '(tmp.t)\n', (3280, 3287), True, 'import numpy as np\n'), ((4989, 5014), 'numpy.log', 'np.log', (['((1 + M) / (1 - M))'], {}), '((1 + M) / (1 - M))\n', (4995, 5014), True, 'import numpy as np\n'), ((5099, 5117), 'numpy.log', 'np.log', (['(M ** 2 - 1)'], {}), '(M ** 2 - 1)\n', (5105, 5117), True, 'import numpy as np\n'), ((3075, 3089), 'numpy.copy', 'np.copy', (['tmp.a'], {}), '(tmp.a)\n', (3082, 3089), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from dante_by_rev_syl.data_preparation import text_in_rev_syls, text_in_syls_rhyme
from dante_by_rev_syl.text_processing import clean_comedy, prettify_text, special_tokens
from dante_by_rev_syl.generate_dante import generate_text
from utils import save_vocab, load_vocab
working_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dante_by_rev_syl')
divine_comedy_file = os.path.join(os.path.dirname(working_dir), "divina_commedia", "divina_commedia_accent_UTF-8.txt")
with open(divine_comedy_file,"r") as f:
divine_comedy = f.read()
divine_comedy = clean_comedy(divine_comedy, special_tokens)
#vocab, idx2syl, syl2idx = build_vocab(divine_comedy)
# Path where the vocab is saved
logs_dir = os.path.join(working_dir, 'logs')
os.makedirs(logs_dir, exist_ok = True)
vocab_file_rhyme = os.path.join(logs_dir, 'vocab_rhyme.json')
vocab_file_verse = os.path.join(logs_dir, 'vocab_verse.json')
vocab_rhyme, idx2syl_rhyme, syl2idx_rhyme = load_vocab(vocab_file_rhyme)
vocab_verse, idx2syl_verse, syl2idx_verse = load_vocab(vocab_file_verse)
# Path where the model is saved
models_dir = os.path.join(working_dir, 'models')
os.makedirs(models_dir, exist_ok = True)
model_file_verse = os.path.join(models_dir, "dante_by_rev_syl_verse_model.h5")
model_file_rhyme = os.path.join(models_dir, "dante_by_rev_syl_rhyme_model.h5")
model_verse = tf.keras.models.load_model(model_file_verse)
model_rhyme = tf.keras.models.load_model(model_file_rhyme)
# Length of the vocabulary
vocab_size_rhyme = len(vocab_rhyme)
vocab_size_verse = len(vocab_verse)
SEQ_LENGTH_RHYME = model_rhyme.get_layer('embedding').output.shape[1]
EMBEDDING_DIM_RHYME = model_rhyme.get_layer('embedding').output.shape[2]
for l in model_rhyme.layers:
if l.name == 'first_lstm':
RNN_TYPE_RHYME = '2lstm'
break
if l.name == 'last_lstm':
RNN_TYPE_RHYME = 'lstm'
break
if l.name == 'first_gru':
RNN_TYPE_RHYME = '2gru'
break
if l.name == 'last_gru':
RNN_TYPE_RHYME = 'gru'
break
if 'lstm' in RNN_TYPE_RHYME :
RNN_UNITS_RHYME = model_rhyme.get_layer('last_lstm').output.shape[-1]
if 'gru' in RNN_TYPE_RHYME:
RNN_UNITS_RHYME = model_rhyme.get_layer('last_gru').output.shape[-1]
SEQ_LENGTH_VERSE = model_verse.get_layer('embedding').output.shape[1]
EMBEDDING_DIM_VERSE = model_verse.get_layer('embedding').output.shape[2]
for l in model_verse.layers:
if l.name == 'first_lstm':
RNN_TYPE_VERSE = '2lstm'
break
if l.name == 'last_lstm':
RNN_TYPE_VERSE = 'lstm'
break
if l.name == 'first_gru':
RNN_TYPE_VERSE = '2gru'
break
if l.name == 'last_gru':
RNN_TYPE_VERSE = 'gru'
break
if 'lstm' in RNN_TYPE_VERSE :
RNN_UNITS_VERSE = model_verse.get_layer('last_lstm').output.shape[-1]
if 'gru' in RNN_TYPE_VERSE:
RNN_UNITS_VERSE = model_verse.get_layer('last_gru').output.shape[-1]
model_rhyme.summary()
model_verse.summary()
model_filename_rhyme = 'model_by_rev_syl_rhyme_seq{}_emb{}_{}{}'.format(SEQ_LENGTH_RHYME , EMBEDDING_DIM_RHYME , RNN_TYPE_RHYME , RNN_UNITS_RHYME )
model_filename_verse = 'model_by_rev_syl_verse_seq{}_emb{}_{}{}'.format(SEQ_LENGTH_VERSE, EMBEDDING_DIM_VERSE, RNN_TYPE_VERSE, RNN_UNITS_VERSE)
print("\nMODEL RHYME: {}".format(model_filename_rhyme))
print("MODEL VERSE: {}\n".format(model_filename_verse))
model_filename = 'model_by_rev_syl'
os.makedirs(os.path.join(logs_dir, model_filename), exist_ok = True)
output_file = os.path.join(logs_dir, model_filename, "output.txt")
raw_output_file = os.path.join(logs_dir, model_filename, "raw_output.txt")
divine_comedy_rhyme = text_in_syls_rhyme(divine_comedy)
#index_eoc = divine_comedy_rhyme.index(special_tokens['END_OF_CANTO']) + 1
indexes = [i for i, x in enumerate(divine_comedy_rhyme) if x == special_tokens['END_OF_CANTO'] and i > SEQ_LENGTH_RHYME]
index_eoc = np.random.choice(indexes) + 1
start_idx = max(0, index_eoc - SEQ_LENGTH_RHYME)
start_seq_rhyme = divine_comedy_rhyme[start_idx:index_eoc]
divine_comedy_verse = text_in_rev_syls(divine_comedy)
indexes = [i for i, x in enumerate(divine_comedy_verse) if x == special_tokens['END_OF_VERSO'] and i > SEQ_LENGTH_VERSE]
index_eov = np.random.choice(indexes)
start_idx = max(0, index_eov - SEQ_LENGTH_VERSE)
start_seq_verse = divine_comedy_verse[start_idx:index_eov]
generated_text = generate_text(model_rhyme, model_verse, special_tokens, vocab_size_rhyme, vocab_size_verse, syl2idx_rhyme, idx2syl_rhyme, syl2idx_verse, idx2syl_verse, SEQ_LENGTH_RHYME, SEQ_LENGTH_VERSE, start_seq_rhyme, start_seq_verse, temperature=1.0)
#print(prettify_text(generated_text, special_tokens))
with open(output_file,"w") as f:
f.write(prettify_text(generated_text, special_tokens))
with open(raw_output_file,"w") as f:
f.write(generated_text)
| [
"os.path.abspath",
"tensorflow.keras.models.load_model",
"os.makedirs",
"dante_by_rev_syl.generate_dante.generate_text",
"os.path.dirname",
"dante_by_rev_syl.text_processing.prettify_text",
"dante_by_rev_syl.data_preparation.text_in_rev_syls",
"dante_by_rev_syl.data_preparation.text_in_syls_rhyme",
... | [((808, 851), 'dante_by_rev_syl.text_processing.clean_comedy', 'clean_comedy', (['divine_comedy', 'special_tokens'], {}), '(divine_comedy, special_tokens)\n', (820, 851), False, 'from dante_by_rev_syl.text_processing import clean_comedy, prettify_text, special_tokens\n'), ((953, 986), 'os.path.join', 'os.path.join', (['working_dir', '"""logs"""'], {}), "(working_dir, 'logs')\n", (965, 986), False, 'import os\n'), ((987, 1023), 'os.makedirs', 'os.makedirs', (['logs_dir'], {'exist_ok': '(True)'}), '(logs_dir, exist_ok=True)\n', (998, 1023), False, 'import os\n'), ((1046, 1088), 'os.path.join', 'os.path.join', (['logs_dir', '"""vocab_rhyme.json"""'], {}), "(logs_dir, 'vocab_rhyme.json')\n", (1058, 1088), False, 'import os\n'), ((1108, 1150), 'os.path.join', 'os.path.join', (['logs_dir', '"""vocab_verse.json"""'], {}), "(logs_dir, 'vocab_verse.json')\n", (1120, 1150), False, 'import os\n'), ((1197, 1225), 'utils.load_vocab', 'load_vocab', (['vocab_file_rhyme'], {}), '(vocab_file_rhyme)\n', (1207, 1225), False, 'from utils import save_vocab, load_vocab\n'), ((1270, 1298), 'utils.load_vocab', 'load_vocab', (['vocab_file_verse'], {}), '(vocab_file_verse)\n', (1280, 1298), False, 'from utils import save_vocab, load_vocab\n'), ((1346, 1381), 'os.path.join', 'os.path.join', (['working_dir', '"""models"""'], {}), "(working_dir, 'models')\n", (1358, 1381), False, 'import os\n'), ((1382, 1420), 'os.makedirs', 'os.makedirs', (['models_dir'], {'exist_ok': '(True)'}), '(models_dir, exist_ok=True)\n', (1393, 1420), False, 'import os\n'), ((1443, 1502), 'os.path.join', 'os.path.join', (['models_dir', '"""dante_by_rev_syl_verse_model.h5"""'], {}), "(models_dir, 'dante_by_rev_syl_verse_model.h5')\n", (1455, 1502), False, 'import os\n'), ((1522, 1581), 'os.path.join', 'os.path.join', (['models_dir', '"""dante_by_rev_syl_rhyme_model.h5"""'], {}), "(models_dir, 'dante_by_rev_syl_rhyme_model.h5')\n", (1534, 1581), False, 'import os\n'), ((1597, 1641), 
'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_file_verse'], {}), '(model_file_verse)\n', (1623, 1641), True, 'import tensorflow as tf\n'), ((1656, 1700), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_file_rhyme'], {}), '(model_file_rhyme)\n', (1682, 1700), True, 'import tensorflow as tf\n'), ((3764, 3816), 'os.path.join', 'os.path.join', (['logs_dir', 'model_filename', '"""output.txt"""'], {}), "(logs_dir, model_filename, 'output.txt')\n", (3776, 3816), False, 'import os\n'), ((3835, 3891), 'os.path.join', 'os.path.join', (['logs_dir', 'model_filename', '"""raw_output.txt"""'], {}), "(logs_dir, model_filename, 'raw_output.txt')\n", (3847, 3891), False, 'import os\n'), ((3917, 3950), 'dante_by_rev_syl.data_preparation.text_in_syls_rhyme', 'text_in_syls_rhyme', (['divine_comedy'], {}), '(divine_comedy)\n', (3935, 3950), False, 'from dante_by_rev_syl.data_preparation import text_in_rev_syls, text_in_syls_rhyme\n'), ((4321, 4352), 'dante_by_rev_syl.data_preparation.text_in_rev_syls', 'text_in_rev_syls', (['divine_comedy'], {}), '(divine_comedy)\n', (4337, 4352), False, 'from dante_by_rev_syl.data_preparation import text_in_rev_syls, text_in_syls_rhyme\n'), ((4486, 4511), 'numpy.random.choice', 'np.random.choice', (['indexes'], {}), '(indexes)\n', (4502, 4511), True, 'import numpy as np\n'), ((4640, 4890), 'dante_by_rev_syl.generate_dante.generate_text', 'generate_text', (['model_rhyme', 'model_verse', 'special_tokens', 'vocab_size_rhyme', 'vocab_size_verse', 'syl2idx_rhyme', 'idx2syl_rhyme', 'syl2idx_verse', 'idx2syl_verse', 'SEQ_LENGTH_RHYME', 'SEQ_LENGTH_VERSE', 'start_seq_rhyme', 'start_seq_verse'], {'temperature': '(1.0)'}), '(model_rhyme, model_verse, special_tokens, vocab_size_rhyme,\n vocab_size_verse, syl2idx_rhyme, idx2syl_rhyme, syl2idx_verse,\n idx2syl_verse, SEQ_LENGTH_RHYME, SEQ_LENGTH_VERSE, start_seq_rhyme,\n start_seq_verse, temperature=1.0)\n', (4653, 4890), False, 'from 
dante_by_rev_syl.generate_dante import generate_text\n'), ((634, 662), 'os.path.dirname', 'os.path.dirname', (['working_dir'], {}), '(working_dir)\n', (649, 662), False, 'import os\n'), ((3691, 3729), 'os.path.join', 'os.path.join', (['logs_dir', 'model_filename'], {}), '(logs_dir, model_filename)\n', (3703, 3729), False, 'import os\n'), ((4159, 4184), 'numpy.random.choice', 'np.random.choice', (['indexes'], {}), '(indexes)\n', (4175, 4184), True, 'import numpy as np\n'), ((183, 198), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (196, 198), True, 'import tensorflow as tf\n'), ((4980, 5025), 'dante_by_rev_syl.text_processing.prettify_text', 'prettify_text', (['generated_text', 'special_tokens'], {}), '(generated_text, special_tokens)\n', (4993, 5025), False, 'from dante_by_rev_syl.text_processing import clean_comedy, prettify_text, special_tokens\n'), ((69, 94), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n'), ((550, 575), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (565, 575), False, 'import os\n')] |
__author__ = '<NAME>'
import os
import numpy as np
import matplotlib.pyplot as plt
noisePath = "/Volumes/MYSD/项目/汽车齿轮高效配对/E04.参考资料/实验数据/数据及说明/12组特征齿轮试验/特征齿轮实验振动噪声数据/"
drivingXs = []
drivingYs = []
drivingZs = []
drivedXs = []
drivedYs = []
drivedZs = []
Noises = []
fileList = os.listdir(noisePath)
for i in range(0, len(fileList)):
filePath = os.path.join(noisePath, fileList[i])
if os.path.isfile(filePath) and filePath.endswith(".TXT"):
data = np.loadtxt(filePath, delimiter='\t')
drivingX = data[:, 2]
drivingXs.append(drivingX.ptp())
drivingY = data[:, 3]
drivingYs.append(drivingY.ptp())
drivingZ = data[:, 4]
drivingZs.append(drivingZ.ptp())
drivedX = data[:, 5]
drivedXs.append(drivedX.ptp())
drivedY = data[:, 6]
drivedYs.append(drivedY.ptp())
drivedZ = data[:, 7]
drivedZs.append(drivedZ.ptp())
noise = data[:, 8]
Noises.append(noise.ptp())
noise_10 = []
noise_30 = []
noise_60 = []
noise_90 = []
noise_120 = []
for i in range(0, len(Noises)):
if i % 5 == 0:
noise_10.append(Noises[i])
elif i % 5 == 1:
noise_30.append(Noises[i])
elif i % 5 == 2:
noise_60.append(Noises[i])
elif i % 5 == 3:
noise_90.append(Noises[i])
elif i % 5 == 4:
noise_120.append(Noises[i])
X = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
plt.plot(X, noise_10, label="noise_10")
plt.plot(X, noise_30, label="noise_30")
plt.plot(X, noise_60, label="noise_60")
plt.plot(X, noise_90, label="noise_90")
plt.plot(X, noise_120, label="noise_120")
plt.legend(loc='best')
plt.xlabel("group")
plt.ylabel("noise")
plt.show()
plt.savefig("noise.jpg")
# plt.hist(drivingXs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Driving X")
# plt.show()
#
# plt.hist(drivingYs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Driving Y")
# plt.show()
#
# plt.hist(drivingZs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("DrivingZ")
# plt.show()
#
# plt.hist(drivedXs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Drived X")
# plt.show()
#
# plt.hist(drivedYs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Drived Y")
# plt.show()
# plt.hist(drivedZs, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Drived Z")
# plt.show()
#
# plt.hist(Noises, bins=20)
# plt.xlabel("Distribute")
# plt.ylabel("Noise")
# plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"os.path.isfile",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir",
"matplotlib.pyplot.savefig"
] | [((280, 301), 'os.listdir', 'os.listdir', (['noisePath'], {}), '(noisePath)\n', (290, 301), False, 'import os\n'), ((1410, 1449), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'noise_10'], {'label': '"""noise_10"""'}), "(X, noise_10, label='noise_10')\n", (1418, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1489), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'noise_30'], {'label': '"""noise_30"""'}), "(X, noise_30, label='noise_30')\n", (1458, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1490, 1529), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'noise_60'], {'label': '"""noise_60"""'}), "(X, noise_60, label='noise_60')\n", (1498, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1569), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'noise_90'], {'label': '"""noise_90"""'}), "(X, noise_90, label='noise_90')\n", (1538, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1611), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'noise_120'], {'label': '"""noise_120"""'}), "(X, noise_120, label='noise_120')\n", (1578, 1611), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1635), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1623, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1655), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""group"""'], {}), "('group')\n", (1646, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1675), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""noise"""'], {}), "('noise')\n", (1666, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1684, 1686), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1711), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""noise.jpg"""'], {}), "('noise.jpg')\n", (1698, 1711), True, 'import matplotlib.pyplot as plt\n'), ((351, 387), 'os.path.join', 'os.path.join', (['noisePath', 'fileList[i]'], {}), '(noisePath, fileList[i])\n', (363, 387), 
False, 'import os\n'), ((395, 419), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (409, 419), False, 'import os\n'), ((466, 502), 'numpy.loadtxt', 'np.loadtxt', (['filePath'], {'delimiter': '"""\t"""'}), "(filePath, delimiter='\\t')\n", (476, 502), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: mnist.py
import os
import gzip
import numpy
from six.moves import range
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['Mnist', 'FashionMnist']
def maybe_download(url, work_directory):
"""Download the data from Yann's website, unless it's already here."""
filename = url.split('/')[-1]
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
logger.info("Downloading to {}...".format(filepath))
download(url, work_directory)
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
data = data.astype('float32') / 255.0
return data
def extract_labels(filename):
"""Extract the labels into a 1D uint8 numpy array [index]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
return labels
class Mnist(RNGDataFlow):
"""
Produces [image, label] in MNIST dataset,
image is 28x28 in the range [0,1], label is an int.
"""
DIR_NAME = 'mnist_data'
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def __init__(self, train_or_test, shuffle=True, dir=None):
"""
Args:
train_or_test (str): either 'train' or 'test'
shuffle (bool): shuffle the dataset
"""
if dir is None:
dir = get_dataset_path(self.DIR_NAME)
assert train_or_test in ['train', 'test']
self.train_or_test = train_or_test
self.shuffle = shuffle
def get_images_and_labels(image_file, label_file):
f = maybe_download(self.SOURCE_URL + image_file, dir)
images = extract_images(f)
f = maybe_download(self.SOURCE_URL + label_file, dir)
labels = extract_labels(f)
assert images.shape[0] == labels.shape[0]
return images, labels
if self.train_or_test == 'train':
self.images, self.labels = get_images_and_labels(
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz')
else:
self.images, self.labels = get_images_and_labels(
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz')
def size(self):
return self.images.shape[0]
def get_data(self):
idxs = list(range(self.size()))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
img = self.images[k].reshape((28, 28))
label = self.labels[k]
yield [img, label]
class FashionMnist(Mnist):
DIR_NAME = 'fashion_mnist_data'
SOURCE_URL = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
if __name__ == '__main__':
ds = Mnist('train')
ds.reset_state()
for (img, label) in ds.get_data():
from IPython import embed
embed()
break
| [
"gzip.open",
"numpy.frombuffer",
"numpy.dtype",
"os.path.exists",
"IPython.embed",
"os.path.join"
] | [((443, 481), 'os.path.join', 'os.path.join', (['work_directory', 'filename'], {}), '(work_directory, filename)\n', (455, 481), False, 'import os\n'), ((493, 517), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (507, 517), False, 'import os\n'), ((899, 918), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (908, 918), False, 'import gzip\n'), ((1311, 1351), 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), '(buf, dtype=numpy.uint8)\n', (1327, 1351), False, 'import numpy\n'), ((1580, 1599), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (1589, 1599), False, 'import gzip\n'), ((1908, 1948), 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), '(buf, dtype=numpy.uint8)\n', (1924, 1948), False, 'import numpy\n'), ((3947, 3954), 'IPython.embed', 'embed', ([], {}), '()\n', (3952, 3954), False, 'from IPython import embed\n'), ((674, 699), 'numpy.dtype', 'numpy.dtype', (['numpy.uint32'], {}), '(numpy.uint32)\n', (685, 699), False, 'import numpy\n')] |
import sys
print("Python 버전:", sys.version)
import pandas as pd
print("pandas 버전:", pd.__version__)
import matplotlib
print("matplotlib 버전:", matplotlib.__version__)
import numpy as np
print("NumPy 버전:", np.__version__)
import scipy as sp
print("SciPy 버전:", sp.__version__)
import IPython
print("IPython 버전:", IPython.__version__)
import sklearn
print("scikit-learn 버전:", sklearn.__version__)
from sklearn.datasets import load_iris
iris_dataset = load_iris()
import mglearn
print("iris_dataset의 키:\n", iris_dataset.keys())
print(iris_dataset['DESCR'][:193] + "\n...")
print("타깃의 이름:", iris_dataset['target_names'])
print("특성의 이름:\n", iris_dataset['feature_names'])
print("data의 타입:", type(iris_dataset['data']))
print("data의 크기:", iris_dataset['data'].shape)
print("data의 처음 다섯 행:\n", iris_dataset['data'][:5])
print("target의 타입:", type(iris_dataset['target']))
print("target의 크기:", iris_dataset['target'].shape)
print("타깃:\n", iris_dataset['target'])
### 성과 측정 : 훈련 데이터와 테스트 데이터
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
print("X_train 크기:", X_train.shape)
print("y_train 크기:", y_train.shape)
print("X_test 크기:", X_test.shape)
print("y_test 크기:", y_test.shape)
### 가장 먼저 할 일 : 데이터 살펴보기
# X_train 데이터를 사용해서 데이터프레임을 만듭니다.
# 열의 이름은 iris_dataset.feature_names에 있는 문자열을 사용합니다.
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# 데이터프레임을 사용해 y_train에 따라 색으로 구분된 산점도 행렬을 만듭니다.
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',
hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3)
### 첫 번째 머신 러닝 모델 : k - 최근접 이웃 알고리즘
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
# 예측하기
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape:", X_new.shape)
prediction = knn.predict(X_new)
print("예측:", prediction)
print("예측한 타깃의 이름:",
iris_dataset['target_names'][prediction])
# 모델 평가하기
y_pred = knn.predict(X_test)
print("테스트 세트에 대한 예측값:\n", y_pred)
print("테스트 세트의 정확도: {:.2f}".format(np.mean(y_pred == y_test)))
print("테스트 세트의 정확도: {:.2f}".format(knn.score(X_test, y_test)))
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("테스트 세트의 정확도: {:.2f}".format(knn.score(X_test, y_test)))
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.mean",
"numpy.array",
"pandas.plotting.scatter_matrix"
] | [((454, 465), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (463, 465), False, 'from sklearn.datasets import load_iris\n'), ((1079, 1157), 'sklearn.model_selection.train_test_split', 'train_test_split', (["iris_dataset['data']", "iris_dataset['target']"], {'random_state': '(0)'}), "(iris_dataset['data'], iris_dataset['target'], random_state=0)\n", (1095, 1157), False, 'from sklearn.model_selection import train_test_split\n'), ((1435, 1492), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': 'iris_dataset.feature_names'}), '(X_train, columns=iris_dataset.feature_names)\n', (1447, 1492), True, 'import pandas as pd\n'), ((1541, 1687), 'pandas.plotting.scatter_matrix', 'pd.plotting.scatter_matrix', (['iris_dataframe'], {'c': 'y_train', 'figsize': '(15, 15)', 'marker': '"""o"""', 'hist_kwds': "{'bins': 20}", 's': '(60)', 'alpha': '(0.8)', 'cmap': 'mglearn.cm3'}), "(iris_dataframe, c=y_train, figsize=(15, 15),\n marker='o', hist_kwds={'bins': 20}, s=60, alpha=0.8, cmap=mglearn.cm3)\n", (1567, 1687), True, 'import pandas as pd\n'), ((1804, 1839), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (1824, 1839), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1882, 1910), 'numpy.array', 'np.array', (['[[5, 2.9, 1, 0.2]]'], {}), '([[5, 2.9, 1, 0.2]])\n', (1890, 1910), True, 'import numpy as np\n'), ((2313, 2391), 'sklearn.model_selection.train_test_split', 'train_test_split', (["iris_dataset['data']", "iris_dataset['target']"], {'random_state': '(0)'}), "(iris_dataset['data'], iris_dataset['target'], random_state=0)\n", (2329, 2391), False, 'from sklearn.model_selection import train_test_split\n'), ((2404, 2439), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (2424, 2439), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2204, 2229), 'numpy.mean', 'np.mean', (['(y_pred == 
y_test)'], {}), '(y_pred == y_test)\n', (2211, 2229), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import platform
import pytest
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from fairlearn.postprocessing import ThresholdOptimizer
_ESTIMATORS = [LogisticRegression, SVC, DecisionTreeClassifier]
if platform.system() != "Darwin":
# MacOS requires extra steps to install lightgbm properly, skipping for now
from lightgbm import LGBMClassifier
_ESTIMATORS.append(LGBMClassifier)
@pytest.mark.parametrize("Mitigator", [ThresholdOptimizer])
@pytest.mark.parametrize("constraints", ["demographic_parity", "equalized_odds"])
@pytest.mark.parametrize("Estimator", _ESTIMATORS)
@pytest.mark.parametrize("prefit", [True, False])
def test_smoke(Mitigator, constraints, Estimator, prefit):
# This test case ensures that input validation doesn't remove metadata from the input
# matrix X, as described at https://github.com/fairlearn/fairlearn/issues/312
np.random.seed(0)
n = 100
X0 = np.random.normal(size=n)
X1 = np.random.choice([1, 2, 3], size=n)
Y = np.random.choice([0, 1], size=n)
A = np.random.choice([0, 1], size=n)
df = pd.DataFrame({"X0": X0, "X1": X1})
# Set X1 as categorical
df['X1'] = df['X1'].astype('category')
estimator = Estimator()
if prefit:
estimator.fit(df, Y)
mitigator = Mitigator(estimator=estimator, constraints=constraints, prefit=prefit)
mitigator.fit(df, Y, sensitive_features=A)
mitigator.predict(df, sensitive_features=A)
| [
"pandas.DataFrame",
"numpy.random.seed",
"numpy.random.normal",
"numpy.random.choice",
"platform.system",
"pytest.mark.parametrize"
] | [((608, 666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Mitigator"""', '[ThresholdOptimizer]'], {}), "('Mitigator', [ThresholdOptimizer])\n", (631, 666), False, 'import pytest\n'), ((668, 753), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""constraints"""', "['demographic_parity', 'equalized_odds']"], {}), "('constraints', ['demographic_parity', 'equalized_odds']\n )\n", (691, 753), False, 'import pytest\n'), ((750, 799), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Estimator"""', '_ESTIMATORS'], {}), "('Estimator', _ESTIMATORS)\n", (773, 799), False, 'import pytest\n'), ((801, 849), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prefit"""', '[True, False]'], {}), "('prefit', [True, False])\n", (824, 849), False, 'import pytest\n'), ((415, 432), 'platform.system', 'platform.system', ([], {}), '()\n', (430, 432), False, 'import platform\n'), ((1085, 1102), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1099, 1102), True, 'import numpy as np\n'), ((1124, 1148), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (1140, 1148), True, 'import numpy as np\n'), ((1158, 1193), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3]'], {'size': 'n'}), '([1, 2, 3], size=n)\n', (1174, 1193), True, 'import numpy as np\n'), ((1202, 1234), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'n'}), '([0, 1], size=n)\n', (1218, 1234), True, 'import numpy as np\n'), ((1243, 1275), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'n'}), '([0, 1], size=n)\n', (1259, 1275), True, 'import numpy as np\n'), ((1285, 1319), 'pandas.DataFrame', 'pd.DataFrame', (["{'X0': X0, 'X1': X1}"], {}), "({'X0': X0, 'X1': X1})\n", (1297, 1319), True, 'import pandas as pd\n')] |
import os
import platform

## replace disutils and also watch out! https://stackoverflow.com/questions/29048623/does-setuptools-build-ext-behaves-differently-from-distutils-one
from Cython.Distutils import build_ext
from numpy.distutils.system_info import get_info

# from distutils.core import setup
# from distutils.extension import Extension
from setuptools import Extension, find_packages, setup

from numkl import __author__, __version__

## cython loop import https://stackoverflow.com/questions/37471313/setup-requires-with-cython
## https://github.com/pypa/setuptools/issues/1317, Maybe only PEP518 is the way out, but still a long way to go.

# MKLROOT is optional: when set, it selects the Intel compiler and serves as a
# fallback for library/include paths if numpy's system_info cannot locate MKL.
mklroot = os.environ.get("MKLROOT", "")
if mklroot:  ## local build: using icc instead
    os.environ["CC"] = "icc"
    os.environ["LDSHARED"] = "icc -shared"
    ## try use intel compiler as introduced in https://software.intel.com/en-us/articles/thread-parallelism-in-cython

mkl_info = get_info("mkl")
libs = [
    "mkl_intel_ilp64",  ## mkl_rt and runtime MKL_INTERFACE_LAYER policy doesn't work well for ilp64,
    ## maybe directly linking to ilp interface is not a bad idea
    "mkl_intel_thread",
    "mkl_core",
    "iomp5",
    "pthread",
    "m",
]


def _resolve_dirs(found, suffix, kind):
    """Return MKL directories from numpy's system_info, or fall back to MKLROOT.

    :param found: the list returned by mkl_info.get(...) or None
    :param suffix: path suffix under MKLROOT (e.g. "/lib/intel64")
    :param kind: "library" or "include", used only for the log message
    :raises Exception: when neither system_info nor MKLROOT provides a path
    """
    if found is not None:
        return found
    if not mklroot:
        raise Exception("environment variable MKLROOT is not set")
    print("Using MKLROOT defined %s path" % kind)
    return [mklroot + suffix]


lib_dirs = _resolve_dirs(mkl_info.get("library_dirs"), "/lib/intel64", "library")
include_dirs = _resolve_dirs(mkl_info.get("include_dirs"), "/include", "include")

osinfo = platform.system()
if osinfo == "Darwin":  # MacOS, clang
    # flags = ["-O3", "-openmp", "-march=native"]
    flags = []
    ## openmp + mkl on mac: https://zhuanlan.zhihu.com/p/48484576
    ## possible relevant posts: https://github.com/ContinuumIO/anaconda-issues/issues/8803
    ## not workable for now: "clang-4.0: error: no such file or directory: 'build/temp.macosx-10.9-x86_64-3.6/numkl/ev.o'"
    ## somehow in CI osx env, the .o file is not generated.
    ## or if using no flag or -fopenmp flag, the error is delayed to runtime when importing numkl.ev
    ## ImportError: dlopen(/Users/travis/miniconda3/conda-bld/numkl_1564029926743/_test_env_placehold/lib/python3.6/site-packages/numkl/ev.cpython-36m-darwin.so, 2):
    ## Symbol not found: _mkl_blas_caxpy
elif osinfo == "Linux":
    flags = ["-O3", "-fopenmp", "-xhost"]
else:
    # BUGFIX: `flags` used to be left undefined on any other platform
    # (e.g. Windows), crashing below with a NameError; build with no extra
    # flags instead.
    flags = []

with open("README.md", "r") as fh:
    long_description = fh.read()

ev = Extension(
    "numkl.ev",
    ["numkl/ev.pyx"],
    define_macros=[("MKL_ILP64",)],
    include_dirs=include_dirs,
    libraries=libs,
    library_dirs=lib_dirs,
    extra_compile_args=flags,
    # see https://software.intel.com/en-us/articles/performance-tools-for-software-developers-intel-compiler-options-for-sse-generation-and-processor-specific-optimizations for cpu specific optimization flag
    extra_link_args=flags,  # -qopt-zmm-usage=high
)

setup(
    name="numkl",
    version=__version__,
    author=__author__,
    author_email="<EMAIL>",
    description="A thin cython/python wrapper on some routines from Intel MKL",
    long_description=long_description,
    url="https://github.com/refraction-ray/numkl",
    packages=find_packages(),
    cmdclass={"build_ext": build_ext},
    install_requires=["numpy>=1.16", "cython>=0.29"],
    ext_modules=[ev],
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ),
)
## wheel upload is not supported, see https://stackoverflow.com/questions/50690526/how-to-publish-binary-python-wheels-for-linux-on-a-local-machine
## also, it doesn't make too much sense to share the wheel at the beginning, due to the external mkl so library
| [
"platform.system",
"numpy.distutils.system_info.get_info",
"setuptools.Extension",
"setuptools.find_packages"
] | [((973, 988), 'numpy.distutils.system_info.get_info', 'get_info', (['"""mkl"""'], {}), "('mkl')\n", (981, 988), False, 'from numpy.distutils.system_info import get_info\n'), ((1775, 1792), 'platform.system', 'platform.system', ([], {}), '()\n', (1790, 1792), False, 'import platform\n'), ((2686, 2880), 'setuptools.Extension', 'Extension', (['"""numkl.ev"""', "['numkl/ev.pyx']"], {'define_macros': "[('MKL_ILP64',)]", 'include_dirs': 'include_dirs', 'libraries': 'libs', 'library_dirs': 'lib_dirs', 'extra_compile_args': 'flags', 'extra_link_args': 'flags'}), "('numkl.ev', ['numkl/ev.pyx'], define_macros=[('MKL_ILP64',)],\n include_dirs=include_dirs, libraries=libs, library_dirs=lib_dirs,\n extra_compile_args=flags, extra_link_args=flags)\n", (2695, 2880), False, 'from setuptools import Extension, find_packages, setup\n'), ((3425, 3440), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3438, 3440), False, 'from setuptools import Extension, find_packages, setup\n')] |
# Video Synthesis via Transform-Based Tensor Neural Network
# <NAME>
# 8/4/2020
# <EMAIL>
import tensorflow as tf
import scipy.io as sio
import numpy as np
import DefineParam as DP
import h5py
# Load the shared experiment configuration once at import time; these
# module-level globals are read by the loader/placeholder helpers below.
pixel_w, pixel_h, batchSize, nPhase, nTrainData, nValData, learningRate, nEpoch, nOfModel, ncpkt, trainFile, valFile, testFile, saveDir, modelDir = DP.get_param()
# Training Data Loading
def load_train_data(mat73=False):
    """Load the training and validation label arrays.

    :param mat73: True if the .mat files were saved in MATLAB v7.3 (HDF5)
                  format and must be read via h5py; otherwise scipy.io is used.
    :return: tuple (trainLabel, valLabel)
    """
    def _read_sub_data(path):
        # h5py exposes MATLAB arrays in reversed dimension order, so transpose
        # back to the layout scipy.io.loadmat would produce.
        if mat73:
            return np.transpose(h5py.File(path)['sub_data'], [3, 2, 1, 0])
        return sio.loadmat(path)['sub_data']

    trainLabel = _read_sub_data(trainFile)
    valLabel = _read_sub_data(valFile)
    print("nOfModel: %d" % nOfModel)
    print(np.shape(trainLabel))
    return trainLabel, valLabel
# Testing Data Loading
def load_test_data(mat73=False):
    """Load the test label array; mat73=True selects the HDF5 (v7.3) reader."""
    if mat73:
        raw = h5py.File(testFile)
        labels = np.transpose(raw['sub_data'], [3, 2, 1, 0])
    else:
        raw = sio.loadmat(testFile)
        labels = raw['sub_data']  # labels
    print(np.shape(labels))
    del raw
    return labels
# Essential Computations
def pre_calculate(phi):
    """Create the TF1 placeholders and the constant sampling matrix.

    :param phi: sampling matrix, wrapped into a tf.constant
    :return: (Xinput, Xoutput, Phi, PhiT, Yinput, Epoch_num); PhiT aliases Phi.
    """
    frame_shape = [None, pixel_h, pixel_w, nOfModel]
    Xinput = tf.placeholder(tf.float32, frame_shape)   # After Init
    Xoutput = tf.placeholder(tf.float32, frame_shape)
    Yinput = tf.placeholder(tf.float32, frame_shape)   # After sampling
    Epoch_num = tf.placeholder(tf.float32)
    Phi = tf.constant(phi)
    PhiT = Phi
    return Xinput, Xoutput, Phi, PhiT, Yinput, Epoch_num
| [
"h5py.File",
"scipy.io.loadmat",
"numpy.transpose",
"tensorflow.constant",
"numpy.shape",
"tensorflow.placeholder",
"DefineParam.get_param"
] | [((460, 474), 'DefineParam.get_param', 'DP.get_param', ([], {}), '()\n', (472, 474), True, 'import DefineParam as DP\n'), ((1630, 1692), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, pixel_h, pixel_w, nOfModel]'], {}), '(tf.float32, [None, pixel_h, pixel_w, nOfModel])\n', (1644, 1692), True, 'import tensorflow as tf\n'), ((1738, 1800), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, pixel_h, pixel_w, nOfModel]'], {}), '(tf.float32, [None, pixel_h, pixel_w, nOfModel])\n', (1752, 1800), True, 'import tensorflow as tf\n'), ((1815, 1877), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, pixel_h, pixel_w, nOfModel]'], {}), '(tf.float32, [None, pixel_h, pixel_w, nOfModel])\n', (1829, 1877), True, 'import tensorflow as tf\n'), ((1929, 1955), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1943, 1955), True, 'import tensorflow as tf\n'), ((1967, 1983), 'tensorflow.constant', 'tf.constant', (['phi'], {}), '(phi)\n', (1978, 1983), True, 'import tensorflow as tf\n'), ((591, 611), 'h5py.File', 'h5py.File', (['trainFile'], {}), '(trainFile)\n', (600, 611), False, 'import h5py\n'), ((634, 683), 'numpy.transpose', 'np.transpose', (["trainData['sub_data']", '[3, 2, 1, 0]'], {}), "(trainData['sub_data'], [3, 2, 1, 0])\n", (646, 683), True, 'import numpy as np\n'), ((716, 738), 'scipy.io.loadmat', 'sio.loadmat', (['trainFile'], {}), '(trainFile)\n', (727, 738), True, 'import scipy.io as sio\n'), ((829, 847), 'h5py.File', 'h5py.File', (['valFile'], {}), '(valFile)\n', (838, 847), False, 'import h5py\n'), ((868, 915), 'numpy.transpose', 'np.transpose', (["valData['sub_data']", '[3, 2, 1, 0]'], {}), "(valData['sub_data'], [3, 2, 1, 0])\n", (880, 915), True, 'import numpy as np\n'), ((946, 966), 'scipy.io.loadmat', 'sio.loadmat', (['valFile'], {}), '(valFile)\n', (957, 966), True, 'import scipy.io as sio\n'), ((1058, 1078), 'numpy.shape', 'np.shape', (['trainLabel'], {}), 
'(trainLabel)\n', (1066, 1078), True, 'import numpy as np\n'), ((1266, 1285), 'h5py.File', 'h5py.File', (['testFile'], {}), '(testFile)\n', (1275, 1285), False, 'import h5py\n'), ((1307, 1355), 'numpy.transpose', 'np.transpose', (["testData['sub_data']", '[3, 2, 1, 0]'], {}), "(testData['sub_data'], [3, 2, 1, 0])\n", (1319, 1355), True, 'import numpy as np\n'), ((1387, 1408), 'scipy.io.loadmat', 'sio.loadmat', (['testFile'], {}), '(testFile)\n', (1398, 1408), True, 'import scipy.io as sio\n'), ((1486, 1505), 'numpy.shape', 'np.shape', (['testLabel'], {}), '(testLabel)\n', (1494, 1505), True, 'import numpy as np\n')] |
"""
This module includes tools to correct CDF values for a weighted sum
of Bernoulli RVs using linear integer programming
"""
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["cdf_corrected"]
import numpy as np
from cvxopt import glpk, matrix
from . import bernmix_double as bmd
from .. import bernmix_int as bmi
from .. import bernmix_control as control
def cdf_corrected(probs, weights, target_indiv,
                  m_rounding=10**6, n_solutions=100):
    """
    Compute the corrected CDF value for a weighted sum of Bernoulli RVs.

    :param probs: a List of real numbers in the range [0,1]
                  representing probabilities of BRVs
    :param weights: a List of numbers; weights in a weighted sum of BRVs
    :param target_indiv: a List of binary outcomes of BRVs, 0/1 numbers
    :param m_rounding: a number of points to approximate
                       the weighted sum of BRVs
    :param n_solutions: a number of runs for linear integer programming
                        to correct the CDF value
    :return: an approximated and corrected CDF value for the target_indiv
    """
    # ----------------------------------------------
    # Control Input values
    # ----------------------------------------------
    control.weights_dbl(weights)
    control.probs(probs)
    control.individual(target_indiv)
    control.lengths(weights, probs, target_indiv)
    control.m_rounding(m_rounding, weights)
    control.n_solutions(n_solutions)
    # ----------------------------------------------
    n = len(weights)
    w = weights
    wc = bmd.weight_rounded(w, m_rounding)
    # S and Z values according to the article
    target_value_s = np.dot(w, target_indiv)
    target_value_z = np.dot(wc, target_indiv).astype(int)
    # number of points into two directions
    k = 10
    target_range = np.arange(target_value_z - k, target_value_z + k + 1, 1)
    distr_in_probs = np.zeros((2 * k + 1, 2))
    for i, target_value in enumerate(target_range):
        pop = binprog_multisol(wc, target_value, n_solutions, 10)
        if pop is None:
            distr_in_probs[i, 0] = 0
            distr_in_probs[i, 1] = 0
            continue
        s_values = list(map(lambda x: np.dot(x[0:n], w), pop))
        # BUGFIX: comp_indiv_prob's signature is (probs, indiv); the arguments
        # were previously passed swapped, which made every individual
        # probability collapse to prod(1 - x_j) instead of the true
        # outcome probability.
        indiv_probs = list(map(lambda x: comp_indiv_prob(probs, x[0:n]), pop))
        sum_prob = sum(indiv_probs)
        distr_in_probs[i, 0] = sum(np.extract(s_values <= target_value_s,
                                              indiv_probs)) / sum_prob
        distr_in_probs[i, 1] = sum(np.extract(s_values > target_value_s,
                                              indiv_probs)) / sum_prob
    pmf = bmi.pmf(probs, wc)
    pmf_range = list(map(lambda t: pmf[t], target_range))
    # exact mass strictly below the corrected window, plus the corrected
    # within-window mass on the "<= S" side
    cdf2 = sum(pmf[:target_value_z + 1 - k - 1:]) + \
           np.dot(pmf_range, distr_in_probs[:, 0])
    return cdf2
def comp_indiv_prob(probs, indiv):
    """Return the probability of observing the binary outcome vector *indiv*.

    :param probs: success probabilities of the Bernoulli RVs, each in [0, 1]
    :param indiv: binary outcomes of the Bernoulli RVs, 0/1 numbers
    """
    factors = [p if outcome == 1 else 1 - p
               for outcome, p in zip(indiv, probs)]
    return np.prod(factors)
def binprog_multisol(weights, target_value, n_solutions, n_fixed=10):
    """
    Sample multiple solutions of the zero-one linear program
    sum_j weights[j] * x_j == target_value.

    Each run randomly fixes ``n_fixed`` percent of the variables to 0/1, which
    diversifies the solutions returned by the GLPK integer solver.

    :param weights: a List of numbers; weights in a weighted sum of BRVs
    :param target_value: an outcome of the weighted sum of BRVs
    :param n_solutions: a number of runs for linear integer programming
                        to correct the CDF value
    :param n_fixed: percentage of variables randomly fixed on each run
    :return: a matrix of unique solutions, one per row with n+1 columns (the
             last column is the auxiliary variable fixed to 1), or None when
             no feasible solution was found
    """
    glpk.options['msg_lev'] = 'GLP_MSG_OFF'
    n = len(weights)
    # Encode sum(w*x) - target * x_{n+1} == 0 with the auxiliary x_{n+1} == 1.
    c_init = np.append(weights, -target_value).astype(int)
    c = c_init.reshape((1, n+1))
    a_ident = np.identity(n + 1).astype(int)
    a_ident = np.delete(a_ident, n, axis=0)
    a_ub = np.concatenate((a_ident, -c), axis=0)
    b_ub = np.append(np.ones(n), [0]).astype(int).reshape(n+1, 1)
    pop = np.empty((0, n+1))
    for _ in range(n_solutions):
        # x_(n+1) = 1
        a_eq = np.append(np.zeros(n), [1]).astype(int).reshape(1, n+1)
        b_eq = np.asmatrix([1])
        # Randomly pick a subset of variables and pin each to a random 0/1.
        idx = np.random.permutation(n)
        idx = idx[0:round(n_fixed/100 * n)]
        for i in idx:
            tmp = np.zeros(n+1)
            tmp[i] = 1
            a_eq = np.concatenate((a_eq, [tmp]), axis=0)
            b_eq = np.concatenate((b_eq, [np.random.randint(0, 2, 1)]), axis=0)
        c_mx = matrix(c, tc='d')
        g_mx = matrix(a_ub, tc='d')
        h_mx = matrix(b_ub, tc='d')
        a_mx = matrix(a_eq, tc='d')
        b_mx = matrix(b_eq, tc='d')
        status, x = glpk.ilp(c=c_mx,
                            G=g_mx,
                            h=h_mx,
                            A=a_mx,
                            b=b_mx,
                            B=set(range(n+1)))
        if status != 'optimal':
            continue
        pop = np.concatenate((pop, np.matrix(x).reshape((1, n+1))), axis=0)
    # BUGFIX: detect "no solution found" by the number of rows, not columns;
    # pop always has n+1 columns, so the old `pop.shape[1] == 0` test could
    # never fire and callers received an empty matrix instead of None.
    if pop.shape[0] == 0:
        return None
    pop = np.unique(pop, axis=0)
    # ANNA: from 0 to n
    return pop
# Smoke message when the module is executed directly (it exposes no CLI).
if __name__ == "__main__":
    print('BernMix_correction is loaded')
| [
"numpy.matrix",
"numpy.concatenate",
"cvxopt.matrix",
"numpy.extract",
"numpy.empty",
"numpy.unique",
"numpy.zeros",
"numpy.identity",
"numpy.ones",
"numpy.append",
"numpy.random.randint",
"numpy.arange",
"numpy.asmatrix",
"numpy.random.permutation",
"numpy.dot",
"numpy.delete",
"num... | [((1730, 1753), 'numpy.dot', 'np.dot', (['w', 'target_indiv'], {}), '(w, target_indiv)\n', (1736, 1753), True, 'import numpy as np\n'), ((1886, 1942), 'numpy.arange', 'np.arange', (['(target_value_z - k)', '(target_value_z + k + 1)', '(1)'], {}), '(target_value_z - k, target_value_z + k + 1, 1)\n', (1895, 1942), True, 'import numpy as np\n'), ((1964, 1988), 'numpy.zeros', 'np.zeros', (['(2 * k + 1, 2)'], {}), '((2 * k + 1, 2))\n', (1972, 1988), True, 'import numpy as np\n'), ((3298, 3320), 'numpy.prod', 'np.prod', (['prob_multiply'], {}), '(prob_multiply)\n', (3305, 3320), True, 'import numpy as np\n'), ((4062, 4091), 'numpy.delete', 'np.delete', (['a_ident', 'n'], {'axis': '(0)'}), '(a_ident, n, axis=0)\n', (4071, 4091), True, 'import numpy as np\n'), ((4103, 4140), 'numpy.concatenate', 'np.concatenate', (['(a_ident, -c)'], {'axis': '(0)'}), '((a_ident, -c), axis=0)\n', (4117, 4140), True, 'import numpy as np\n'), ((4218, 4238), 'numpy.empty', 'np.empty', (['(0, n + 1)'], {}), '((0, n + 1))\n', (4226, 4238), True, 'import numpy as np\n'), ((5293, 5315), 'numpy.unique', 'np.unique', (['pop'], {'axis': '(0)'}), '(pop, axis=0)\n', (5302, 5315), True, 'import numpy as np\n'), ((2850, 2889), 'numpy.dot', 'np.dot', (['pmf_range', 'distr_in_probs[:, 0]'], {}), '(pmf_range, distr_in_probs[:, 0])\n', (2856, 2889), True, 'import numpy as np\n'), ((4378, 4394), 'numpy.asmatrix', 'np.asmatrix', (['[1]'], {}), '([1])\n', (4389, 4394), True, 'import numpy as np\n'), ((4410, 4434), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (4431, 4434), True, 'import numpy as np\n'), ((4709, 4726), 'cvxopt.matrix', 'matrix', (['c'], {'tc': '"""d"""'}), "(c, tc='d')\n", (4715, 4726), False, 'from cvxopt import glpk, matrix\n'), ((4742, 4762), 'cvxopt.matrix', 'matrix', (['a_ub'], {'tc': '"""d"""'}), "(a_ub, tc='d')\n", (4748, 4762), False, 'from cvxopt import glpk, matrix\n'), ((4778, 4798), 'cvxopt.matrix', 'matrix', (['b_ub'], {'tc': '"""d"""'}), 
"(b_ub, tc='d')\n", (4784, 4798), False, 'from cvxopt import glpk, matrix\n'), ((4814, 4834), 'cvxopt.matrix', 'matrix', (['a_eq'], {'tc': '"""d"""'}), "(a_eq, tc='d')\n", (4820, 4834), False, 'from cvxopt import glpk, matrix\n'), ((4850, 4870), 'cvxopt.matrix', 'matrix', (['b_eq'], {'tc': '"""d"""'}), "(b_eq, tc='d')\n", (4856, 4870), False, 'from cvxopt import glpk, matrix\n'), ((1775, 1799), 'numpy.dot', 'np.dot', (['wc', 'target_indiv'], {}), '(wc, target_indiv)\n', (1781, 1799), True, 'import numpy as np\n'), ((3923, 3956), 'numpy.append', 'np.append', (['weights', '(-target_value)'], {}), '(weights, -target_value)\n', (3932, 3956), True, 'import numpy as np\n'), ((4017, 4035), 'numpy.identity', 'np.identity', (['(n + 1)'], {}), '(n + 1)\n', (4028, 4035), True, 'import numpy as np\n'), ((4519, 4534), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (4527, 4534), True, 'import numpy as np\n'), ((4575, 4612), 'numpy.concatenate', 'np.concatenate', (['(a_eq, [tmp])'], {'axis': '(0)'}), '((a_eq, [tmp]), axis=0)\n', (4589, 4612), True, 'import numpy as np\n'), ((2442, 2493), 'numpy.extract', 'np.extract', (['(s_values <= target_value_s)', 'indiv_probs'], {}), '(s_values <= target_value_s, indiv_probs)\n', (2452, 2493), True, 'import numpy as np\n'), ((2587, 2637), 'numpy.extract', 'np.extract', (['(s_values > target_value_s)', 'indiv_probs'], {}), '(s_values > target_value_s, indiv_probs)\n', (2597, 2637), True, 'import numpy as np\n'), ((2265, 2282), 'numpy.dot', 'np.dot', (['x[0:n]', 'w'], {}), '(x[0:n], w)\n', (2271, 2282), True, 'import numpy as np\n'), ((4162, 4172), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4169, 4172), True, 'import numpy as np\n'), ((4655, 4681), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(1)'], {}), '(0, 2, 1)\n', (4672, 4681), True, 'import numpy as np\n'), ((5194, 5206), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (5203, 5206), True, 'import numpy as np\n'), ((4317, 4328), 'numpy.zeros', 
'np.zeros', (['n'], {}), '(n)\n', (4325, 4328), True, 'import numpy as np\n')] |
from .Core import *
import numpy as np
import moderngl
from PIL import Image
class DemoCDB(Filter):
    """Demo filter that ray-marches a small SDF scene (a ground plane and two
    spheres) with moderngl and renders it from a grid of camera directions.

    Inputs:
        Resolution   (vec2): output image size in pixels
        PhiSamples   (vec3): (start, stop, step) azimuth angles in degrees
        ThetaSamples (vec3): (start, stop, step) elevation angles in degrees
        Time         (float): scene time; animates the orbiting sphere
    Output:
        Images (List): one PIL.Image per sampled (phi, theta) camera.
    """
    def __init__(self):
        super().__init__()
        self.addInputPort("Resolution", "vec2", (256,256))
        self.addInputPort("PhiSamples", "vec3", (0,360,360))
        self.addInputPort("ThetaSamples", "vec3", (20,20,1))
        self.addInputPort("Time", "float", 0)
        self.addOutputPort("Images", "List", [])
        # create context (offscreen, requires OpenGL 3.3)
        self.ctx = moderngl.create_standalone_context(require=330)
        # fullscreen quad (2D clip-space positions, drawn as a triangle strip)
        self.quad = self.ctx.buffer(
            np.array([
                1.0, 1.0,
                -1.0, 1.0,
                -1.0, -1.0,
                1.0, -1.0,
                1.0, 1.0
            ]).astype('f4').tobytes()
        )
        # program
        self.program = self.ctx.program(
            vertex_shader=self.getVertexShaderCode(),
            fragment_shader=self.getFragmentShaderCode(),
            varyings=["uv"]
        )
        self.vao = self.ctx.simple_vertex_array(self.program, self.quad, 'position')

    def getVertexShaderCode(self):
        """Return the GLSL vertex shader: passes the quad through and forwards
        the clip-space position as the `uv` coordinate."""
        return """
            #version 330
            in vec2 position;
            out vec2 uv;
            void main(){
                uv = position;
                gl_Position = vec4(position,0,1);
            }
        """

    def getFragmentShaderCode(self):
        """Return the GLSL fragment shader: a sphere-traced SDF renderer with
        Phong shading and soft shadows, parameterized by the iPhi/iTheta
        camera angles, iTime, and iResolution uniforms."""
        return """
            #version 330
            in vec2 uv;
            out vec3 fragColor;
            uniform vec2 iResolution;
            uniform float iTime;
            uniform float iPhi;
            uniform float iTheta;
            const int MAX_MARCHING_STEPS = 255;
            const float MIN_DIST = 0.1;
            const float MAX_DIST = 40.0;
            const float EPSILON = 0.001;
            float planeSDF(vec3 p) {
                return abs(p.y);
            }
            float sphereSDF(vec3 p, float r) {
                return length(p) - r;
            }
            vec2 compare(vec2 hit, float d, float id){
                return hit.x<d ? hit : vec2(d,id);
            }
            vec2 sceneSDF(vec3 p) {
                vec2 hit = vec2(planeSDF(p),0);
                hit = compare(hit, sphereSDF(p-2.0*vec3(cos(iTime),0.25,sin(iTime)), 0.25), 1);
                hit = compare(hit, sphereSDF(p-vec3(0,1,0),1), 2);
                return hit;
            }
            vec3 march(vec3 ro, vec3 rd, float tmin, float tmax) {
                vec3 hit = vec3(0,-1,tmin);
                for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
                    hit.xy = sceneSDF(ro + hit.z * rd);
                    if (hit.x < EPSILON) {
                        return hit;
                    }
                    hit.z += hit.x;
                    if (hit.z >= tmax) {
                        return hit;
                    }
                }
                return hit;
            }
            vec3 estimateNormal(vec3 p) {
                return normalize(vec3(
                    sceneSDF(vec3(p.x + EPSILON, p.y, p.z)).x - sceneSDF(vec3(p.x - EPSILON, p.y, p.z)).x,
                    sceneSDF(vec3(p.x, p.y + EPSILON, p.z)).x - sceneSDF(vec3(p.x, p.y - EPSILON, p.z)).x,
                    sceneSDF(vec3(p.x, p.y, p.z + EPSILON)).x - sceneSDF(vec3(p.x, p.y, p.z - EPSILON)).x
                ));
            }
            vec3 phongBRDF(vec3 lightDir, vec3 rayDir, vec3 normal, vec3 diff, vec3 spec, float shininess) {
                vec3 color = diff;
                vec3 reflectDir = reflect(-lightDir, normal);
                float specDot = max(dot(reflectDir, rayDir), 0.0);
                color += pow(specDot, shininess) * spec;
                return color;
            }
            float softshadow(vec3 ro, vec3 rd, float tmin, float tmax)
            {
                float res = 1.0;
                vec3 hit = vec3(0,-1,tmin);
                for (int i = 0; i < 16; i++) {
                    hit.xy = sceneSDF(ro + hit.z * rd);
                    res = min( res, 8.0*hit.x/hit.z );
                    hit.z += hit.x;
                }
                return clamp( res, 0.0, 1.0 );
            }
            void main() {
                vec2 fragCoord = (uv*0.5+0.5)*iResolution;
                vec3 focal = normalize(vec3(
                    cos(iPhi)*sin(iTheta),
                    cos(iTheta),
                    sin(iPhi)*sin(iTheta)
                ));
                float aspect = iResolution.x/iResolution.y;
                vec3 rayDir = -normalize(focal);
                vec3 up = vec3(0,1,0);
                vec3 right = normalize(cross(rayDir,up));
                vec3 up2 = -normalize(cross(rayDir,right));
                float scale = 2.0;
                vec3 origin = 15.0*focal + aspect*right*uv.x*scale + up2*uv.y*scale;
                vec3 hit = march(origin, rayDir, MIN_DIST, MAX_DIST);
                if (hit.z > MAX_DIST - EPSILON) {
                    fragColor = vec3(0);
                    return;
                }
                // The closest point on the surface to the eyepoint along the view ray
                vec3 p = origin + hit.z * rayDir;
                vec3 lightDir = normalize(vec3(1,1,0));
                vec3 normal = estimateNormal(p);
                vec3 materialColor = hit.y > 1.5 ? vec3(0.8,0,0) : hit.y > 0.5 ? vec3(0,0.8,0) : vec3(0.3 + 0.1*mod( floor(1.0*p.z) + floor(1.0*p.x), 2.0));
                vec3 radiance = vec3(0);
                float irradiance = max(dot(lightDir, normal), 0.0);
                vec3 brdf = phongBRDF(lightDir, rayDir, normal, materialColor, vec3(1), 1000.0);
                radiance += brdf * irradiance * vec3(1);
                radiance *= softshadow(p, lightDir, MIN_DIST, MAX_DIST);
                fragColor = pow(radiance, vec3(1.0 / 2.2) ); // gamma correction
                //fragColor = color*(hit.z-MIN_DIST)/(MAX_DIST-MIN_DIST);
                //fragColor = estimateNormal(p);
            }
        """

    def render(self,phi,theta):
        """Render one frame for the given camera angles (radians) and return
        it as a PIL.Image."""
        res = self.inputs.Resolution.get()
        time = self.inputs.Time.get()
        # create framebuffer
        fbo = self.ctx.simple_framebuffer(res)
        fbo.use()
        fbo.clear(0.0, 0.0, 0.0, 1.0)
        # render
        self.program['iResolution'].value = res
        self.program['iTime'].value = time
        self.program['iPhi'].value = phi
        self.program['iTheta'].value = theta
        self.vao.render(moderngl.TRIANGLE_STRIP)
        # read pixels (flipped vertically via the negative stride argument)
        image = Image.frombytes('RGB', fbo.size, fbo.read(), 'raw', 'RGB', 0, -1)
        # release resources
        fbo.release()
        return image

    def update(self):
        """Render one image per sampled (phi, theta) pair and publish the
        list on the Images output port."""
        super().update()
        phiSamples = self.inputs.PhiSamples.get();
        thetaSamples = self.inputs.ThetaSamples.get();
        results = []
        # The `[0,1][start==stop]` term widens the range by one step when
        # start equals stop, so a single-angle request still yields one frame.
        for theta in range(thetaSamples[0],thetaSamples[1]+[0,1][thetaSamples[0]==thetaSamples[1]],thetaSamples[2]):
            for phi in range(phiSamples[0],phiSamples[1]+[0,1][phiSamples[0]==phiSamples[1]],phiSamples[2]):
                results.append(
                    self.render(
                        # degrees -> radians; theta measured from the zenith
                        phi/360.0*2.0*np.pi,
                        (90-theta)/180.0*np.pi,
                    )
                )
        # self.ctx.release()
        self.outputs.Images.set(results);
        return 1;
| [
"numpy.array",
"moderngl.create_standalone_context"
] | [((474, 521), 'moderngl.create_standalone_context', 'moderngl.create_standalone_context', ([], {'require': '(330)'}), '(require=330)\n', (508, 521), False, 'import moderngl\n'), ((598, 662), 'numpy.array', 'np.array', (['[1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0])\n', (606, 662), True, 'import numpy as np\n')] |
import os
import cv2
import time
import argparse
import multiprocessing
import numpy as np
import tensorflow as tf
import time
from multiprocessing import Queue, Pool
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
class ObjectClassifier:
    """Runs a frozen TensorFlow object-detection graph over camera frames.

    Intended for a threaded part loop: `run_threaded` hands in the latest
    frame and immediately returns the most recent detections, while `update`
    performs the (slow) inference in the background thread.
    """

    def __init__(self, model='coke2_model', predicted_class_threshold=0.90):
        """
        :param model: directory name (under /home/pi/ARC/) containing the
            exported `frozen_inference_graph.pb`
        :param predicted_class_threshold: minimum detection score a class
            must exceed to be reported
        """
        self.MODEL_NAME = model
        # Path to frozen detection graph. This is the actual model that is
        # used for the object detection.
        self.PATH_TO_CKPT = os.path.join('/home/pi/ARC/', self.MODEL_NAME, 'frozen_inference_graph.pb')
        # Label map translating numeric class ids to display names.
        self.PATH_TO_LABELS = '/home/pi/ARC/coke_label_map.pbtxt'
        self.NUM_CLASSES = 1
        # Loading label map
        self.label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        self.categories = label_map_util.convert_label_map_to_categories(
            self.label_map, max_num_classes=self.NUM_CLASSES,
            use_display_name=True)
        self.category_index = label_map_util.create_category_index(self.categories)
        # The threshold a detection score has to exceed to be reported.
        self.predicted_class_threshold = predicted_class_threshold
        self.sess = None
        self.detection_graph = None
        self.worker()  # load the frozen graph and open the session
        self.detected_classes = []
        self.frame = None
        self.isNewFrame = False
        self.on = True  # cleared externally to stop the update() loop

    def run_threaded(self, frame):
        """Submit a new frame and return the latest (possibly stale) result."""
        self.frame = frame
        self.isNewFrame = True
        return self.detected_classes

    def detect_objects(self, image_np):
        """Run inference on one RGB image and return the names of all classes
        whose detection score exceeds `predicted_class_threshold`."""
        # Expand dimensions since the model expects images to have shape:
        # [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object
        # was detected.
        boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for the object at the
        # same index.
        scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Actual detection.
        (boxes, scores, classes, num_detections) = self.sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        # (Bounding boxes can be drawn on image_np with
        # vis_util.visualize_boxes_and_labels_on_image_array if needed.)
        detected_classes = []  # classes whose score exceeds the threshold
        for index, score in enumerate(scores[0]):
            if score > self.predicted_class_threshold:
                # BUGFIX: look up the class id detected at *this* index; the
                # previous code always read classes[0][0] (the first
                # detection), mislabeling every further detection.
                class_name = self.category_index[classes[0][index]]['name']
                detected_classes.append(class_name)
        return detected_classes

    def worker(self):
        """Load the frozen TensorFlow graph into memory and open a session."""
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            config = tf.ConfigProto()
            # Grow GPU memory on demand instead of grabbing it all up front.
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(graph=self.detection_graph, config=config)

    def update(self):
        """Background loop: run detection whenever a new frame arrives."""
        while self.on:
            time.sleep(0.01)  # random delay that should in the future be based on the framerate
            if self.isNewFrame and self.frame is not None:
                print("About to try to detect image")
                frame_rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                self.isNewFrame = False
                self.detected_classes = self.detect_objects(frame_rgb)
                if self.detected_classes:
                    print("DETECTED CLASSES: ", self.detected_classes)
| [
"cv2.cvtColor",
"object_detection.utils.label_map_util.create_category_index",
"numpy.expand_dims",
"object_detection.utils.label_map_util.convert_label_map_to_categories",
"tensorflow.Session",
"time.sleep",
"tensorflow.ConfigProto",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.impor... | [((858, 933), 'os.path.join', 'os.path.join', (['"""/home/pi/ARC/"""', 'self.MODEL_NAME', '"""frozen_inference_graph.pb"""'], {}), "('/home/pi/ARC/', self.MODEL_NAME, 'frozen_inference_graph.pb')\n", (870, 933), False, 'import os\n'), ((1228, 1277), 'object_detection.utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['self.PATH_TO_LABELS'], {}), '(self.PATH_TO_LABELS)\n', (1256, 1277), False, 'from object_detection.utils import label_map_util\n'), ((1304, 1427), 'object_detection.utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['self.label_map'], {'max_num_classes': 'self.NUM_CLASSES', 'use_display_name': '(True)'}), '(self.label_map,\n max_num_classes=self.NUM_CLASSES, use_display_name=True)\n', (1350, 1427), False, 'from object_detection.utils import label_map_util\n'), ((1522, 1575), 'object_detection.utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['self.categories'], {}), '(self.categories)\n', (1558, 1575), False, 'from object_detection.utils import label_map_util\n'), ((2202, 2234), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2216, 2234), True, 'import numpy as np\n'), ((4006, 4016), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4014, 4016), True, 'import tensorflow as tf\n'), ((4092, 4105), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (4103, 4105), True, 'import tensorflow as tf\n'), ((4365, 4381), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4379, 4381), True, 'import tensorflow as tf\n'), ((4457, 4510), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (4467, 4510), True, 'import tensorflow as tf\n'), ((4583, 4599), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (4593, 4599), False, 'import time\n'), ((4123, 4162), 
'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.PATH_TO_CKPT', '"""rb"""'], {}), "(self.PATH_TO_CKPT, 'rb')\n", (4137, 4162), True, 'import tensorflow as tf\n'), ((4296, 4338), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (4315, 4338), True, 'import tensorflow as tf\n'), ((4808, 4851), 'cv2.cvtColor', 'cv2.cvtColor', (['self.frame', 'cv2.COLOR_BGR2RGB'], {}), '(self.frame, cv2.COLOR_BGR2RGB)\n', (4820, 4851), False, 'import cv2\n')] |
import tensorflow as tf
import numpy as np
import os
import time
class Net():
    """Small convolutional value network (TF1 Keras) with checkpointing.

    The model maps a (10, 10, 3) board state to a single tanh-activated
    scalar; weights are persisted to `path` after every training epoch.
    """

    def __init__(self, path=None):
        self.sess = tf.Session()
        tf.keras.backend.set_session(self.sess)
        self.checkpoint_path = path
        # Persist weights (not the full model) after each epoch of fit().
        self.cp_callback = tf.keras.callbacks.ModelCheckpoint(
            self.checkpoint_path, verbose=False, save_weights_only=True)
        layers = tf.keras.layers
        # Three conv/pool stages followed by a dense head.
        stack = [layers.InputLayer(input_shape=(10, 10, 3), name="input")]
        for n_filters in (16, 32, 32):
            stack.append(layers.Conv2D(n_filters, kernel_size=(3, 3),
                                       activation='relu'))
            stack.append(layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1)))
        stack.extend([
            layers.Flatten(),
            layers.Dropout(0.2),
            layers.Dense(128, activation='relu'),
            layers.Dense(256, activation='relu'),
            layers.Dense(1, 'tanh', name="infer"),
        ])
        self.model = tf.keras.Sequential(stack)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=0.002)
        self.model.compile(loss=tf.keras.losses.mean_squared_error,
                           optimizer=tf.train.AdamOptimizer(), metrics=['mae'])
        if tf.train.checkpoint_exists(self.checkpoint_path):
            print("Loading checkpoint")
            self.model.load_weights(self.checkpoint_path)
        else:
            print("No existing checkpoint, create new")

    def update(self, x, y):
        """Fit the model on one batch of (x, y) training data."""
        features = tf.convert_to_tensor(x, dtype=tf.float32)
        targets = tf.convert_to_tensor(y, dtype=tf.float32)
        self.model.fit(features, targets, callbacks=[self.cp_callback],
                       epochs=3, steps_per_epoch=10)

    def predict(self, state):
        """Return the model's value prediction for *state*."""
        tensor = tf.convert_to_tensor(np.asarray(state))
        return self.model.predict(tensor, steps=1)

    def reload(self):
        """Re-read the weights from the checkpoint file."""
        self.model.load_weights(self.checkpoint_path)

    def save(self):
        # Use TF to save the graph model instead of Keras save model to load
        # it in Golang.
        builder = tf.saved_model.builder.SavedModelBuilder("models/10x10-save")
        # Tag the model, required for Go
        builder.add_meta_graph_and_variables(self.sess, ["mytag"])
        builder.save()
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.InputLayer",
"tensorflow.keras.layers.MaxPool2D",
"ten... | [((135, 147), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (145, 147), True, 'import tensorflow as tf\n'), ((156, 195), 'tensorflow.keras.backend.set_session', 'tf.keras.backend.set_session', (['self.sess'], {}), '(self.sess)\n', (184, 195), True, 'import tensorflow as tf\n'), ((260, 359), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['self.checkpoint_path'], {'verbose': '(False)', 'save_weights_only': '(True)'}), '(self.checkpoint_path, verbose=False,\n save_weights_only=True)\n', (294, 359), True, 'import tensorflow as tf\n'), ((1278, 1321), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.002)'}), '(learning_rate=0.002)\n', (1300, 1321), True, 'import tensorflow as tf\n'), ((1496, 1544), 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (1522, 1544), True, 'import tensorflow as tf\n'), ((1758, 1799), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (1778, 1799), True, 'import tensorflow as tf\n'), ((1815, 1856), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (1835, 1856), True, 'import tensorflow as tf\n'), ((2345, 2406), 'tensorflow.saved_model.builder.SavedModelBuilder', 'tf.saved_model.builder.SavedModelBuilder', (['"""models/10x10-save"""'], {}), "('models/10x10-save')\n", (2385, 2406), True, 'import tensorflow as tf\n'), ((2045, 2062), 'numpy.asarray', 'np.asarray', (['state'], {}), '(state)\n', (2055, 2062), True, 'import numpy as np\n'), ((425, 490), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(10, 10, 3)', 'name': '"""input"""'}), "(input_shape=(10, 10, 3), name='input')\n", (451, 490), True, 'import tensorflow as tf\n'), ((504, 569), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', 
(['(16)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(16, kernel_size=(3, 3), activation='relu')\n", (526, 569), True, 'import tensorflow as tf\n'), ((618, 677), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)'}), '(pool_size=(2, 2), strides=(1, 1))\n', (643, 677), True, 'import tensorflow as tf\n'), ((691, 756), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), activation='relu')\n", (713, 756), True, 'import tensorflow as tf\n'), ((770, 829), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)'}), '(pool_size=(2, 2), strides=(1, 1))\n', (795, 829), True, 'import tensorflow as tf\n'), ((843, 908), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), activation='relu')\n", (865, 908), True, 'import tensorflow as tf\n'), ((922, 981), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)'}), '(pool_size=(2, 2), strides=(1, 1))\n', (947, 981), True, 'import tensorflow as tf\n'), ((995, 1020), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (1018, 1020), True, 'import tensorflow as tf\n'), ((1034, 1062), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1057, 1062), True, 'import tensorflow as tf\n'), ((1076, 1121), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1097, 1121), True, 'import tensorflow as tf\n'), ((1135, 1180), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1156, 1180), True, 'import tensorflow as tf\n'), ((1194, 
1240), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)', '"""tanh"""'], {'name': '"""infer"""'}), "(1, 'tanh', name='infer')\n", (1215, 1240), True, 'import tensorflow as tf\n'), ((1441, 1465), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (1463, 1465), True, 'import tensorflow as tf\n')] |
"""#Calculates the individual ln likelihood
# function li=posthoc_dist(data,xt,x,a,bpop,d,bhat,...
# EPS,model_switch,...
# hlf,hlg,hle,...
# INTER)
# % from davidian and giltinan p 173
# dmat= diag(d);
#
# fg_bhat=fg(x,a,bpop,bhat);
# ff_bhat = ff(model_switch,xt,fg_bhat);
#
# res = data - ff_bhat;
#
# if INTER==true
# h=LinMatrixH_foce(model_switch,xt,x,a,bpop,EPS,hle,bhat);
# else
# h=LinMatrixH(model_switch,xt,x,a,bpop,EPS,hle);
# end
# RR = diag(diag(h*diag(EPS)*h'));
#
# %li = log(det(dmat))+ (bhat'/dmat)*bhat+ ...
# li = (bhat'/dmat)*bhat+ ...
# log(det(RR))+ (res'/RR)*res;
Author: <NAME>, <NAME>
"""
import numpy as np
from project.feval import feval
from project.LinMatrixH import LinMatrixH
from project.diag_matlab import diag_matlab
def ind_likelihood(data,bpop,d,sigma,bInter,bUDDLike,model_switch,xt_ind,x,a,bocc_ind,poped_db,lC,det_res_var,b_ind):
# % from davidian and giltinan p 173
if bUDDLike == False:
#browser()
fg_bhat = feval(poped_db["model"]["fg_pointer"],x,a,bpop,b_ind,bocc_ind)
ipred = feval(poped_db["model"]["ff_pointer"],model_switch,xt_ind,fg_bhat,poped_db)[["y"]]
res = data-ipred#Individual residuals
if bInter == True:
#For Cases WITH interaction, linearize around eta = eta^
#eps = zeros(size(tdata),1),size(sigma,2))
#eps = zeros(size(t(data),1),size(sigma,2))
h = LinMatrixH(np.transpose(model_switch),
np.transpose(xt_ind),
np.transpose(x),
np.transpose(a),
bpop,
b_ind,
bocc_ind,
poped_db)["y"] #The covariance for this individual
res_var = diag_matlab(diag_matlab(np.transpose(np.matmul(h,sigma),np.transpose(h))))
lC = np.linalg.solve(np.linalg.cholesky(res_var),diag_matlab(res_var.size))
det_res_var = np.linalg.det(res_var)
#else:
#Cases WITHOUT interaction, linearize around eta = 0
# h = LinMatrixH(tdata,cdata,theta,zeros(size(eta)),eps) #The covariance for this individual
R = (np.np.matmul(np.transpose(res),lC))
li = -1/2*np.log(det_res_var)-1/2*np.matmul(R,np.transpose(R)) # + const
else:
#%UDD likelihood
#li=sum(model(tdata,cdata,theta,eta))
raise Exception("User defined likelihood not implemented for PopED in R")
return li
| [
"project.feval.feval",
"numpy.log",
"numpy.transpose",
"numpy.matmul",
"numpy.linalg.det",
"project.diag_matlab.diag_matlab",
"numpy.linalg.cholesky"
] | [((1129, 1196), 'project.feval.feval', 'feval', (["poped_db['model']['fg_pointer']", 'x', 'a', 'bpop', 'b_ind', 'bocc_ind'], {}), "(poped_db['model']['fg_pointer'], x, a, bpop, b_ind, bocc_ind)\n", (1134, 1196), False, 'from project.feval import feval\n'), ((1209, 1288), 'project.feval.feval', 'feval', (["poped_db['model']['ff_pointer']", 'model_switch', 'xt_ind', 'fg_bhat', 'poped_db'], {}), "(poped_db['model']['ff_pointer'], model_switch, xt_ind, fg_bhat, poped_db)\n", (1214, 1288), False, 'from project.feval import feval\n'), ((2110, 2132), 'numpy.linalg.det', 'np.linalg.det', (['res_var'], {}), '(res_var)\n', (2123, 2132), True, 'import numpy as np\n'), ((2355, 2372), 'numpy.transpose', 'np.transpose', (['res'], {}), '(res)\n', (2367, 2372), True, 'import numpy as np\n'), ((2028, 2055), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['res_var'], {}), '(res_var)\n', (2046, 2055), True, 'import numpy as np\n'), ((2056, 2081), 'project.diag_matlab.diag_matlab', 'diag_matlab', (['res_var.size'], {}), '(res_var.size)\n', (2067, 2081), False, 'from project.diag_matlab import diag_matlab\n'), ((2397, 2416), 'numpy.log', 'np.log', (['det_res_var'], {}), '(det_res_var)\n', (2403, 2416), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.transpose', 'np.transpose', (['model_switch'], {}), '(model_switch)\n', (1596, 1610), True, 'import numpy as np\n'), ((1634, 1654), 'numpy.transpose', 'np.transpose', (['xt_ind'], {}), '(xt_ind)\n', (1646, 1654), True, 'import numpy as np\n'), ((1678, 1693), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (1690, 1693), True, 'import numpy as np\n'), ((1717, 1732), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (1729, 1732), True, 'import numpy as np\n'), ((2433, 2448), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (2445, 2448), True, 'import numpy as np\n'), ((1956, 1975), 'numpy.matmul', 'np.matmul', (['h', 'sigma'], {}), '(h, sigma)\n', (1965, 1975), True, 'import numpy as np\n'), ((1975, 1990), 
'numpy.transpose', 'np.transpose', (['h'], {}), '(h)\n', (1987, 1990), True, 'import numpy as np\n')] |
import string
from typing import Dict, Callable, List, Union
import numpy as np
import spacy
def spacy_wrap(fn: Callable, language: str = "en_core_web_sm", **kwargs) -> Callable:
"""
Wrap the function so that it runs the input text data
through a spacy model before the function call.
"""
from allennlp.common.util import get_spacy_model
def new_fn(data: Union[spacy.tokens.doc.Doc, Dict, str]):
if not isinstance(data, spacy.tokens.doc.Doc):
model = get_spacy_model(language, **kwargs)
if isinstance(data, Dict):
for key, val in data.items():
if isinstance(val, str):
data[key] = model(val)
elif isinstance(data, tuple):
data = tuple(model(tup) if isinstance(tup, str) else tup for tup in data)
elif isinstance(data, str):
data = model(data)
else:
pass
return fn(data)
return new_fn
def strip_punctuation(data: Union[str, spacy.tokens.doc.Doc]) -> str:
"""
Removes all punctuation from `data`.
"""
if isinstance(data, str):
return data.rstrip(string.punctuation)
elif isinstance(data, spacy.tokens.doc.Doc):
while len(data) and data[-1].is_punct:
data = data[:-1]
else:
# Can log a warning here, but it may get noisy.
pass
return str(data)
def toggle_punctuation(data: str) -> List[str]:
"""
If `data` contains any punctuation, it is removed.
Otherwise, a `.` is added to the string.
Returns a list of strings.
Eg.
`data` = "This was great!"
Returns ["This was great", "This was great."]
`data` = "The movie was good"
Returns ["The movie was good."]
"""
s = strip_punctuation(data)
ret = []
if s != data:
ret.append(s)
if s + "." != data:
ret.append(s + ".")
return ret
def random_string(n: int) -> str:
"""
Returns a random alphanumeric string of length `n`.
"""
return "".join(np.random.choice([x for x in string.ascii_letters + string.digits], n))
def random_url(n: int = 6) -> str:
"""
Returns a random url of length `n`.
"""
return "https://t.co/%s" % random_string(n)
def random_handle(n: int = 6) -> str:
"""
Returns a random handle of length `n`. Eg. "@randomstr23`
"""
return "@%s" % random_string(n)
def add_random_strings(data: str) -> List[str]:
"""
Adds random strings to the start and end of the string `data`.
Returns a list of strings.
"""
urls_and_handles = [random_url(n=6) for _ in range(5)] + [random_handle() for _ in range(5)]
rets = ["%s %s" % (x, data) for x in urls_and_handles]
rets += ["%s %s" % (data, x) for x in urls_and_handles]
return rets
| [
"allennlp.common.util.get_spacy_model",
"numpy.random.choice"
] | [((2068, 2138), 'numpy.random.choice', 'np.random.choice', (['[x for x in string.ascii_letters + string.digits]', 'n'], {}), '([x for x in string.ascii_letters + string.digits], n)\n', (2084, 2138), True, 'import numpy as np\n'), ((498, 533), 'allennlp.common.util.get_spacy_model', 'get_spacy_model', (['language'], {}), '(language, **kwargs)\n', (513, 533), False, 'from allennlp.common.util import get_spacy_model\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LossMonitor Callback class."""
import time
import numpy as np
from mindspore.common.tensor import Tensor
from ._callback import Callback
class LossMonitor(Callback):
"""
Monitor the loss in training.
If the loss is NAN or INF, it will terminate training.
Note:
If per_print_times is 0 do not print loss.
Args:
per_print_times (int): Print loss every times. Default: 1.
lr_init (numpy array): train learning rate. Default: None.
Raises:
ValueError: If print_step is not int or less than zero.
Examples:
>>> LossMonitor(100, lr_init=Tensor([0.05]*100).asnumpy())
"""
def __init__(self, per_print_times=1, lr_init=None):
super(LossMonitor, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("print_step must be int and >= 0.")
self._per_print_times = per_print_times
self.lr_init = lr_init
def epoch_begin(self, run_context):
self.losses = []
self.epoch_time = time.time()
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / cb_params.batch_num
print("Epoch time: {:5.3f}, per step time: {:5.3f}, "
"avg loss: {:5.3f}".format(epoch_mseconds,
per_step_mseconds,
np.mean(self.losses)))
print("*" * 60)
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
cb_params = run_context.original_args()
step_mseconds = (time.time() - self.step_time) * 1000
step_loss = cb_params.net_outputs
if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
step_loss = step_loss[0]
if isinstance(step_loss, Tensor):
step_loss = np.mean(step_loss.asnumpy())
self.losses.append(step_loss)
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
if isinstance(step_loss, float) and (np.isnan(step_loss) or np.isinf(step_loss)):
raise ValueError("Epoch: [{:3d}/{:3d}], step: [{:5d}/{:5d}]. "
"Invalid loss, terminating training.".format(
cb_params.cur_epoch_num - 1, cb_params.epoch_num,
cur_step_in_epoch, cb_params.batch_num))
if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
print("Epoch: [{:3d}/{:3d}], step: [{:5d}/{:5d}], "
"loss: [{:5.4f}/{:5.4f}], time: [{:5.4f}]".format(
cb_params.cur_epoch_num - 1, cb_params.epoch_num,
cur_step_in_epoch, cb_params.batch_num,
step_loss, np.mean(self.losses),
step_mseconds), flush=True)
| [
"numpy.mean",
"numpy.isinf",
"numpy.isnan",
"time.time"
] | [((1726, 1737), 'time.time', 'time.time', ([], {}), '()\n', (1735, 1737), False, 'import time\n'), ((2286, 2297), 'time.time', 'time.time', ([], {}), '()\n', (2295, 2297), False, 'import time\n'), ((1851, 1862), 'time.time', 'time.time', ([], {}), '()\n', (1860, 1862), False, 'import time\n'), ((2174, 2194), 'numpy.mean', 'np.mean', (['self.losses'], {}), '(self.losses)\n', (2181, 2194), True, 'import numpy as np\n'), ((2409, 2420), 'time.time', 'time.time', ([], {}), '()\n', (2418, 2420), False, 'import time\n'), ((2871, 2890), 'numpy.isnan', 'np.isnan', (['step_loss'], {}), '(step_loss)\n', (2879, 2890), True, 'import numpy as np\n'), ((2894, 2913), 'numpy.isinf', 'np.isinf', (['step_loss'], {}), '(step_loss)\n', (2902, 2913), True, 'import numpy as np\n'), ((3619, 3639), 'numpy.mean', 'np.mean', (['self.losses'], {}), '(self.losses)\n', (3626, 3639), True, 'import numpy as np\n')] |
from __future__ import division, print_function, absolute_import
from itertools import groupby
from warnings import warn
import numpy as np
from scipy.sparse import find, coo_matrix
EPS = np.finfo(float).eps
def validate_first_step(first_step, t0, t_bound):
"""Assert that first_step is valid and return it."""
if first_step <= 0:
raise ValueError("`first_step` must be positive.")
if first_step > np.abs(t_bound - t0):
raise ValueError("`first_step` exceeds bounds.")
return first_step
def validate_max_step(max_step):
"""Assert that max_Step is valid and return it."""
if max_step <= 0:
raise ValueError("`max_step` must be positive.")
return max_step
def warn_extraneous(extraneous):
"""Display a warning for extraneous keyword arguments.
The initializer of each solver class is expected to collect keyword
arguments that it doesn't understand and warn about them. This function
prints a warning for each key in the supplied dictionary.
Parameters
----------
extraneous : dict
Extraneous keyword arguments
"""
if extraneous:
warn("The following arguments have no effect for a chosen solver: {}."
.format(", ".join("`{}`".format(x) for x in extraneous)))
def validate_tol(rtol, atol, n):
"""Validate tolerance values."""
if rtol < 100 * EPS:
warn("`rtol` is too low, setting to {}".format(100 * EPS))
rtol = 100 * EPS
atol = np.asarray(atol)
if atol.ndim > 0 and atol.shape != (n,):
raise ValueError("`atol` has wrong shape.")
if np.any(atol < 0):
raise ValueError("`atol` must be positive.")
return rtol, atol
def norm(x):
"""Compute RMS norm."""
return np.linalg.norm(x) / x.size ** 0.5
def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
"""Empirically select a good initial step.
The algorithm is described in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t0 : float
Initial value of the independent variable.
y0 : ndarray, shape (n,)
Initial value of the dependent variable.
f0 : ndarray, shape (n,)
Initial value of the derivative, i.e., ``fun(t0, y0)``.
direction : float
Integration direction.
order : float
Error estimator order. It means that the error controlled by the
algorithm is proportional to ``step_size ** (order + 1)`.
rtol : float
Desired relative tolerance.
atol : float
Desired absolute tolerance.
Returns
-------
h_abs : float
Absolute value of the suggested initial step.
References
----------
.. [1] <NAME>, <NAME> <NAME>, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
if y0.size == 0:
return np.inf
scale = atol + np.abs(y0) * rtol
d0 = norm(y0 / scale)
d1 = norm(f0 / scale)
if d0 < 1e-5 or d1 < 1e-5:
h0 = 1e-6
else:
h0 = 0.01 * d0 / d1
y1 = y0 + h0 * direction * f0
f1 = fun(t0 + h0 * direction, y1)
d2 = norm((f1 - f0) / scale) / h0
if d1 <= 1e-15 and d2 <= 1e-15:
h1 = max(1e-6, h0 * 1e-3)
else:
h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
return min(100 * h0, h1)
class OdeSolution(object):
"""Continuous ODE solution.
It is organized as a collection of `DenseOutput` objects which represent
local interpolants. It provides an algorithm to select a right interpolant
for each given point.
The interpolants cover the range between `t_min` and `t_max` (see
Attributes below). Evaluation outside this interval is not forbidden, but
the accuracy is not guaranteed.
When evaluating at a breakpoint (one of the values in `ts`) a segment with
the lower index is selected.
Parameters
----------
ts : array_like, shape (n_segments + 1,)
Time instants between which local interpolants are defined. Must
be strictly increasing or decreasing (zero segment with two points is
also allowed).
interpolants : list of DenseOutput with n_segments elements
Local interpolants. An i-th interpolant is assumed to be defined
between ``ts[i]`` and ``ts[i + 1]``.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
def __init__(self, ts, interpolants):
ts = np.asarray(ts)
d = np.diff(ts)
# The first case covers integration on zero segment.
if not ((ts.size == 2 and ts[0] == ts[-1])
or np.all(d > 0) or np.all(d < 0)):
raise ValueError("`ts` must be strictly increasing or decreasing.")
self.n_segments = len(interpolants)
if ts.shape != (self.n_segments + 1,):
raise ValueError("Numbers of time stamps and interpolants "
"don't match.")
self.ts = ts
self.interpolants = interpolants
if ts[-1] >= ts[0]:
self.t_min = ts[0]
self.t_max = ts[-1]
self.ascending = True
self.ts_sorted = ts
else:
self.t_min = ts[-1]
self.t_max = ts[0]
self.ascending = False
self.ts_sorted = ts[::-1]
def _call_single(self, t):
# Here we preserve a certain symmetry that when t is in self.ts,
# then we prioritize a segment with a lower index.
if self.ascending:
ind = np.searchsorted(self.ts_sorted, t, side='left')
else:
ind = np.searchsorted(self.ts_sorted, t, side='right')
segment = min(max(ind - 1, 0), self.n_segments - 1)
if not self.ascending:
segment = self.n_segments - 1 - segment
return self.interpolants[segment](t)
def __call__(self, t):
"""Evaluate the solution.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate at.
Returns
-------
y : ndarray, shape (n_states,) or (n_states, n_points)
Computed values. Shape depends on whether `t` is a scalar or a
1-D array.
"""
t = np.asarray(t)
if t.ndim == 0:
return self._call_single(t)
order = np.argsort(t)
reverse = np.empty_like(order)
reverse[order] = np.arange(order.shape[0])
t_sorted = t[order]
# See comment in self._call_single.
if self.ascending:
segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
else:
segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
segments -= 1
segments[segments < 0] = 0
segments[segments > self.n_segments - 1] = self.n_segments - 1
if not self.ascending:
segments = self.n_segments - 1 - segments
ys = []
group_start = 0
for segment, group in groupby(segments):
group_end = group_start + len(list(group))
y = self.interpolants[segment](t_sorted[group_start:group_end])
ys.append(y)
group_start = group_end
ys = np.hstack(ys)
ys = ys[:, reverse]
return ys
NUM_JAC_DIFF_REJECT = EPS ** 0.875
NUM_JAC_DIFF_SMALL = EPS ** 0.75
NUM_JAC_DIFF_BIG = EPS ** 0.25
NUM_JAC_MIN_FACTOR = 1e3 * EPS
NUM_JAC_FACTOR_INCREASE = 10
NUM_JAC_FACTOR_DECREASE = 0.1
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
"""Finite differences Jacobian approximation tailored for ODE solvers.
This function computes finite difference approximation to the Jacobian
matrix of `fun` with respect to `y` using forward differences.
The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
``d f_i / d y_j``.
A special feature of this function is the ability to correct the step
size from iteration to iteration. The main idea is to keep the finite
difference significantly separated from its round-off error which
approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a
huge error and assures that the estimated derivative are reasonably close
to the true values (i.e., the finite difference approximation is at least
qualitatively reflects the structure of the true Jacobian).
Parameters
----------
fun : callable
Right-hand side of the system implemented in a vectorized fashion.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
f : ndarray, shape (n,)
Value of the right hand side at (t, y).
threshold : float
Threshold for `y` value used for computing the step size as
``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
absolute tolerance (atol) for a solver should be passed as `threshold`.
factor : ndarray with shape (n,) or None
Factor to use for computing the step size. Pass None for the very
evaluation, then use the value returned from this function.
sparsity : tuple (structure, groups) or None
Sparsity structure of the Jacobian, `structure` must be csc_matrix.
Returns
-------
J : ndarray or csc_matrix, shape (n, n)
Jacobian matrix.
factor : ndarray, shape (n,)
Suggested `factor` for the next evaluation.
"""
y = np.asarray(y)
n = y.shape[0]
if n == 0:
return np.empty((0, 0)), factor
if factor is None:
factor = np.full(n, EPS ** 0.5)
else:
factor = factor.copy()
# Direct the step as ODE dictates, hoping that such a step won't lead to
# a problematic region. For complex ODEs it makes sense to use the real
# part of f as we use steps along real axis.
f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
y_scale = f_sign * np.maximum(threshold, np.abs(y))
h = (y + factor * y_scale) - y
# Make sure that the step is not 0 to start with. Not likely it will be
# executed often.
for i in np.nonzero(h == 0)[0]:
while h[i] == 0:
factor[i] *= 10
h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]
if sparsity is None:
return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
else:
structure, groups = sparsity
return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
structure, groups)
def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
n = y.shape[0]
h_vecs = np.diag(h)
f_new = fun(t, y[:, None] + h_vecs)
diff = f_new - f[:, None]
max_ind = np.argmax(np.abs(diff), axis=0)
r = np.arange(n)
max_diff = np.abs(diff[max_ind, r])
scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
if np.any(diff_too_small):
ind, = np.nonzero(diff_too_small)
new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
h_vecs[ind, ind] = h_new
f_new = fun(t, y[:, None] + h_vecs[:, ind])
diff_new = f_new - f[:, None]
max_ind = np.argmax(np.abs(diff_new), axis=0)
r = np.arange(ind.shape[0])
max_diff_new = np.abs(diff_new[max_ind, r])
scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
if np.any(update):
update, = np.nonzero(update)
update_ind = ind[update]
factor[update_ind] = new_factor[update]
h[update_ind] = h_new[update]
diff[:, update_ind] = diff_new[:, update]
scale[update_ind] = scale_new[update]
max_diff[update_ind] = max_diff_new[update]
diff /= h
factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
return diff, factor
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
n = y.shape[0]
n_groups = np.max(groups) + 1
h_vecs = np.empty((n_groups, n))
for group in range(n_groups):
e = np.equal(group, groups)
h_vecs[group] = h * e
h_vecs = h_vecs.T
f_new = fun(t, y[:, None] + h_vecs)
df = f_new - f[:, None]
i, j, _ = find(structure)
diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
r = np.arange(n)
max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
scale = np.maximum(np.abs(f[max_ind]),
np.abs(f_new[max_ind, groups[r]]))
diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
if np.any(diff_too_small):
ind, = np.nonzero(diff_too_small)
new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
h_new_all = np.zeros(n)
h_new_all[ind] = h_new
groups_unique = np.unique(groups[ind])
groups_map = np.empty(n_groups, dtype=int)
h_vecs = np.empty((groups_unique.shape[0], n))
for k, group in enumerate(groups_unique):
e = np.equal(group, groups)
h_vecs[k] = h_new_all * e
groups_map[group] = k
h_vecs = h_vecs.T
f_new = fun(t, y[:, None] + h_vecs)
df = f_new - f[:, None]
i, j, _ = find(structure[:, ind])
diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
(i, j)), shape=(n, ind.shape[0])).tocsc()
max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
r = np.arange(ind.shape[0])
max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
scale_new = np.maximum(
np.abs(f[max_ind_new]),
np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
if np.any(update):
update, = np.nonzero(update)
update_ind = ind[update]
factor[update_ind] = new_factor[update]
h[update_ind] = h_new[update]
diff[:, update_ind] = diff_new[:, update]
scale[update_ind] = scale_new[update]
max_diff[update_ind] = max_diff_new[update]
diff.data /= np.repeat(h, np.diff(diff.indptr))
factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
return diff, factor
| [
"numpy.abs",
"numpy.maximum",
"numpy.empty",
"numpy.argsort",
"numpy.arange",
"numpy.linalg.norm",
"numpy.diag",
"numpy.unique",
"numpy.full",
"numpy.empty_like",
"numpy.equal",
"numpy.finfo",
"numpy.max",
"scipy.sparse.coo_matrix",
"numpy.real",
"numpy.asarray",
"scipy.sparse.find",... | [((190, 205), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (198, 205), True, 'import numpy as np\n'), ((1485, 1501), 'numpy.asarray', 'np.asarray', (['atol'], {}), '(atol)\n', (1495, 1501), True, 'import numpy as np\n'), ((1607, 1623), 'numpy.any', 'np.any', (['(atol < 0)'], {}), '(atol < 0)\n', (1613, 1623), True, 'import numpy as np\n'), ((9450, 9463), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (9460, 9463), True, 'import numpy as np\n'), ((10576, 10586), 'numpy.diag', 'np.diag', (['h'], {}), '(h)\n', (10583, 10586), True, 'import numpy as np\n'), ((10711, 10723), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (10720, 10723), True, 'import numpy as np\n'), ((10739, 10763), 'numpy.abs', 'np.abs', (['diff[max_ind, r]'], {}), '(diff[max_ind, r])\n', (10745, 10763), True, 'import numpy as np\n'), ((10902, 10924), 'numpy.any', 'np.any', (['diff_too_small'], {}), '(diff_too_small)\n', (10908, 10924), True, 'import numpy as np\n'), ((12044, 12082), 'numpy.maximum', 'np.maximum', (['factor', 'NUM_JAC_MIN_FACTOR'], {}), '(factor, NUM_JAC_MIN_FACTOR)\n', (12054, 12082), True, 'import numpy as np\n'), ((12250, 12273), 'numpy.empty', 'np.empty', (['(n_groups, n)'], {}), '((n_groups, n))\n', (12258, 12273), True, 'import numpy as np\n'), ((12480, 12495), 'scipy.sparse.find', 'find', (['structure'], {}), '(structure)\n', (12484, 12495), False, 'from scipy.sparse import find, coo_matrix\n'), ((12633, 12645), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (12642, 12645), True, 'import numpy as np\n'), ((12875, 12897), 'numpy.any', 'np.any', (['diff_too_small'], {}), '(diff_too_small)\n', (12881, 12897), True, 'import numpy as np\n'), ((14690, 14728), 'numpy.maximum', 'np.maximum', (['factor', 'NUM_JAC_MIN_FACTOR'], {}), '(factor, NUM_JAC_MIN_FACTOR)\n', (14700, 14728), True, 'import numpy as np\n'), ((422, 442), 'numpy.abs', 'np.abs', (['(t_bound - t0)'], {}), '(t_bound - t0)\n', (428, 442), True, 'import numpy as np\n'), 
((1755, 1772), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (1769, 1772), True, 'import numpy as np\n'), ((4480, 4494), 'numpy.asarray', 'np.asarray', (['ts'], {}), '(ts)\n', (4490, 4494), True, 'import numpy as np\n'), ((4507, 4518), 'numpy.diff', 'np.diff', (['ts'], {}), '(ts)\n', (4514, 4518), True, 'import numpy as np\n'), ((6279, 6292), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (6289, 6292), True, 'import numpy as np\n'), ((6375, 6388), 'numpy.argsort', 'np.argsort', (['t'], {}), '(t)\n', (6385, 6388), True, 'import numpy as np\n'), ((6407, 6427), 'numpy.empty_like', 'np.empty_like', (['order'], {}), '(order)\n', (6420, 6427), True, 'import numpy as np\n'), ((6453, 6478), 'numpy.arange', 'np.arange', (['order.shape[0]'], {}), '(order.shape[0])\n', (6462, 6478), True, 'import numpy as np\n'), ((7034, 7051), 'itertools.groupby', 'groupby', (['segments'], {}), '(segments)\n', (7041, 7051), False, 'from itertools import groupby\n'), ((7259, 7272), 'numpy.hstack', 'np.hstack', (['ys'], {}), '(ys)\n', (7268, 7272), True, 'import numpy as np\n'), ((9579, 9601), 'numpy.full', 'np.full', (['n', '(EPS ** 0.5)'], {}), '(n, EPS ** 0.5)\n', (9586, 9601), True, 'import numpy as np\n'), ((10102, 10120), 'numpy.nonzero', 'np.nonzero', (['(h == 0)'], {}), '(h == 0)\n', (10112, 10120), True, 'import numpy as np\n'), ((10681, 10693), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (10687, 10693), True, 'import numpy as np\n'), ((10787, 10805), 'numpy.abs', 'np.abs', (['f[max_ind]'], {}), '(f[max_ind])\n', (10793, 10805), True, 'import numpy as np\n'), ((10807, 10832), 'numpy.abs', 'np.abs', (['f_new[max_ind, r]'], {}), '(f_new[max_ind, r])\n', (10813, 10832), True, 'import numpy as np\n'), ((10941, 10967), 'numpy.nonzero', 'np.nonzero', (['diff_too_small'], {}), '(diff_too_small)\n', (10951, 10967), True, 'import numpy as np\n'), ((11278, 11301), 'numpy.arange', 'np.arange', (['ind.shape[0]'], {}), '(ind.shape[0])\n', (11287, 11301), True, 'import 
numpy as np\n'), ((11325, 11353), 'numpy.abs', 'np.abs', (['diff_new[max_ind, r]'], {}), '(diff_new[max_ind, r])\n', (11331, 11353), True, 'import numpy as np\n'), ((11515, 11529), 'numpy.any', 'np.any', (['update'], {}), '(update)\n', (11521, 11529), True, 'import numpy as np\n'), ((12218, 12232), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (12224, 12232), True, 'import numpy as np\n'), ((12320, 12343), 'numpy.equal', 'np.equal', (['group', 'groups'], {}), '(group, groups)\n', (12328, 12343), True, 'import numpy as np\n'), ((12729, 12747), 'numpy.abs', 'np.abs', (['f[max_ind]'], {}), '(f[max_ind])\n', (12735, 12747), True, 'import numpy as np\n'), ((12772, 12805), 'numpy.abs', 'np.abs', (['f_new[max_ind, groups[r]]'], {}), '(f_new[max_ind, groups[r]])\n', (12778, 12805), True, 'import numpy as np\n'), ((12914, 12940), 'numpy.nonzero', 'np.nonzero', (['diff_too_small'], {}), '(diff_too_small)\n', (12924, 12940), True, 'import numpy as np\n'), ((13082, 13093), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13090, 13093), True, 'import numpy as np\n'), ((13150, 13172), 'numpy.unique', 'np.unique', (['groups[ind]'], {}), '(groups[ind])\n', (13159, 13172), True, 'import numpy as np\n'), ((13194, 13223), 'numpy.empty', 'np.empty', (['n_groups'], {'dtype': 'int'}), '(n_groups, dtype=int)\n', (13202, 13223), True, 'import numpy as np\n'), ((13241, 13278), 'numpy.empty', 'np.empty', (['(groups_unique.shape[0], n)'], {}), '((groups_unique.shape[0], n))\n', (13249, 13278), True, 'import numpy as np\n'), ((13562, 13585), 'scipy.sparse.find', 'find', (['structure[:, ind]'], {}), '(structure[:, ind])\n', (13566, 13585), False, 'from scipy.sparse import find, coo_matrix\n'), ((13807, 13830), 'numpy.arange', 'np.arange', (['ind.shape[0]'], {}), '(ind.shape[0])\n', (13816, 13830), True, 'import numpy as np\n'), ((14123, 14137), 'numpy.any', 'np.any', (['update'], {}), '(update)\n', (14129, 14137), True, 'import numpy as np\n'), ((14502, 14522), 'numpy.diff', 
'np.diff', (['diff.indptr'], {}), '(diff.indptr)\n', (14509, 14522), True, 'import numpy as np\n'), ((2912, 2922), 'numpy.abs', 'np.abs', (['y0'], {}), '(y0)\n', (2918, 2922), True, 'import numpy as np\n'), ((5551, 5598), 'numpy.searchsorted', 'np.searchsorted', (['self.ts_sorted', 't'], {'side': '"""left"""'}), "(self.ts_sorted, t, side='left')\n", (5566, 5598), True, 'import numpy as np\n'), ((5631, 5679), 'numpy.searchsorted', 'np.searchsorted', (['self.ts_sorted', 't'], {'side': '"""right"""'}), "(self.ts_sorted, t, side='right')\n", (5646, 5679), True, 'import numpy as np\n'), ((6602, 6656), 'numpy.searchsorted', 'np.searchsorted', (['self.ts_sorted', 't_sorted'], {'side': '"""left"""'}), "(self.ts_sorted, t_sorted, side='left')\n", (6617, 6656), True, 'import numpy as np\n'), ((6694, 6749), 'numpy.searchsorted', 'np.searchsorted', (['self.ts_sorted', 't_sorted'], {'side': '"""right"""'}), "(self.ts_sorted, t_sorted, side='right')\n", (6709, 6749), True, 'import numpy as np\n'), ((9513, 9529), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (9521, 9529), True, 'import numpy as np\n'), ((9944, 9953), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (9950, 9953), True, 'import numpy as np\n'), ((11240, 11256), 'numpy.abs', 'np.abs', (['diff_new'], {}), '(diff_new)\n', (11246, 11256), True, 'import numpy as np\n'), ((11385, 11403), 'numpy.abs', 'np.abs', (['f[max_ind]'], {}), '(f[max_ind])\n', (11391, 11403), True, 'import numpy as np\n'), ((11405, 11430), 'numpy.abs', 'np.abs', (['f_new[max_ind, r]'], {}), '(f_new[max_ind, r])\n', (11411, 11430), True, 'import numpy as np\n'), ((11553, 11571), 'numpy.nonzero', 'np.nonzero', (['update'], {}), '(update)\n', (11563, 11571), True, 'import numpy as np\n'), ((12507, 12559), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(df[i, groups[j]], (i, j))'], {'shape': '(n, n)'}), '((df[i, groups[j]], (i, j)), shape=(n, n))\n', (12517, 12559), False, 'from scipy.sparse import find, coo_matrix\n'), ((13345, 13368), 
'numpy.equal', 'np.equal', (['group', 'groups'], {}), '(group, groups)\n', (13353, 13368), True, 'import numpy as np\n'), ((13951, 13973), 'numpy.abs', 'np.abs', (['f[max_ind_new]'], {}), '(f[max_ind_new])\n', (13957, 13973), True, 'import numpy as np\n'), ((13987, 14038), 'numpy.abs', 'np.abs', (['f_new[max_ind_new, groups_map[groups[ind]]]'], {}), '(f_new[max_ind_new, groups_map[groups[ind]]])\n', (13993, 14038), True, 'import numpy as np\n'), ((14161, 14179), 'numpy.nonzero', 'np.nonzero', (['update'], {}), '(update)\n', (14171, 14179), True, 'import numpy as np\n'), ((4650, 4663), 'numpy.all', 'np.all', (['(d > 0)'], {}), '(d > 0)\n', (4656, 4663), True, 'import numpy as np\n'), ((4667, 4680), 'numpy.all', 'np.all', (['(d < 0)'], {}), '(d < 0)\n', (4673, 4680), True, 'import numpy as np\n'), ((12672, 12696), 'numpy.abs', 'np.abs', (['diff[max_ind, r]'], {}), '(diff[max_ind, r])\n', (12678, 12696), True, 'import numpy as np\n'), ((13605, 13690), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(df[i, groups_map[groups[ind[j]]]], (i, j))'], {'shape': '(n, ind.shape[0])'}), '((df[i, groups_map[groups[ind[j]]]], (i, j)), shape=(n, ind.shape[0])\n )\n', (13615, 13690), False, 'from scipy.sparse import find, coo_matrix\n'), ((13865, 13897), 'numpy.abs', 'np.abs', (['diff_new[max_ind_new, r]'], {}), '(diff_new[max_ind_new, r])\n', (13871, 13897), True, 'import numpy as np\n'), ((9864, 9874), 'numpy.real', 'np.real', (['f'], {}), '(f)\n', (9871, 9874), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
# &Author AnFany
# 适用于多维输出
import numpy as np
import tensorflow as tf
'''基于TensorFlow构建训练函数'''
# Activation wrapper: affine transform followed by the chosen nonlinearity.
def activate(input_layer, weights, biases, actfunc):
    """Return actfunc(input_layer @ weights + biases).

    Args:
        input_layer: 2-D tensor of inputs, shape (batch, in_dim).
        weights: weight matrix, shape (in_dim, out_dim).
        biases: bias vector, shape (out_dim,).
        actfunc: one of 'relu', 'tanh' or 'sigmoid'.

    Returns:
        The activated layer tensor.

    Raises:
        ValueError: for an unknown actfunc name (the old code silently
            returned None, which only surfaced later as a confusing
            TypeError deep inside the graph construction).
    """
    layer = tf.add(tf.matmul(input_layer, weights), biases)
    if actfunc == 'relu':
        return tf.nn.relu(layer)
    elif actfunc == 'tanh':
        return tf.nn.tanh(layer)
    elif actfunc == 'sigmoid':
        return tf.nn.sigmoid(layer)
    raise ValueError('unsupported activation function: %r' % actfunc)
# The weight-initialization scheme should match the activation function:
# sigmoid: xavier    tanh: xavier    relu: he
# Build the training function
def Ten_train(xdata, ydata, prexdata, preydata, hiddenlayers=3, hiddennodes=100, \
              learn_rate=0.05, itertimes=100000, batch_size=200, activate_func='sigmoid', break_error=0.0043):
    """Build and train a fully-connected network, checkpointing the iteration
    with the best combined (train + validation) error.

    Args:
        xdata, ydata: training inputs/targets (2-D, one row per sample).
        prexdata, preydata: validation inputs/targets.
        hiddenlayers: number of hidden layers.
        hiddennodes: units per hidden layer.
        learn_rate: Adam learning rate.
        itertimes: maximum number of training iterations.
        batch_size: mini-batch size for the stochastic updates.
        activate_func: 'relu', 'tanh' or 'sigmoid'.
        break_error: early-stop threshold on the training loss.

    Returns:
        (best_iteration, hiddenlayers, train_error, validation_error)
    """
    # Start assembling the network
    Input_Dimen = len(xdata[0])
    Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])]  # input dim, hidden sizes, output dim
    # Placeholders for inputs and targets
    x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')
    y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
    # Dynamically named per-layer variables via locals()
    VAR_NAME = locals()
    for jj in range(hiddenlayers + 1):
        # xavier-style scaling (divide by sqrt(fan_in)) suits sigmoid/tanh
        VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,\
                                           name='weight%s' % jj) / np.sqrt(Unit_Layers[jj])  # sigmoid tanh
        # for relu use He initialization instead: divide by np.sqrt(Unit_Layers[jj] / 2)
        VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj),
                                         dtype=tf.float32)
        if jj == 0:
            VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj), actfunc=activate_func)
        else:
            VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj), \
                                                 eval('bias%s' % jj), actfunc=activate_func)
    # Mean squared error over the output vector
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_target - eval('ooutda%s' % (hiddenlayers))), reduction_indices=[1]))
    # Optimizer
    my_opt = tf.train.AdamOptimizer(learn_rate)
    train_step = my_opt.minimize(loss)
    # Variable initialization op
    init = tf.global_variables_initializer()
    # Error bookkeeping per iteration
    accudict = {}
    loss_vec = []  # training error history
    loss_pre = []  # validation error history
    accunum = np.inf
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(init)
        for i in range(itertimes):
            # random mini-batch without replacement
            rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
            rand_x = xdata[rand_index]
            rand_y = ydata[rand_index]
            sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
            temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
            temmp_losspre = sess.run(loss, feed_dict={x_data: prexdata, y_target: preydata})
            loss_vec.append(temp_loss)
            loss_pre.append(temmp_losspre)
            accudict[i] = [temp_loss, temmp_losspre]
            # Periodic progress report
            if (i + 1) % 20 == 0:
                print('Generation: ' + str(i + 1) + '. 归一训练误差:Loss = ' + str(temp_loss) +
                      '. 归一验证误差:Loss = ' + str(temmp_losspre))
            # Early exit: threshold chosen empirically; the loss may fluctuate
            # early on because of the stochastic mini-batch updates
            if temp_loss < break_error:
                break
            # Track the iteration with the lowest weighted combined error
            # and checkpoint the model parameters there
            zongheaccu = 0.01 * temp_loss + 0.99 * temmp_losspre
            if zongheaccu < accunum:
                accunum = zongheaccu
                # save the model checkpoint (note the path)
                saver.save(sess, './pm25', global_step=i)  # 注意路径
    sign = min(accudict.items(), key=lambda d: 0.01 * d[1][0] + 0.99 * d[1][1])[0]
    # Return training and validation error at the best iteration
    xunlian_error, adderror = loss_vec[sign], loss_pre[sign]
    return sign, hiddenlayers, xunlian_error, adderror
| [
"tensorflow.nn.relu",
"tensorflow.train.Saver",
"tensorflow.nn.tanh",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.random_normal",
"numpy.random.rand",
"tensorflow.train.AdamOptimizer",
"num... | [((904, 978), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, Input_Dimen]', 'dtype': 'tf.float32', 'name': '"""x_data"""'}), "(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')\n", (918, 978), True, 'import tensorflow as tf\n'), ((2269, 2303), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learn_rate'], {}), '(learn_rate)\n', (2291, 2303), True, 'import tensorflow as tf\n'), ((2369, 2402), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2400, 2402), True, 'import tensorflow as tf\n'), ((211, 242), 'tensorflow.matmul', 'tf.matmul', (['input_layer', 'weights'], {}), '(input_layer, weights)\n', (220, 242), True, 'import tensorflow as tf\n'), ((295, 312), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (305, 312), True, 'import tensorflow as tf\n'), ((2531, 2543), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2541, 2543), True, 'import tensorflow as tf\n'), ((2570, 2586), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2584, 2586), True, 'import tensorflow as tf\n'), ((358, 375), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['layer'], {}), '(layer)\n', (368, 375), True, 'import tensorflow as tf\n'), ((1337, 1361), 'numpy.sqrt', 'np.sqrt', (['Unit_Layers[jj]'], {}), '(Unit_Layers[jj])\n', (1344, 1361), True, 'import numpy as np\n'), ((1608, 1678), 'tensorflow.random_normal', 'tf.random_normal', (['[Unit_Layers[jj + 1]]'], {'stddev': '(10)', 'name': "('bias%s' % jj)"}), "([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj)\n", (1624, 1678), True, 'import tensorflow as tf\n'), ((424, 444), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer'], {}), '(layer)\n', (437, 444), True, 'import tensorflow as tf\n'), ((1191, 1243), 'numpy.random.rand', 'np.random.rand', (['Unit_Layers[jj]', 'Unit_Layers[jj + 1]'], {}), '(Unit_Layers[jj], Unit_Layers[jj + 1])\n', (1205, 1243), True, 'import numpy as np\n')] |
# formulas for this file from:
# http://www.easyrgb.com/index.php?X=MATH
import numpy as np
import contextlib
import math
# Converts sRGB color to CIE's XYZ.
# defaults to white, in order to quickly assert white point in CIE-L*a*b*.
def rgb2xyz(rgb = [255, 255, 255]):
    """Convert an sRGB color (0-255 per channel) to CIE XYZ.

    Defaults to white so the D65 white point in CIE-L*a*b* can be
    asserted quickly. Observer: 2°, illuminant: D65.
    """
    channels = np.array(rgb) / 255
    # inverse sRGB companding: gamma curve above the threshold,
    # plain linear division for very small channel values
    linear = np.where(channels > 0.04045,
                      ((channels + 0.055) / 1.055) ** 2.4,
                      channels / 12.92) * 100
    # sRGB -> XYZ conversion matrix (observer 2°, illuminant D65)
    matrix = np.array([[0.4124, 0.3576, 0.1805],
                       [0.2126, 0.7152, 0.0722],
                       [0.0193, 0.1192, 0.9505]])
    return matrix.dot(linear)
def xyz2rgb(xyz=None):
    """Convert CIE XYZ to sRGB. Not yet implemented.

    The previous stub returned the undefined name ``false``, which was a
    NameError at call time; raising NotImplementedError makes the missing
    feature explicit. The default argument is resolved lazily (None
    sentinel) instead of calling ``rgb2xyz()`` at definition time.
    """
    raise NotImplementedError("xyz2rgb is not implemented yet")
# convert CIE's XYZ to L*a*b*.
# defaults to white.
def xyz2lab(xyz=None):
    """Convert a CIE XYZ color to CIE L*a*b*.

    Args:
        xyz: XYZ triple (observer 2°, illuminant D65). Defaults to the
            D65 white point, which yields L*a*b* = (100, 0, 0).

    Returns:
        numpy array [L*, a*, b*].
    """
    if xyz is None:
        xyz = rgb2xyz()
    # Work on a float copy: the old code mutated its argument in place,
    # which corrupted the caller's data and — because the default was a
    # shared object evaluated once — broke every later default call.
    xyz = np.array(xyz, dtype=float)
    lab = np.zeros(3)
    # normalize by the D65 reference white
    xyz[0] = xyz[0] / 95.047
    xyz[1] = xyz[1] / 100.000
    xyz[2] = xyz[2] / 108.883
    # piecewise transform: cube root above the threshold,
    # linear approximation for very small values
    for i, c in enumerate(xyz):
        if c > 0.008856:
            xyz[i] = c ** (1 / 3)
        else:
            xyz[i] = (c * 7.787) + (16 / 116)
    lab[0] = (116 * xyz[1]) - 16
    lab[1] = 500 * (xyz[0] - xyz[1])
    lab[2] = 200 * (xyz[1] - xyz[2])
    return lab
def lab2xyz(lab=None):
    """Convert CIE L*a*b* to XYZ. Not yet implemented.

    Two bugs fixed: the old default ``lab=rgb2lab()`` was evaluated at
    definition time, *before* rgb2lab exists in this module, so importing
    the module raised NameError; and the body returned the undefined name
    ``false``. The default is now a lazy None sentinel and the stub raises
    NotImplementedError explicitly.
    """
    raise NotImplementedError("lab2xyz is not implemented yet")
def rgb2lab(rgb = [255, 255, 255]):
    """sRGB -> CIE L*a*b*, chained through the XYZ intermediate space."""
    xyz = rgb2xyz(rgb)
    return xyz2lab(xyz)
def lab2rgb(lab = rgb2lab()):
    """CIE L*a*b* -> sRGB via XYZ (note: xyz2rgb is still a stub)."""
    xyz = lab2xyz(lab)
    return xyz2rgb(xyz)
if __name__ == "__main__":
    # Demo: print the XYZ and L*a*b* values of the primary/secondary colors.
    # The sixteen copy-pasted print pairs are collapsed into one data-driven
    # loop; the formatter is built once instead of per call.
    fmt = np.vectorize("%.3f".__mod__)
    samples = [
        ("white 0xFFFFFF", [255, 255, 255]),
        ("red 0xFF0000", [255, 0, 0]),  # label fixed: was the typo "0xFF000"
        ("green 0x00FF00", [0, 255, 0]),
        ("blue 0x0000FF", [0, 0, 255]),
        ("yellow 0xFFFF00", [255, 255, 0]),
        ("magenta 0xFF00FF", [255, 0, 255]),
        ("cyan 0x00FFFF", [0, 255, 255]),
        ("black 0x000000", [0, 0, 0]),
    ]
    for label, rgb in samples:
        print(label)
        print(fmt(rgb2xyz(rgb)))
        print(fmt(rgb2lab(rgb)))
| [
"numpy.array",
"numpy.zeros",
"numpy.vectorize"
] | [((333, 344), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (341, 344), True, 'import numpy as np\n'), ((1033, 1044), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1041, 1044), True, 'import numpy as np\n'), ((303, 316), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (311, 316), True, 'import numpy as np\n'), ((572, 585), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (580, 585), True, 'import numpy as np\n'), ((1791, 1819), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (1803, 1819), True, 'import numpy as np\n'), ((1842, 1870), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (1854, 1870), True, 'import numpy as np\n'), ((1919, 1947), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (1931, 1947), True, 'import numpy as np\n'), ((1981, 2009), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (1993, 2009), True, 'import numpy as np\n'), ((2072, 2100), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2084, 2100), True, 'import numpy as np\n'), ((2134, 2162), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2146, 2162), True, 'import numpy as np\n'), ((2224, 2252), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2236, 2252), True, 'import numpy as np\n'), ((2286, 2314), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2298, 2314), True, 'import numpy as np\n'), ((2378, 2406), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2390, 2406), True, 'import numpy as np\n'), ((2442, 2470), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2454, 2470), True, 'import numpy as np\n'), ((2541, 2569), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), 
"('%.3f'.__mod__)\n", (2553, 2569), True, 'import numpy as np\n'), ((2605, 2633), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2617, 2633), True, 'import numpy as np\n'), ((2701, 2729), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2713, 2729), True, 'import numpy as np\n'), ((2765, 2793), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2777, 2793), True, 'import numpy as np\n'), ((2858, 2886), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2870, 2886), True, 'import numpy as np\n'), ((2918, 2946), 'numpy.vectorize', 'np.vectorize', (['"""%.3f""".__mod__'], {}), "('%.3f'.__mod__)\n", (2930, 2946), True, 'import numpy as np\n')] |
"""infinite_jukebox.py - (c) 2017 - <NAME> - <EMAIL>
An attempt to re-create the amazing Infinite Jukebox (http://www.infinitejuke.com)
created by <NAME> of Echo Nest. Uses the Remixatron module to do most of the
work.
"""
import argparse
import curses
import curses.textpad
import numpy as np
import os
import pygame
import pygame.event
import pygame.locals
import signal
import soundfile as sf
import sys
import time
from Remixatron import InfiniteJukebox
from pygame import mixer
SOUND_FINISHED = pygame.locals.USEREVENT + 1
def process_args():
    """Parse and return the command-line arguments for the jukebox."""
    description = """Creates an infinite remix of an audio file by finding musically similar beats and computing a randomized play path through them. The default choices should be suitable for a variety of musical styles. This work is inspired by the Infinite Jukebox (http://www.infinitejuke.com) project creaeted by <NAME> (<EMAIL>)"""
    epilog = """
   """
    ap = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
    # required positional: the audio file to remix
    ap.add_argument("filename", type=str,
                    help="the name of the audio file to play. Most common audio types should work. (mp3, wav, ogg, etc..)")
    # tuning knobs for the clustering / playback
    ap.add_argument("-clusters", metavar='N', type=int, default=0,
                    help="set the number of clusters into which we want to bucket the audio. Default: 0 (automatically try to find the optimal cluster value.)")
    ap.add_argument("-start", metavar='start_beat', type=int, default=1,
                    help="start on a specific beat. Default: 1")
    # offline rendering instead of live playback
    ap.add_argument("-save", metavar='label', type=str,
                    help="Save the remix to a file, rather than play it. Will create file named [label].wav")
    ap.add_argument("-duration", metavar='seconds', type=int, default=180,
                    help="length (in seconds) to save. Must use with -save. Default: 180")
    # diagnostics / algorithm selection
    ap.add_argument("-verbose", action='store_true',
                    help="print extra info about the track and play vector")
    ap.add_argument("-use_v1", action='store_true',
                    help="use the original auto clustering algorithm instead of the new one. -clusters must not be set.")
    return ap.parse_args()
def MyCallback(pct_complete, message):
    """Status callback: paint a low-fi progress bar and the given message.

    Example: [######    ] Doing some thing...
    """
    filled = int(pct_complete * 10)
    # ten-slot bar: '#' for completed slots, spaces for the rest
    bar = " [" + "#" * filled + " " * (10 - filled) + "] "
    window.clear()
    window.addstr(1, 0, bar + message)
    window.refresh()
def display_playback_progress(v):
    """
    Displays a super low-fi playback progress map for play-vector entry *v*.
    See README.md for details..

    v is a play-vector dict; this function reads v['beat'], v['seq_len']
    and v['seq_pos'].

    Returns the time this function took so we can deduct it from the
    sleep time for the beat
    """
    time_start = time.time()
    term_width = curses.tigetnum('cols')
    # the map is drawn starting at this screen row
    y_offset = 11
    beat = v['beat']
    min_sequence = v['seq_len']
    current_sequence = v['seq_pos']
    # compute a segment map and display it. See README.md for an
    # explanation of segment maps and cluster maps.
    segment_map = ''
    segment_chars = '#-'
    # alternate '#'/'-' per segment so adjacent segments are distinguishable
    for b in jukebox.beats:
        segment_map += segment_chars[ b['segment'] % 2 ]
    window.addstr(y_offset,0,segment_map + " ")
    # highlight all the jump candidates in the segment
    # map (the map wraps across rows every term_width characters)
    for c in jukebox.beats[beat]['jump_candidates']:
        b = jukebox.beats[c]
        window.addch(y_offset + int(b['id'] / term_width), # y position of character
                     b['id'] % term_width, # x position of character
                     ord(segment_chars[b['segment'] %2]), # either '#' or '-' depending on the segment
                     curses.A_REVERSE) # print in reverse highlight
    # print the position tracker on the segment map
    x_pos = beat % term_width
    y_pos = int(beat/term_width) + y_offset
    beats_until_jump = min_sequence - current_sequence
    buj_disp = ''
    # show the beats until the next jump. If the value == 0 then
    # then sequence wanted to jump but couldn't find a suitable
    # target. Display an appropriate symbol for that (a frowny face, of course!)
    if beats_until_jump > 0:
        buj_disp = str(beats_until_jump).zfill(2)
    else:
        buj_disp = ':('
    window.addstr(y_pos, x_pos, buj_disp, curses.A_BOLD | curses.A_REVERSE | curses.A_STANDOUT )
    window.refresh()
    # report elapsed time so the caller can compensate its sleep interval
    time_finish = time.time()
    return time_finish - time_start
def get_verbose_info():
    """Show statistics about the song and the analysis.

    Builds a text summary (duration, beat count, tempo, cluster/segment
    counts, sample rate) plus a segment map; with -verbose also appends
    the cluster map and the jukebox's extra diagnostics.
    """
    info = """
   filename: %s
   duration: %02d:%02d:%02d
      beats: %d
      tempo: %d bpm
   clusters: %d
   segments: %d
 samplerate: %d
"""
    # split total seconds into h:m:s for display
    (minutes,seconds) = divmod(round(jukebox.duration),60)
    (hours, minutes) = divmod(minutes, 60)
    verbose_info = info % (os.path.basename(args.filename), hours, minutes, seconds,
                           len(jukebox.beats), int(round(jukebox.tempo)), jukebox.clusters, jukebox.segments,
                           jukebox.sample_rate)
    segment_map = ''
    cluster_map = ''
    # segments alternate '#'/'-'; each cluster gets its own character
    segment_chars = '#-'
    cluster_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890-=,.<>/?;:!@#$%^&*()_+'
    for b in jukebox.beats:
        segment_map += segment_chars[ b['segment'] % 2 ]
        cluster_map += cluster_chars[ b['cluster'] ]
    verbose_info += "\n" + segment_map + "\n\n"
    if args.verbose:
        verbose_info += cluster_map + "\n\n"
        verbose_info += jukebox._extra_diag
    return verbose_info
def get_window_contents():
    """Dump the contents of the current curses window as a string."""
    box = curses.textpad.Textbox(window)
    # keep trailing spaces so the dump matches the screen exactly
    box.stripspaces = False
    return box.gather()
def cleanup():
    """Restore the terminal and shut down audio before exiting.

    Dumps the current curses window to stdout so the final screen state
    survives the curses teardown.
    """
    if not window:
        return
    w_str = get_window_contents()
    curses.curs_set(1)
    curses.endwin()
    print(w_str.rstrip())
    # bug fix: a bare ``print`` (no parentheses) is a no-op name reference
    # in Python 3 -- the intended trailing blank line was never emitted
    print()
    mixer.quit()
def graceful_exit(signum, frame):
    """Catch SIGINT gracefully: restore state, clean up, and exit.

    Args:
        signum: delivered signal number (unused).
        frame: interrupted stack frame (unused).
    """
    # restore the original signal handler as otherwise evil things will happen
    # in raw_input when CTRL+C is pressed, and our signal handler is not re-entrant
    signal.signal(signal.SIGINT, original_sigint)
    cleanup()
    sys.exit(0)
def save_to_file(jukebox, label, duration):
    ''' Render roughly *duration* seconds of the remix to "[label].wav". '''
    # how many beats fit in the requested duration, at the average beat length
    seconds_per_beat = 60 / jukebox.tempo
    beat_count = int(duration / seconds_per_beat)
    # collect the raw audio buffer of every beat referenced by the first
    # beat_count entries of the play vector
    buffers = []
    for entry in jukebox.play_vector[:beat_count]:
        buffers.append(jukebox.beats[entry['beat']]['buffer'])
    # flatten the list of byte arrays into one contiguous sample array
    audio = np.concatenate(buffers)
    # write out the wav file
    sf.write(label + '.wav', audio, jukebox.sample_rate, format='WAV', subtype='PCM_24')
if __name__ == "__main__":
    # store the original SIGINT handler and install a new handler
    original_sigint = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, graceful_exit)
    #
    # Main program logic
    #
    window = None
    args = process_args()
    # set up curses and hide the cursor while the UI is active
    curses.setupterm()
    window = curses.initscr()
    curses.curs_set(0)
    # do the clustering. Run synchronously. Post status messages to MyCallback()
    jukebox = InfiniteJukebox(filename=args.filename, start_beat=args.start, clusters=args.clusters,
                              progress_callback=MyCallback, do_async=False, use_v1=args.use_v1)
    # show more info about what was found
    window.addstr(2,0, get_verbose_info())
    window.refresh()
    # if we're just saving the remix to a file, then just
    # find the necessarry beats and do that
    if args.save:
        save_to_file(jukebox, args.save, args.duration)
        graceful_exit(0, 0)
    # it's important to make sure the mixer is setup with the
    # same sample rate as the audio. Otherwise the playback will
    # sound too slow/fast/awful
    mixer.init(frequency=jukebox.sample_rate)
    channel = mixer.Channel(0)
    # pygame's event handling functions won't work unless the
    # display module has been initialized -- even though we
    # won't be making any display calls.
    pygame.display.init()
    # register the event type we want fired when a sound buffer
    # finishes playing
    channel.set_endevent(SOUND_FINISHED)
    # queue and start playing the first event in the play vector. This is basic
    # audio double buffering that will reduce choppy audio from impercise timings. The
    # goal is to always have one beat in queue to play as soon as the last one is done.
    beat_to_play = jukebox.beats[ jukebox.play_vector[0]['beat'] ]
    snd = mixer.Sound(buffer=beat_to_play['buffer'])
    channel.queue(snd)
    display_playback_progress(jukebox.play_vector[0])
    # go through the rest of the playback list, start playing each beat, display
    # the progress and wait for the playback to complete. Playback happens on another
    # thread in the pygame library, so we have to wait to be signaled to queue another
    # event.
    for v in jukebox.play_vector[1:]:
        beat_to_play = jukebox.beats[ v['beat'] ]
        snd = mixer.Sound(buffer=beat_to_play['buffer'])
        channel.queue(snd)
        # block until the SOUND_FINISHED event fires for the playing beat
        pygame.event.wait()
        display_playback_progress(v)
| [
"pygame.event.wait",
"argparse.ArgumentParser",
"pygame.mixer.init",
"curses.endwin",
"curses.textpad.Textbox",
"curses.initscr",
"curses.tigetnum",
"soundfile.write",
"curses.curs_set",
"pygame.mixer.Sound",
"os.path.basename",
"signal.getsignal",
"signal.signal",
"pygame.mixer.quit",
"... | [((976, 1097), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'epilog': 'epilog', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=description, epilog=epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n', (999, 1097), False, 'import argparse\n'), ((3094, 3105), 'time.time', 'time.time', ([], {}), '()\n', (3103, 3105), False, 'import time\n'), ((3124, 3147), 'curses.tigetnum', 'curses.tigetnum', (['"""cols"""'], {}), "('cols')\n", (3139, 3147), False, 'import curses\n'), ((4745, 4756), 'time.time', 'time.time', ([], {}), '()\n', (4754, 4756), False, 'import time\n'), ((5979, 6009), 'curses.textpad.Textbox', 'curses.textpad.Textbox', (['window'], {}), '(window)\n', (6001, 6009), False, 'import curses\n'), ((6207, 6225), 'curses.curs_set', 'curses.curs_set', (['(1)'], {}), '(1)\n', (6222, 6225), False, 'import curses\n'), ((6230, 6245), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (6243, 6245), False, 'import curses\n'), ((6288, 6300), 'pygame.mixer.quit', 'mixer.quit', ([], {}), '()\n', (6298, 6300), False, 'from pygame import mixer\n'), ((6539, 6584), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'original_sigint'], {}), '(signal.SIGINT, original_sigint)\n', (6552, 6584), False, 'import signal\n'), ((6604, 6615), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6612, 6615), False, 'import sys\n'), ((7193, 7219), 'numpy.concatenate', 'np.concatenate', (['main_bytes'], {}), '(main_bytes)\n', (7207, 7219), True, 'import numpy as np\n'), ((7256, 7351), 'soundfile.write', 'sf.write', (["(label + '.wav')", 'output_bytes', 'jukebox.sample_rate'], {'format': '"""WAV"""', 'subtype': '"""PCM_24"""'}), "(label + '.wav', output_bytes, jukebox.sample_rate, format='WAV',\n subtype='PCM_24')\n", (7264, 7351), True, 'import soundfile as sf\n'), ((7466, 7497), 'signal.getsignal', 'signal.getsignal', (['signal.SIGINT'], {}), '(signal.SIGINT)\n', (7482, 7497), False, 'import 
signal\n'), ((7502, 7545), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'graceful_exit'], {}), '(signal.SIGINT, graceful_exit)\n', (7515, 7545), False, 'import signal\n'), ((7635, 7653), 'curses.setupterm', 'curses.setupterm', ([], {}), '()\n', (7651, 7653), False, 'import curses\n'), ((7668, 7684), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (7682, 7684), False, 'import curses\n'), ((7689, 7707), 'curses.curs_set', 'curses.curs_set', (['(0)'], {}), '(0)\n', (7704, 7707), False, 'import curses\n'), ((7804, 7966), 'Remixatron.InfiniteJukebox', 'InfiniteJukebox', ([], {'filename': 'args.filename', 'start_beat': 'args.start', 'clusters': 'args.clusters', 'progress_callback': 'MyCallback', 'do_async': '(False)', 'use_v1': 'args.use_v1'}), '(filename=args.filename, start_beat=args.start, clusters=\n args.clusters, progress_callback=MyCallback, do_async=False, use_v1=\n args.use_v1)\n', (7819, 7966), False, 'from Remixatron import InfiniteJukebox\n'), ((8467, 8508), 'pygame.mixer.init', 'mixer.init', ([], {'frequency': 'jukebox.sample_rate'}), '(frequency=jukebox.sample_rate)\n', (8477, 8508), False, 'from pygame import mixer\n'), ((8523, 8539), 'pygame.mixer.Channel', 'mixer.Channel', (['(0)'], {}), '(0)\n', (8536, 8539), False, 'from pygame import mixer\n'), ((8709, 8730), 'pygame.display.init', 'pygame.display.init', ([], {}), '()\n', (8728, 8730), False, 'import pygame\n'), ((9196, 9238), 'pygame.mixer.Sound', 'mixer.Sound', ([], {'buffer': "beat_to_play['buffer']"}), "(buffer=beat_to_play['buffer'])\n", (9207, 9238), False, 'from pygame import mixer\n'), ((9691, 9733), 'pygame.mixer.Sound', 'mixer.Sound', ([], {'buffer': "beat_to_play['buffer']"}), "(buffer=beat_to_play['buffer'])\n", (9702, 9733), False, 'from pygame import mixer\n'), ((9770, 9789), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (9787, 9789), False, 'import pygame\n'), ((5168, 5199), 'os.path.basename', 'os.path.basename', (['args.filename'], {}), '(args.filename)\n', 
(5184, 5199), False, 'import os\n')] |
import matplotlib.figure as mf
import matplotlib.ticker as ticker
import numpy as np
import scipy.interpolate as ip
_GREY400 = "#BDBDBD"
_LSTYLES = ("-", "--", "-.", ":")
_RED100 = "#FFCDD2"
_RED300 = "#E57373"
_RED700 = "#D32F2F"
def profile(name, data_exp, data_fit):
    """Build a two-panel figure: residuals on top, fit plus data below."""
    fig, ax_res, ax_main = _create_fig(name)
    _plot_fit(data_fit, ax_main)
    # experimental points are optional (e.g. a fit-only preview)
    if data_exp.size:
        _plot_exp(data_exp, data_fit, ax_res, ax_main)
    # zero reference line on both panels
    for axis in (ax_res, ax_main):
        axis.axhline(0, color="k", linewidth=0.5, zorder=1)
    return fig
def _create_fig(name):
    """Create the shared layout: a small residual panel above the main panel."""
    fig = mf.Figure()
    # top row is 1/4 the height of the bottom row; panels share the x axis
    ax_res, ax_main = fig.subplots(
        2, 1, sharex="all", gridspec_kw={"height_ratios": [1, 4]}
    )
    fig.align_labels()
    fig.suptitle(str(name).upper())
    ax_res.set_ylabel("Residuals")
    ax_res.ticklabel_format(style="sci", scilimits=(0, 0), axis="y", useMathText=True)
    return fig, ax_res, ax_main
def _plot_fit(data_fit, ax2):
    # Plot the fitted curve on the main axis; the first two fields of the
    # record array are taken as x and y.
    xname, yname, *_ = data_fit.dtype.names
    fit_x, fit_y = data_fit[xname], data_fit[yname]
    # NOTE(review): ``get_range`` is not defined or imported in this part
    # of the file -- presumably a module-level helper (x-range padded by
    # 2%?) defined elsewhere; verify.
    range_x = get_range(fit_x, 0.02)
    ax2.set_xlim(*range_x)
    ax2.plot(fit_x, fit_y, linestyle="-", color=_RED300)
def _plot_exp(data_exp, data_fit, ax1, ax2):
    # Plot experimental points (with error bars) and their residuals.
    # Selected points are red, deselected points light red; the first three
    # fields of the record array are taken as x, y and the error.
    xname, yname, ename, *_ = data_exp.dtype.names
    exp_x, exp_y, exp_e = data_exp[xname], data_exp[yname], abs(data_exp[ename])
    res_y = _get_residuals(data_exp, data_fit)
    # data_exp.mask flags the points currently selected for fitting
    m_sel = data_exp.mask
    # points with essentially infinite error bars are drawn after the
    # y-limits are frozen, so they don't blow up the autoscaled range
    m_inf = exp_e.sum(axis=1) > 1e16
    s1 = m_sel & ~m_inf
    ax1.errorbar(exp_x[s1], res_y[s1], exp_e[s1].T, fmt=".", color=_RED700, zorder=3)
    ax2.errorbar(exp_x[s1], exp_y[s1], exp_e[s1].T, fmt=".", color=_RED700, zorder=3)
    # freeze the y-limits computed from the finite-error points only
    range1_y = ax1.get_ylim()
    range2_y = ax2.get_ylim()
    # replace infinities with a large finite bar so matplotlib can draw them
    exp_e[exp_e == np.inf] = 500.0
    s2 = m_sel & m_inf
    ax1.errorbar(exp_x[s2], res_y[s2], exp_e[s2].T, fmt=".", color=_RED700, zorder=3)
    ax2.errorbar(exp_x[s2], exp_y[s2], exp_e[s2].T, fmt=".", color=_RED700, zorder=3)
    # deselected points, drawn in the light shade
    s3 = ~m_sel
    ax1.errorbar(exp_x[s3], res_y[s3], exp_e[s3].T, fmt=".", color=_RED100, zorder=3)
    ax2.errorbar(exp_x[s3], exp_y[s3], exp_e[s3].T, fmt=".", color=_RED100, zorder=3)
    # restore the frozen limits
    ax1.set_ylim(range1_y)
    ax2.set_ylim(range2_y)
def cpmg(file_pdf, name, data_exp, data_fit):
    """Render a CPMG relaxation-dispersion profile page into *file_pdf*."""
    xname = data_fit.dtype.names[0]
    fig = profile(name, data_exp, data_fit)
    ax_main = fig.axes[1]
    ax_main.set_xlabel(r"$\nu_\mathregular{CPMG}$ (Hz)")
    ax_main.set_ylabel(r"$R_{2,\mathregular{eff}}$ (s$^{-1}$)")
    # pad the right edge by one minimum-x step; start the axis at zero
    x_values = data_fit[xname]
    ax_main.set_xlim(0.0, max(x_values) + min(x_values))
    file_pdf.savefig(fig)
def relaxation(file_pdf, name, data_exp, data_fit):
    """Render a relaxation (intensity vs. time) profile page into *file_pdf*."""
    xname = data_fit.dtype.names[0]
    fig = profile(name, data_exp, data_fit)
    ax_main = fig.axes[1]
    ax_main.set_xlabel(r"Time (s)")
    ax_main.set_ylabel(r"Intensity")
    # pad the right edge by one minimum-x step; start the axis at zero
    x_values = data_fit[xname]
    ax_main.set_xlim(0.0, max(x_values) + min(x_values))
    file_pdf.savefig(fig)
def cest(file_pdf, name, data_exp, data_fit, cs_values, alias_values):
    """Render one CEST profile page and append it to *file_pdf*.

    The residual panel (ax1) is shaded with the ±1σ and ±2σ bands of a
    robust residual-noise estimate; vertical lines mark the positions in
    *cs_values*, with aliased positions (per *alias_values*) flagged by an
    asterisk near the top of the main panel.
    """
    residuals = _get_residuals(data_exp, data_fit)
    sigma = _estimate_sigma(residuals)
    fig = profile(name, data_exp, data_fit)
    ax1, ax2 = fig.axes
    # sorted(..., reverse=True) inverts the x-axis (descending ppm)
    lim = sorted(ax2.get_xlim(), reverse=True)
    ax1.set_xlim(lim)
    ax2.set_xlim(lim)
    ax1.xaxis.set_major_locator(ticker.MaxNLocator(6))
    ax2.xaxis.set_major_locator(ticker.MaxNLocator(6))
    ax2.set_xlabel(r"$B_1$ position (ppm)")
    ax2.set_ylabel(r"$I/I_0$")
    # translucent grey bands at ±1σ and ±2σ of the residual noise
    kwargs1 = {"facecolor": (0, 0, 0, 0.1), "edgecolor": "none"}
    ax1.fill_between(ax1.get_xlim(), -1.0 * sigma, 1.0 * sigma, **kwargs1)
    ax1.fill_between(ax1.get_xlim(), -2.0 * sigma, 2.0 * sigma, **kwargs1)
    # one vertical marker per position, cycling through _LSTYLES
    kwargs2 = {"color": _GREY400, "linewidth": 0.75, "zorder": -1}
    for a_cs, alias, lstyle in zip(cs_values, alias_values, _LSTYLES):
        ax1.axvline(a_cs, linestyle=lstyle, **kwargs2)
        ax2.axvline(a_cs, linestyle=lstyle, **kwargs2)
        if alias:
            # convert data x to axes fraction to place the "*" label
            x, _ = ax2.transLimits.transform((a_cs, 0))
            ax2.text(x - 0.02, 0.95, "*", transform=ax2.transAxes)
    file_pdf.savefig(fig)
def shift(name_pdf, name, fit, exp, err):
    """Save a fitted-vs-experimental chemical-shift correlation plot to *name_pdf*."""
    figure = mf.Figure()
    axis = figure.subplots(1, 1)
    figure.align_labels()
    axis.errorbar(fit, exp, yerr=err, fmt=".", color=_RED700)
    lower = min(axis.get_xlim()[0], axis.get_ylim()[0])
    upper = max(axis.get_xlim()[1], axis.get_ylim()[1])
    axis.set_aspect("equal", "box")
    # reference identity line y = x
    axis.plot([lower, upper], [lower, upper], color="k", linewidth=0.5, zorder=1)
    axis.set_xlabel(r"$Δδ_\mathregular{fit}$ (ppb)")
    axis.set_ylabel(r"$Δδ_\mathregular{exp}$ (ppb)")
    figure.savefig(name_pdf)
def _get_residuals(data_exp, data_fit):
xname, yname, *_ = data_exp.dtype.names
data_fit_ = np.unique(np.sort(data_fit, order=xname))
data_fit_f = ip.interp1d(data_fit_[xname], data_fit_[yname])
return data_exp[yname] - data_fit_f(data_exp[xname])
def _estimate_sigma(values):
"""Estimates standard deviation using median to exclude outliers.
Up to 50% can be bad.
Reference:
Rousseeuw, Peter & Croux, Christophe. (1993). Alternatives to Median Absolute
Deviation. Journal of the American Statistical Association. 88. 1273 - 1283.
10.1080/01621459.1993.10476408.
"""
if not all(values):
return 0.0
_values = values.reshape(1, -1)
return 1.1926 * np.median(np.median(abs(_values - _values.T), axis=0))
def get_range(values, extension=0.0):
    """Return (min, max) of *values*, widened on each side by *extension* of the span."""
    lo = np.min(values)
    hi = np.max(values)
    margin = extension * (hi - lo)
    return lo - margin, hi + margin
def write_plots(experiments, params, path, simulation=False):
"""Plot the experimental and fitted data."""
print("Plotting data...")
path_ = path / "Plots"
path_.mkdir(parents=True, exist_ok=True)
try:
experiments.plot(path=path_, params=params, simulation=simulation)
except KeyboardInterrupt:
print(" - Plotting cancelled\n")
print("")
| [
"matplotlib.ticker.MaxNLocator",
"numpy.sort",
"matplotlib.figure.Figure",
"numpy.max",
"numpy.min",
"scipy.interpolate.interp1d"
] | [((553, 564), 'matplotlib.figure.Figure', 'mf.Figure', ([], {}), '()\n', (562, 564), True, 'import matplotlib.figure as mf\n'), ((3993, 4004), 'matplotlib.figure.Figure', 'mf.Figure', ([], {}), '()\n', (4002, 4004), True, 'import matplotlib.figure as mf\n'), ((4635, 4682), 'scipy.interpolate.interp1d', 'ip.interp1d', (['data_fit_[xname]', 'data_fit_[yname]'], {}), '(data_fit_[xname], data_fit_[yname])\n', (4646, 4682), True, 'import scipy.interpolate as ip\n'), ((5302, 5316), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (5308, 5316), True, 'import numpy as np\n'), ((5333, 5347), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (5339, 5347), True, 'import numpy as np\n'), ((3156, 3177), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', (['(6)'], {}), '(6)\n', (3174, 3177), True, 'import matplotlib.ticker as ticker\n'), ((3211, 3232), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', (['(6)'], {}), '(6)\n', (3229, 3232), True, 'import matplotlib.ticker as ticker\n'), ((4586, 4616), 'numpy.sort', 'np.sort', (['data_fit'], {'order': 'xname'}), '(data_fit, order=xname)\n', (4593, 4616), True, 'import numpy as np\n')] |
import argparse
from abc import ABC
from pathlib import Path
import numpy as np
import epoch_counter
from main import add_arguments
from qlearning.network import TeacherAgent
from qlearning.teacher import TeacherEnv
from trainer import Trainer
class EpochCounter(epoch_counter.EpochCounter):
    """Epoch counter that also accumulates per-episode info values.

    For each parallel process it collects the "our_return" and "base_return"
    entries found in step infos; when an episode ends, the completed list is
    archived so it can be reported through :meth:`items`.
    """

    # key under which the archived episode lists are yielded by items()
    infos_name = "infos"

    def __init__(self, num_processes):
        super().__init__(num_processes)
        keys = ["our_return", "base_return"]
        # per key, one running list of values per process (current episode)
        self.info_lists = {k: [[] for _ in range(num_processes)] for k in keys}
        # per key, the most recently completed episode list per process
        # (None until that process finishes its first episode)
        self.episode_lists = {k: [None for _ in range(num_processes)] for k in keys}

    def update(self, reward, done, infos):
        """Accumulate info values; archive and reset a process's list when done."""
        for k, lists in self.info_lists.items():
            for i, (info, d) in enumerate(zip(infos, done)):
                if k in info:
                    lists[i].append(info[k])
                if d:
                    self.episode_lists[k][i] = lists[i]
                    lists[i] = []
        return super().update(reward, done, infos)

    def items(self, prefix=""):
        """Yield (name, value) pairs; processes without a finished episode are dropped."""
        episode_lists = {
            k: [x for x in v if x is not None] for k, v in self.episode_lists.items()
        }
        yield prefix + EpochCounter.infos_name, episode_lists
        yield from super().items(prefix)
def main(training_iterations, alpha, q_learning_gamma, **kwargs):
    """Build and run a teacher trainer configured with Q-learning hyperparameters.

    Args:
        training_iterations: iterations passed to each TeacherEnv.
        alpha: Q-learning step size passed to TeacherEnv.
        q_learning_gamma: Q-learning discount factor passed to TeacherEnv.
        **kwargs: forwarded to ``Trainer.main`` (``recurrent`` is forced True).
    """

    class TeacherTrainer(Trainer, ABC):
        def step(self):
            result = super().step()
            # Pop the collected episode infos (train and eval) out of the
            # result dict and persist them as .npy files in the log directory.
            for prefix in ("", "eval_"):
                name = prefix + EpochCounter.infos_name
                for k, v in result.pop(name).items():
                    path = Path(self.logdir, f"{prefix}{k}")
                    np.save(str(path), np.array(v))
            return result

        def make_env(self, env_id, seed, rank, evaluation):
            # one TeacherEnv per worker, seeded by rank for decorrelation
            return TeacherEnv(
                seed=seed + rank,
                training_iterations=training_iterations,
                env_id=env_id,
                alpha=alpha,
                gamma=q_learning_gamma,
            )

        @staticmethod
        def build_agent(envs, **agent_args):
            return TeacherAgent(envs.observation_space, envs.action_space, **agent_args)

        @classmethod
        def build_epoch_counter(cls, num_processes):
            return EpochCounter(num_processes)

    # the teacher agent is always recurrent
    kwargs.update(recurrent=True)
    TeacherTrainer.main(**kwargs)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--training-iterations", "-T", type=int)
PARSER.add_argument("--alpha", "-a", type=float)
PARSER.add_argument("--q-learning-gamma", "-qg", type=float)
add_arguments(PARSER)
main(**vars(PARSER.parse_args()))
| [
"argparse.ArgumentParser",
"qlearning.network.TeacherAgent",
"qlearning.teacher.TeacherEnv",
"pathlib.Path",
"numpy.array",
"main.add_arguments"
] | [((2409, 2434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2432, 2434), False, 'import argparse\n'), ((2622, 2643), 'main.add_arguments', 'add_arguments', (['PARSER'], {}), '(PARSER)\n', (2635, 2643), False, 'from main import add_arguments\n'), ((1802, 1927), 'qlearning.teacher.TeacherEnv', 'TeacherEnv', ([], {'seed': '(seed + rank)', 'training_iterations': 'training_iterations', 'env_id': 'env_id', 'alpha': 'alpha', 'gamma': 'q_learning_gamma'}), '(seed=seed + rank, training_iterations=training_iterations,\n env_id=env_id, alpha=alpha, gamma=q_learning_gamma)\n', (1812, 1927), False, 'from qlearning.teacher import TeacherEnv\n'), ((2106, 2175), 'qlearning.network.TeacherAgent', 'TeacherAgent', (['envs.observation_space', 'envs.action_space'], {}), '(envs.observation_space, envs.action_space, **agent_args)\n', (2118, 2175), False, 'from qlearning.network import TeacherAgent\n'), ((1609, 1642), 'pathlib.Path', 'Path', (['self.logdir', 'f"""{prefix}{k}"""'], {}), "(self.logdir, f'{prefix}{k}')\n", (1613, 1642), False, 'from pathlib import Path\n'), ((1682, 1693), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1690, 1693), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Experiments @Fischer in Montebelluna 28.02.20
# We had the oppurtunity to use the Flexometer for ski boots of Fischer with their help at Montebelluna. The idea is to validate our system acquiring simultaneously data by our sensor setup and the one from their machine. With the machine of Fischer it's possible to apply exact loads.
# We used booth our sensorized ski boots (Dynafit Hoji Pro Tour W and Dynafit TLT Speedfit). The Hoji we already used in the past for our experiments in the lab @Bz with our selfbuild experiment test bench. For the TLT Speedfit this was the first experiment.
#
# Strain gauge setup:
# - Dynafit Hoji Pro Tour: 4 pairs of strain gauges 1-4 (a=0°, b=90°)
# - Dynafit TLT Speedfit: 4 triples of strain gauges 1-4 (a=0°,b=45°,c=90°)
# As we had only a restricted time, we tested all 4 strain gauges pairs of the Hoji and only strain gauge triple 3 for TLT Speedfit. For the first time the new prototype of datalogger was running in an experiment. In addition also the first time in battery mode and not at room temperature. Unfortunately the connection of the strains to the logging system was not the best as in battery mode we don't have any possibility to control the connection to the channels yet. We'll get a solution for this the next days.
#
# Experiments (ambient temperature: 4°C):
# - #1: Hoji Pro Tour, 4a&b
# - #2: Hoji Pro Tour, 3a&b
# - #3: Hoji Pro Tour, 2a&b
# - #4: Hoji Pro Tour, 1a&b
# - #5: TLT Speedfit, 3a&b&c
#
# ATTENTION: The Hoji boot was not closed as much as the TLT. Take in consideration this when looking at force/angular displacement graph.
# In[50]:
# Importing libraries
import pandas as pd
import numpy as np
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import csv
import matplotlib.patches as mpatches #needed for plot legend
from matplotlib.pyplot import *
# Notebook display setup: render matplotlib figures inline, as PNG and PDF.
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
# # Machine Data: load and plot
# The boot was loaded cyclical by the machine with a maximum of F = 150N. In each single experiment 1-5 we exported the data of the last 5 cycles.
#
# In[51]:
# Load machine data into dfm[expnr]: expnr -> experiment 1-5, each with cycles 1-5.
# Each CSV uses a decimal comma, hence the string replace below.
expnr=5 #number of exp
cyclenr = 5 #number of cycle per experiment
colnr = 2*cyclenr # two columns (force, angle) per cycle
dfm={}
for expnr in range(expnr):
    d = {}
    for i in range(cyclenr): #load data from cycle 1-5
        d[expnr,i] = pd.DataFrame()
        d[expnr,i] = pd.read_csv('ESP'+ str(expnr+1) + 'ciclo'+ str(i+1) +'.csv', sep='\t',header=None)
    dfm[expnr]=pd.concat([d[expnr,0], d[expnr,1], d[expnr,2], d[expnr,3], d[expnr,4]], axis=1, join='inner')
    dfm[expnr] = np.array(dfm[expnr]) #transform in np.array
    for i in range(len(dfm[expnr])): #replace , with . and change format to float
        for j in range(colnr):
            dfm[expnr][i,j]=float(dfm[expnr][i,j].replace(',', '.'))
#print(dfm[1][:,0])
# In[52]:
# 5x5 grid: columns = experiments 1-5, rows = cycles 1-5 of the machine data.
# NOTE(review): expnr is 4 here (leaked from the loading loop above), so
# range(expnr+1) covers all five experiments.
figm, axm = plt.subplots(5, 5, figsize=(13, 11), sharex='col') #define plot settings
col_title = ['Experiment {}'.format(col) for col in range(1, 5)]
for i in range(expnr+1):
    for j in range(cyclenr):
        axm[j,i].plot(dfm[i][:,2*j+1],dfm[i][:,2*j])
        axm[0,i].set_title('Experiment '+ str(i+1))
        axm[j,0].set(ylabel='F[N] Cycle'+ str(j+1))
        axm[4,i].set(xlabel='angle [°]')
plt.tight_layout()
figm.suptitle('Machine Data Plot (Hoji Pro Tour: 1-4, TLT Speedfit: 5)',fontsize=16)
figm.subplots_adjust(top=0.88)
# On the x-axis the force F is shown (max 150N) and on the y-axis the displacement angle alpha.
# In the plot above the columns are showing the experiment and the rows the single cycles. The cycles within the same experiment are quite similar (qualitative). It's cool how clear is the difference between the two different ski boot models we used. Experiment 1-4 is showing Dynafit Hoji Pro Tour and experiment 5 the Dynafit TLT Speedfit.
# # Calculate surface under curve
# To compare the energy release between Hoji and TLT we are going to calculate the surface in the closed curve.
# We can calculate an area under a curve (curve to x-axis) by integration (E = \int{M dphi}). Via interpolation of extracted points on the curve we generate a function which is integrated afterwards by trapezian rule to get the surface. By subtracting the surface of unloading from the one of loading the area between can be calculated, which corresponds the energy release.
# In[53]:
from scipy.interpolate import interp1d
from numpy import trapz
# Experiment data: hysteresis loop of cycle 1 for Exp 1 (Hoji) and Exp 5 (TLT).
# The xm*/ym* arrays are manually extracted points on the loading/unloading
# branches; note the comments label them swapped (ym* are forces, xm* angles).
x1=dfm[1][:,1] # Exp1 cycle 1 Hoji
y1=dfm[1][:,0] # Exp1 cycle 1 Hoji
x2=dfm[4][:,1] # Exp5 cycle 1 Hoji
y2=dfm[4][:,0] # Exp5 cycle 1 Hoji
ym1=np.array([-29,17,41.14,63,96,147.8]) # x points loading Hoji
xm1=np.array([-1.5,2.9,7.312,11,13.7,13.94]) # y points loading Hoji
ym2=np.array([-29,3.741,25,43.08,63,72,106,147.8]) # x points unloading Hoji
xm2=np.array([-1.5,-0.646,1.2,3.127,6.6,8.37,13.28,13.94]) # y points unloading Hoji
ym3=np.array([-28.5,-12.27,4.841,18.01,31.92,39.46,87.48,145.6]) # x points loading TLT
xm3=np.array([-2.752,-0.989,1.022,3.23,5.387,6.012,6.521,6.915]) # y point loading TLT
ym4=np.array([-28.5,2.042,26.35,41.36,51.86,56.33,93.87,145.6]) # x points unloading TLT
xm4=np.array([-2.752,-1.94,-0.43,1.524,3.76,5.625,6.24,6.915]) # y points unloading TLt
# Interpolation of the four branches
f1 = interp1d(xm1, ym1)
f2 = interp1d(xm2, ym2)
f3 = interp1d(xm3, ym3)
f4 = interp1d(xm4, ym4)
# Plot of original data and interpolation
fig0, ax0 = plt.subplots(1, 2, figsize=(15, 8))
fig0.suptitle('Ski boot testing machine', fontsize=16)
#fig0.suptitle('Interpolation of experiment data 1&5 cycle 1 (left: Hoji, right: TLT)', fontsize=16)
ax0[0].plot(x1,y1) # loading Hoji
ax0[0].set_title('Hoji Pro Tour W')
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
#ax0[0].plot(x1,y1,xm1,ym1, 'o', xm1, f1(xm1), '-') # loading Hoji
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
ax0[0].set(xlabel='angle [°]')
ax0[0].set(ylabel='Force [N]')
ax0[1].plot(x2,y2) # loading Hoji
ax0[1].set_title('TLT Speedfit')
#ax0[1].plot(x2,y2,xm3,ym3, 'o', xm3, f3(xm3), '-') # loading Hoji
#ax0[1].plot(xm4,ym4, 'o', xm4, f4(xm4), '-', xm4, f4(xm4), '--') # unloading Hoji
ax0[1].set(xlabel='angle [°]')
ax0[1].set(ylabel='Force [N]')
plt.show()
# Calculation of area between loading and unloading curve -> Energy
# (trapezoidal rule on each branch; the difference is the hysteresis energy)
area1_hoji=np.trapz(f1(xm1), xm1)
area2_hoji=np.trapz(f2(xm2), xm2)
area1_TLT=np.trapz(f3(xm3), xm3)
area2_TLT=np.trapz(f4(xm4), xm4)
energy_hoji=abs(area1_hoji-area2_hoji)
energy_TLT=abs(area1_TLT-area2_TLT)
#print('Energy release Hoji = ', energy_hoji, '[J]')
#print('Energy release TLT = ', energy_TLT, '[J]')
# # Bootsensing: load and plot
# We created a datalogger which is saving the experiment data in a .txt file on a SD card. After the experiments we took them from the SD card to our PC.
# <NAME> did an excellent work with his file reader (https://github.com/raphaFanti/multiSensor/blob/master/analysis/03.%20Experiments_200220/Analysis%20v02/datanalysis_200220-v02.ipynb) which I'm using here to load this data. I modified the col_names as we used adapted column names the last time and updated the experiment date. He implemented also a good way to store all in a big dataframe. I'll copy also this code from Raphael.
# In[54]:
# transforms a time string into a datetime element
def toDate(timeString):
    """Combine a "HH:MM:SS" string with the experiment date (28.02.2020)."""
    hours, minutes, seconds = (int(part) for part in timeString.split(":"))
    return datetime.datetime(2020, 2, 28, hours, minutes, seconds)
# returns a dataframe for each sub experient
col_names = ["ID","strain1","strain2","strain3","temp","millis"] # column names from file
cols_ordered = ["time","strain1","strain2","strain3"] # order wished
cols_int = ["strain1","strain2","strain3"] # to be transformed to int columns
def getDf(fl, startTime):
    """Read one measurement section from the open logger file *fl*.

    Consumes lines until the next header line containing "Time" and returns a
    dataframe with columns time/strain1/strain2/strain3, where "time" is
    *startTime* plus the logged millisecond offset.
    """
    # ! note that we remove the first data line for each measurement since the timestamp remains zero for two first lines
    fl.readline() # line removed
    line = fl.readline()
    lines = []
    while "Time" not in line:
        cleanLine = line.rstrip()
        # trick for int since parsing entire column was not working
        intsLine = cleanLine.replace(".00", "")
        splitedLine = intsLine.split(",")
        lines.append(splitedLine)
        line = fl.readline()
    # create dataframe
    df = pd.DataFrame(lines, columns = col_names)
    # create time column from the millisecond counter
    df["time"] = df["millis"].apply(lambda x: startTime + datetime.timedelta(milliseconds = int(x)))
    # drop ID, millis and temperature, and order columns
    df = df.drop(["ID", "temp", "millis"], axis = 1)
    df = df[cols_ordered]
    # adjust types
    df[cols_int] = df[cols_int].astype(int)
    return df
# Load data to dataframe. As we were not working with our usually experiment protocol, I had to skip phase = bs2.
# In[55]:
filenames = ["2022823_exp1","2022848_exp2","2022857_exp3", "202285_exp4", "2022829_exp5"]
nExp = len(filenames) # we simply calculate the number of experiments
# big data frame
df = pd.DataFrame()
for i, this_file in enumerate(filenames):
# experiment counter
exp = i + 1
# open file
with open(this_file + ".TXT", 'r') as fl:
# throw away first 3 lines and get baseline 1 start time
for i in range(3):
fl.readline()
# get start time for first baseline
bl1_time = fl.readline().replace("BASELINE Time: ", "")
startTime = toDate(bl1_time)
# get data for first baseline
df_bl1 = getDf(fl, startTime)
df_bl1["phase"] = "bl1"
# get start time for experiment
exp_time = fl.readline().replace("RECORDING Time: ", "")
startTime = toDate(exp_time)
# get data for experiment
df_exp = getDf(fl, startTime)
df_exp["phase"] = "exp"
# get start time for second baseline
#bl2_time = fl.readline().replace("BASELINE Time: ", "")
#startTime = toDate(bl2_time)
# get data for second baseline
#df_bl2 = getDf(fl, startTime)
#df_bl2["phase"] = "bl2"
# create full panda
df_exp_full = pd.concat([df_bl1, df_exp])
# create experiment column
df_exp_full["exp"] = exp
df = pd.concat([df, df_exp_full])
# shift columns exp and phase to begining
cols = list(df.columns)
cols = [cols[0]] + [cols[-1]] + [cols[-2]] + cols[1:-2]
df = df[cols]
#print(df)
# In[56]:
def plotExpLines(df, exp):
    """Plot strain3/strain1/strain2 of one experiment in three stacked subplots.

    Args:
        df: dataframe with "time" and "strain1".."strain3" columns
            (e.g. ``df[df["exp"] == exp]``).
        exp: experiment number, used only for the figure title.

    Bug fix: the original body plotted the global ``dfExp`` instead of the
    ``df`` argument, so the parameter was silently ignored.
    """
    fig, ax = plt.subplots(3, 1, figsize=(15, 8), sharex='col')
    fig.suptitle('Experiment ' + str(exp), fontsize=16)
    # fig.subplots_adjust(top=0.88)
    ax[0].plot(df["time"], df["strain3"], 'tab:green')
    ax[0].set(ylabel='strain3')
    ax[1].plot(df["time"], df["strain1"], 'tab:red')
    ax[1].set(ylabel='strain1')
    ax[2].plot(df["time"], df["strain2"], 'tab:blue')
    ax[2].set(ylabel='strain2')
    ax[2].set(xlabel='time [ms]')
    plt.show()
# ### Experiment 1
# In[57]:
# Raw strain signal per experiment (strain gauge label in the y-axis label).
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 1]['time'],df[df["exp"] == 1]['strain3'])
plt.xlabel('daytime')
plt.ylabel('4A')
plt.title('Experiment 1: 4A ')
plt.show()
# We applied 34 cycles.
# ### Experiment 2
# In[58]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 2]['time'],df[df["exp"] == 2]['strain3'])
plt.xlabel('daytime')
plt.ylabel('3A')
plt.title('Experiment 2: 3A ')
plt.show()
# # Experiment 3
# In[59]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 3]['time'],df[df["exp"] == 3]['strain3'])
plt.xlabel('daytime')
plt.ylabel('2B')
plt.title('Experiment 3: 2B ')
plt.show()
# ### Experiment 4
# In[60]:
figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 4]['time'],df[df["exp"] == 4]['strain3'])
plt.xlabel('daytime')
plt.ylabel('1A')
plt.title('Experiment 4: 1A ')
plt.show()
# ### Experiment 5
# In[61]:
# Experiment 5 recorded two gauges (3B on strain2, 3C on strain3).
fig, ax = plt.subplots(2, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment 5: 3B & 3C ', fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain3'], 'tab:green')
ax[0].set(ylabel='3C')
ax[1].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain2'], 'tab:red')
ax[1].set(ylabel='3B')
ax[1].set(xlabel='daytime')
plt.show()
# In[62]:
#dfExp = df[df["exp"] == 3]
#plotExpLines(dfExp, 3)
# # Analysis
# Now we try to compare the data from the Flexometer of Fischer and from our Bootsensing.
# - Fischer: force F over displacement angle alpha
# - Bootsensing: deformation measured by strain gauge (resistance change) in at the moment unknown unit over time (daytime in plot shown)
# The idea now is to identify the last 5 cycles in the Bootsensing data automatically and to extract the time information (t0, t). Afterwards this delta t can be applied to Fischer's data to plot force F over the extracted time.
# ### Bootsensing: Cycle identification
# For Experiments 1-5 we will identify the last 5 cycles of strain3. As the data of Fischer starts at a peak (maximum load), we will identify the peaks in our Bootsensing data as well and extract the last 6 peak indexes. Applying these indices to the strain3/time data we get the last 5 cycles.
#
# Find peaks: find_peaks function https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
# Find valley: with Inverse of find peaks
#
#
# In[63]:
from scipy.signal import find_peaks
import numpy as np
# Load data of Experiments 1-5 and locate peaks/valleys of strain3.
# Valleys are found by running find_peaks on the sign-inverted signal.
ds={} # dict for strain data -> dataformat will be changed
dt={} # time data
peaks={} # peaks
valleys={} # valleys
inv_ds={} # inverse for valleys calculation
ds_peaks={} # index of peak (used for 5-2)
ds_peaks_end={} # index of last peaks
ds_valleys_end = {} # index of last valley
ds_valleys={} # index of valley (used for 5-2)
len_valley={} # valley length
for i in range(1,6): # i = Experiment number
    ds[i]=df[df["exp"] == i]['strain3'] #data for strain3
    dt[i]=df[df["exp"] == i]['time'] # time data
    ds[i]=ds[i].dropna() # drop NaN
    dt[i]=dt[i].dropna()
    ds[i]=ds[i].reset_index(drop=True) #reset index
    dt[i]=dt[i].reset_index(drop=True)
    peaks[i],_=find_peaks(ds[i],prominence=100000) # find peaks
    inv_ds[i]=ds[i]*(-1) # inverse of ds
    valleys[i],_=find_peaks(inv_ds[i],prominence=10000) # find valleys
    for j in range(1,6): # j = cycle number
        ds_valleys[j,i]=valleys[i][-1-j:-j] # selecting last 5 valleys
        ds_valleys_end[j,i]=valleys[i][-1:] # select last valley
        ds_valleys[j,i]=ds_valleys[j,i][0] # assign index
        ds_valleys_end[j,i]=ds_valleys_end[j,i][0]
        ds_peaks[j,i]=peaks[i][-1-j:-j] # selecting last 5 peaks
        ds_peaks_end[j,i]=peaks[i][-1:] # select last peak
        ds_peaks[j,i]=ds_peaks[j,i][0] # assign index
        ds_peaks_end[j,i]=ds_peaks_end[j,i][0]
#print(ds1[1][ds_valleys[1,1]])
#Calculate cycle lengths
#for i in range(1,6):
    #len_valley[e] = dt1[e][ds_valleys[1,1]] - dt1[e][ds_valleys[2,1]] #1th
    #len_valley1_2[i] = dt1[ds_valley_3[i]] - dt1[ds_valley_4[i]] #2th
    #len_valley2_3[i] = dt1[ds_valley_2[i]] - dt1[ds_valley_3[i]] #3th
    #len_valley3_4[i] = dt1[ds_valley_1[i]] - dt1[ds_valley_2[i]] #4th
    #len_valley4_5[i] = dt1[ds_valley_last_end[i]] - dt1[ds_valley_1[i]] #5th
# EXPERIMENT 1: pay attention for peaks/valley after cycles
# Now we will plot the data for strain3 for each experiment with their peaks and valleys.
# In[64]:
# Plot peaks and valleys for Exp 1-5 for strain3
fig1, ax1 = plt.subplots(5, 1, figsize=(15, 8))
fig1.subplots_adjust(top=2)
fig1.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
for i in range(5): # i for Experiment number
    ax1[i].plot(df[df["exp"] == (i+1)]['time'], df[df["exp"] == (i+1)]['strain3'], 'tab:green')
    ax1[i].plot(dt[(i+1)][peaks[(i+1)]],ds[(i+1)][peaks[(i+1)]],"x") #Plot peaks with x
    ax1[i].plot(dt[(i+1)][valleys[(i+1)]],ds[(i+1)][valleys[(i+1)]],"o") #Plot valleys with o
    ax1[i].set(ylabel='raw signal')
    ax1[i].set(xlabel='daytime')
    ax1[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig1.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# Plot last 5 cycles for Exp 1-5 for strain3
fig2, ax2 = plt.subplots(5, 1, figsize=(10, 8))
fig2.suptitle('Experiments 1-5: last 5 cycles ', fontsize=16)
for i in range(5): # i for Experiment number
    ax2[i].plot(dt[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]],ds[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]]) # select data between 5th last and last valley
    #ax2[i].plot(dt[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]],ds[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]])# select data between 5th last and last peak
    ax2[i].set(ylabel='raw signal')
    ax2[i].set(xlabel='daytime')
    ax2[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig2.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
#plt.axvline(x=dt[ds_valley_2_index],color="grey") #time borders 3th cycle
#plt.axvline(x=dt[ds_valley_3_index],color="grey")
#plt.axhline(y=ds[ds_valley_3_index],color="red") # h line
# For Experiment 2-5 the last 5 cycles are clear. The signal of experiment 1 is raising again after the cyclic loading as it's not possible to select the last 5 cycles with this "peaks" method, but happily we can extract still the last cycle.
# As we can see in the plot of the last 5 cycles above, the last cycle for Exp1, Exp3 and Exp5 is ending with a peak where Exp2 and Exp4 is ending with a valley. We can say this from the plots as we know from our exported machine data that a cycle ends always with the maximum force of 150N. This means a valley or peak for our bootsensing system.
# ### Match Fischer Data with Bootsensing cycle time
# Now we are going to match the Bootsensing cycle time with the force data of Fischer for each experiment 1-5. As the machine of Fischer applied the load with a frequency of 0.33 Hz, the cycle length of each cycle should be approximately t=3s. We verified this calculating the length between 2 neighbour valley of our bootsensing data (see code above).
# In[65]:
# Match Fischer machine force data with the bootsensing cycle time: build a
# datetime linspace over the last cycle of each experiment and plot force
# (left column) next to the strain signal of the same cycle (right column).
#Identify frequency of Fischer Dataacquisition
f={} # Fischer force matrix
freq={} # matrix with vector length to identify frequency
for i in range(5): #
    f[i] = dfm[i][:,2*i] # load force data for Exp5, strain3 0,2,4,6,8
    freq[i] = len(dfm[i][:,2*i]) # force vector len
#Create time linspace for Fischer data
#Timestamp can not be selected by item, done manually instead
time_start1=dt[1][ds_peaks[5,1]] # Exp1: select manually last cycle
time_end1=dt[1][ds_peaks[4,1]]
time_start2=dt[2][ds_valleys[5,2]] # Exp2
time_end2=dt[2][ds_valleys[4,2]]
time_start3=dt[3][ds_peaks[5,3]] # Exp3
time_end3=dt[3][ds_peaks[4,3]]
time_start4=dt[4][ds_valleys[5,4]] # Exp4
time_end4=dt[4][ds_valleys[4,4]]
time_start5=dt[5][ds_peaks[5,5]] # Exp5
time_end5=dt[5][ds_peaks[4,5]]
#print(time_start1,time_end1)
x1=pd.date_range(time_start1, time_end1, periods=freq[0]).to_pydatetime()
x2=pd.date_range(time_start2, time_end2, periods=freq[1]).to_pydatetime()
x3=pd.date_range(time_start3, time_end3, periods=freq[2]).to_pydatetime()
x4=pd.date_range(time_start4, time_end4, periods=freq[3]).to_pydatetime()
x5=pd.date_range(time_start5, time_end5, periods=freq[4]).to_pydatetime()
#Plot Fischer Data in timerange x
fig3, ax3 = plt.subplots(5, 2, figsize=(12, 10))
fig3.suptitle('Experiments 1-5: Fischer F over Bootsensing daytime (left), Bootsensing cycle (right) ', fontsize=16)
ax3[0,0].plot(x1,f[0])
ax3[0,0].set(xlabel='daytime')
ax3[0,0].set(ylabel='F[N]')
ax3[0,0].set_title('Experiment 1')
ax3[1,0].plot(x2,f[1])
ax3[1,0].set(xlabel='daytime')
ax3[1,0].set(ylabel='F[N]')
ax3[1,0].set_title('Experiment 2')
ax3[2,0].plot(x3,f[2])
ax3[2,0].set(xlabel='daytime')
ax3[2,0].set(ylabel='F[N]')
ax3[2,0].set_title('Experiment 3')
ax3[3,0].plot(x4,f[3])
ax3[3,0].set(xlabel='daytime')
ax3[3,0].set(ylabel='F[N]')
ax3[3,0].set_title('Experiment 4')
ax3[4,0].plot(x5,f[4])
ax3[4,0].set(xlabel='daytime')
ax3[4,0].set(ylabel='F[N]')
ax3[4,0].set_title('Experiment 5')
#for i in range(1,5): # Exp2-5
    #ax3[i,1].plot(dt[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]],ds[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]])
    #ax3[i,1].set(ylabel='strain3')
    #ax3[i,1].set(xlabel='daytime')
ax3[0,1].plot(dt[1][ds_peaks[5,1]:ds_peaks[4,1]],ds[1][ds_peaks[5,1]:ds_peaks[4,1]]) # special for Exp1 with peaks
ax3[0,1].set(xlabel='daytime')
ax3[0,1].set(ylabel='4A')
ax3[1,1].plot(dt[2][ds_valleys[5,2]:ds_valleys[4,2]],ds[2][ds_valleys[5,2]:ds_valleys[4,2]]) #Exp2 with valleys
ax3[1,1].set(xlabel='daytime')
ax3[1,1].set(ylabel='3A')
ax3[2,1].plot(dt[3][ds_peaks[5,3]:ds_peaks[4,3]],ds[3][ds_peaks[5,3]:ds_peaks[4,3]]) #Exp3 with peaks
ax3[2,1].set(xlabel='daytime')
ax3[2,1].set(ylabel='2B')
ax3[3,1].plot(dt[4][ds_valleys[5,4]:ds_valleys[4,4]],ds[4][ds_valleys[5,4]:ds_valleys[4,4]]) # Exp4 with valley
ax3[3,1].set(xlabel='daytime')
ax3[3,1].set(ylabel='1A')
ax3[4,1].plot(dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]) #Exp5 with peaks
ax3[4,1].set(xlabel='daytime')
ax3[4,1].set(ylabel='3B')
plt.tight_layout()
fig3.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In the graphs of Fischer data (left side) you can note a little kink in unloading as well as in loading. In experiment 5 (TLT) the kink is much more prominent.
# ATTENTION: As we verified the length between neighbour valleys as well as neighbour peaks in our bootsensing data, we can confirm the freqeuncy of f=0.33 Hz applied by the machine (see plots below).
# ### Time delta Fischer&bootsensing
# Now we're going to find identify the extrema for Fischer force data and out bootsensing strain data for each single Experiment 1-5. As we applied the same timespan on the x-axis for both plot we can compare the x-coordinate of the left plot with the corresponding right one to check the response time (time delay) of our bootsensing system (like reaction time of strain gauges).
# In[66]:
# Compare the extremum position in the machine force with the corresponding
# extremum in the strain signal of the same cycle; the time difference is the
# response delay of the bootsensing system.
# Find extrema in Fischer F for Exp 1-5 in last cycle
inv_f={} # inverse of F
valleys_f={} # valleys in Fischer F
fmin={} # f for extrema
for i in range(5): # find extrema (in this case valley)
    inv_f[i]=f[i]*(-1) # inverse of f
    valleys_f[i],_=find_peaks(inv_f[i],prominence=10) # find valleys
    fmin[i]=f[i][valleys_f[i]] # y-coordinate for minima
# x-coordinate for minima
x1min=x1[valleys_f[0]] #Exp1
x2min=x2[valleys_f[1]] #Exp2
x3min=x3[valleys_f[2]] #Exp3
x4min=x4[valleys_f[3]] #Exp4
x5min=x5[valleys_f[4]] #Exp5
# Find extrema in bootsensing data for Exp 1-5 in last cycle
# extract time and strain for last cycle Exp1-5 (manually)
t1=dt[1][ds_peaks[5,1]:ds_peaks[4,1]] # Exp1 -> valley
t1=t1.reset_index(drop=True) # reset index
ds1=ds[1][ds_peaks[5,1]:ds_peaks[4,1]]
ds1=ds1.reset_index(drop=True)
t2=dt[2][ds_valleys[5,2]:ds_valleys[4,2]] # Exp2 -> peak
t2=t2.reset_index(drop=True)
ds2=ds[2][ds_valleys[5,2]:ds_valleys[4,2]]
ds2=ds2.reset_index(drop=True)
t3=dt[3][ds_peaks[5,3]:ds_peaks[4,3]] # Exp3 -> valley
t3=t3.reset_index(drop=True)
ds3=ds[3][ds_peaks[5,3]:ds_peaks[4,3]]
ds3=ds3.reset_index(drop=True)
t4=dt[4][ds_valleys[5,4]:ds_valleys[4,4]] # Exp4 -> peak
t4=t4.reset_index(drop=True)
ds4=ds[4][ds_valleys[5,4]:ds_valleys[4,4]]
ds4=ds4.reset_index(drop=True)
t5=dt[5][ds_peaks[5,5]:ds_peaks[4,5]] # Exp5 -> valley
t5=t5.reset_index(drop=True)
ds5=ds[5][ds_peaks[5,5]:ds_peaks[4,5]]
ds5=ds5.reset_index(drop=True)
# Find valley for Exp1,3,5
valley_ds1,_=find_peaks(ds1*(-1)) # Exp1
valley_ds3,_=find_peaks(ds3*(-1)) # Exp3
valley_ds5,_=find_peaks(ds5*(-1)) # Exp5
# Find peak for Exp2,4
peak_ds2,_=find_peaks(ds2) # Exp2
peak_ds4,_=find_peaks(ds4) # Exp4
# Apply extrema index on x-coordinate of bootsensing for Exp1-5
t1ext=t1[valley_ds1].dt.to_pydatetime() # converting in same format as xmin
t2ext=t2[peak_ds2].dt.to_pydatetime()
t3ext=t3[valley_ds3].dt.to_pydatetime()
t4ext=t4[peak_ds4].dt.to_pydatetime()
t5ext=t5[valley_ds5].dt.to_pydatetime()
#Calculating timedelta in format to_pydatetime()
deltat1=t1ext-x1min
deltat2=t2ext-x2min
deltat3=t3ext-x3min
deltat4=t4ext-x4min
deltat5=t5ext-x5min
print(deltat1,deltat2,deltat3,deltat4,deltat5)
# If we look at the timedelta for Exp1-5 we see that we are in range of deltat=0,007678s-0,1669s. For the setup at the moment if is enough. Maybe by increasing the data acquisition frequency we could improve this time delta.
# As we know that the machine applied the load with a frequency of f=0.33 Hz with f=1/T we can calculate the timespan of loading. Identifying the vector length of Fischer force data we can plot the force over time for each single cycle.
# In[67]:
# One loading cycle lasts T = 1/f seconds; spread the force samples evenly
# over that period to plot force over time.
fm=0.33 # frequency in Hz (preset)
T=1/fm #calculate time period T
fd={}
for i in range(5):
    fd[i]= len(f[i])
freq=fd[0] #as all fd[i] have the same length we choose f[0]
x = np.linspace(0, T, freq, endpoint=False)
#Plot
fig4, ax4 = plt.subplots(5, 1, figsize=(6, 8))
fig4.suptitle('Experiments 1-5: Fischer F over time t ', fontsize=16)
for i in range(5):
    ax4[i].plot(x,f[i])
    ax4[i].set(xlabel='daytime')
    ax4[i].set(ylabel='F[N]')
    ax4[i].set_title('Experiment '+str(i+1))
plt.tight_layout()
fig4.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In[68]:
# Example experiment (Exp3) with annotated peaks and valleys, used in the thesis.
fig5, ax5 = plt.subplots(1, figsize=(15, 8))
ax5.plot(df[df["exp"] == 3]['time'], df[df["exp"] == 3]['strain3'],
         'tab:blue', label='strain gauge 2b')
ax5.plot(dt[3][peaks[3]], ds[3][peaks[3]], "rx", label='peak')        # peaks marked with x
ax5.plot(dt[3][valleys[3]], ds[3][valleys[3]], "ro", label='valley')  # valleys marked with o
ax5.set(ylabel='raw signal')
ax5.set(xlabel='daytime')
ax5.set_title('Cyclic loading of TLT Speedfit')
ax5.legend()
plt.tight_layout()
fig5.subplots_adjust(top=0.88)  # spacer between title and plot
plt.show()
# # Machine force and strain data matching
# In[69]:
from datetime import timedelta
# Build one (time, strain) frame per experiment covering the single load cycle
# selected above, then merge it with the machine (time, force) frame on the
# shared 'time' column so every strain sample gets its corresponding force.
# Select strain 4A (stored in strain3) and machine data for Experiment 1
data_s1=pd.concat([dt[1][ds_peaks[5,1]:ds_peaks[4,1]], ds[1][ds_peaks[5,1]:ds_peaks[4,1]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Select strain 3C (stored in strain3) and machine data for Experiment 5
data_s5C=pd.concat([dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Convert machine time to DataFrame in ms precision
# (rebinds x1/x5: previously arrays, afterwards single-column DataFrames)
x1=pd.DataFrame(x1,columns=['time']).astype('datetime64[ms]') # Experiment 1
x5=pd.DataFrame(x5,columns=['time']).astype('datetime64[ms]') # Experiment 5
# Convert machine force data to DataFrame
f1=pd.DataFrame(f[0],columns=['force [N]']) # Experiment 1
f5=pd.DataFrame(f[4],columns=['force [N]']) # Experiment 5
# Make one dataframe with machine time and force
data_m1=pd.concat([x1,f1],axis=1)
data_m5C=pd.concat([x5,f5],axis=1)
# Create new time for data_s storing in data_splus1
# NOTE(review): each strain timestamp is duplicated shifted by +1 ms,
# presumably so samples whose clock is 1 ms off the machine grid still find an
# exact match in the merge below — confirm against the acquisition setup.
d = timedelta(microseconds=1000)
data_snew1=[]
data_snew5=[]
for i in range(0,len(data_s1)): # Experiment 1
    data_new1=data_s1.iloc[i,0]+d
    data_snew1.append(data_new1)
for i in range(0,len(data_s5C)): # Experiment 5
    data_new5=data_s5C.iloc[i,0]+d
    data_snew5.append(data_new5)
data_splus11=pd.DataFrame(data_snew1,columns=['time']) # convert data_snew in DataFrame
data_splus12=pd.concat([data_splus11,data_s1['strain3']],axis=1) # concat data_s with data_splus1
data_splus51C=pd.DataFrame(data_snew5,columns=['time']) # convert data_snew in DataFrame
data_splus52C=pd.concat([data_splus51C,data_s5C['strain3']],axis=1) # concat data_s with data_splus1
# Data matching of strain 4A with corresponding force Experiment 1
# (inner merge on exact timestamps of both the original and +1 ms series,
# then the two matched sets are combined and sorted chronologically)
data_match11=pd.merge(data_s1, data_m1, on=['time'])
data_match12=pd.merge(data_splus12, data_m1, on=['time'])
data_4A=pd.concat([data_match11, data_match12]).sort_values('time').reset_index(drop=True)
data_4A=data_4A.rename(columns={'strain3':'strain 4A'})
# Data matching of strain 3C with corresponding force Experiment 5
data_match51C=pd.merge(data_s5C, data_m5C, on=['time'])
data_match52C=pd.merge(data_splus52C, data_m5C, on=['time'])
data_3C=pd.concat([data_match51C, data_match52C]).sort_values('time').reset_index(drop=True)
data_3C=data_3C.rename(columns={'strain3':'strain 3C'})
# In[70]:
# Experiment 1: strain 4A versus machine force, split into the loading phase
# (first 15 matched samples) and the unloading phase (remainder).
fig6, ax6 = plt.subplots(1, 2, figsize=(15, 6))
fig6.suptitle('Experiment 1: HOJI PRO TOUR W', fontsize=16)
for axis, segment, phase in ((ax6[0], data_4A.iloc[0:15], 'Loading'),
                             (ax6[1], data_4A.iloc[15:-1], 'Unloading')):
    axis.plot(segment.iloc[:, 2], segment.iloc[:, 1])
    axis.set(ylabel="Strain 4A")
    axis.set(xlabel="Force [N]")
    axis.set_title(phase)
plt.show()
# Full hysteresis curve: loading and unloading combined.
plt.plot(data_4A.iloc[:, 2], data_4A.iloc[:, 1])
plt.xlabel('Force [N]')
plt.ylabel('Strain 4A')
plt.title('Loading & Unloading')
plt.show()
# In[71]:
# Experiment 5: strain 3C versus machine force, split into the loading phase
# (first 15 matched samples) and the unloading phase (remainder).
fig7, ax7 = plt.subplots(1, 2, figsize=(15, 6))
fig7.suptitle('Experiment 5: TLT SPEEDFIT', fontsize=16)
for axis, segment, phase in ((ax7[0], data_3C.iloc[0:15], 'Loading'),
                             (ax7[1], data_3C.iloc[15:-1], 'Unloading')):
    axis.plot(segment.iloc[:, 2], segment.iloc[:, 1])
    axis.set(ylabel="Strain 3C")
    axis.set(xlabel="Force [N]")
    axis.set_title(phase)
plt.show()
# Full hysteresis curve: loading and unloading combined.
plt.plot(data_3C.iloc[:, 2], data_3C.iloc[:, 1])
plt.xlabel('Force [N]')
plt.ylabel('Strain 3C')
plt.title('Loading & Unloading')
plt.show()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.date_range",
"matplotlib.pyplot.plot",
"pandas.merge",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.figure",
"numpy.array",
"scipy.signal.find_peak... | [((2043, 2079), 'IPython.display.set_matplotlib_formats', 'set_matplotlib_formats', (['"""png"""', '"""pdf"""'], {}), "('png', 'pdf')\n", (2065, 2079), False, 'from IPython.display import set_matplotlib_formats\n'), ((3094, 3144), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(5)'], {'figsize': '(13, 11)', 'sharex': '"""col"""'}), "(5, 5, figsize=(13, 11), sharex='col')\n", (3106, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3492, 3510), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3508, 3510), True, 'import matplotlib.pyplot as plt\n'), ((4830, 4871), 'numpy.array', 'np.array', (['[-29, 17, 41.14, 63, 96, 147.8]'], {}), '([-29, 17, 41.14, 63, 96, 147.8])\n', (4838, 4871), True, 'import numpy as np\n'), ((4895, 4940), 'numpy.array', 'np.array', (['[-1.5, 2.9, 7.312, 11, 13.7, 13.94]'], {}), '([-1.5, 2.9, 7.312, 11, 13.7, 13.94])\n', (4903, 4940), True, 'import numpy as np\n'), ((4964, 5017), 'numpy.array', 'np.array', (['[-29, 3.741, 25, 43.08, 63, 72, 106, 147.8]'], {}), '([-29, 3.741, 25, 43.08, 63, 72, 106, 147.8])\n', (4972, 5017), True, 'import numpy as np\n'), ((5041, 5102), 'numpy.array', 'np.array', (['[-1.5, -0.646, 1.2, 3.127, 6.6, 8.37, 13.28, 13.94]'], {}), '([-1.5, -0.646, 1.2, 3.127, 6.6, 8.37, 13.28, 13.94])\n', (5049, 5102), True, 'import numpy as np\n'), ((5126, 5193), 'numpy.array', 'np.array', (['[-28.5, -12.27, 4.841, 18.01, 31.92, 39.46, 87.48, 145.6]'], {}), '([-28.5, -12.27, 4.841, 18.01, 31.92, 39.46, 87.48, 145.6])\n', (5134, 5193), True, 'import numpy as np\n'), ((5214, 5281), 'numpy.array', 'np.array', (['[-2.752, -0.989, 1.022, 3.23, 5.387, 6.012, 6.521, 6.915]'], {}), '([-2.752, -0.989, 1.022, 3.23, 5.387, 6.012, 6.521, 6.915])\n', (5222, 5281), True, 'import numpy as np\n'), ((5301, 5367), 'numpy.array', 'np.array', (['[-28.5, 2.042, 26.35, 41.36, 51.86, 56.33, 93.87, 145.6]'], {}), '([-28.5, 2.042, 26.35, 41.36, 51.86, 56.33, 93.87, 145.6])\n', (5309, 5367), 
True, 'import numpy as np\n'), ((5390, 5455), 'numpy.array', 'np.array', (['[-2.752, -1.94, -0.43, 1.524, 3.76, 5.625, 6.24, 6.915]'], {}), '([-2.752, -1.94, -0.43, 1.524, 3.76, 5.625, 6.24, 6.915])\n', (5398, 5455), True, 'import numpy as np\n'), ((5496, 5514), 'scipy.interpolate.interp1d', 'interp1d', (['xm1', 'ym1'], {}), '(xm1, ym1)\n', (5504, 5514), False, 'from scipy.interpolate import interp1d\n'), ((5520, 5538), 'scipy.interpolate.interp1d', 'interp1d', (['xm2', 'ym2'], {}), '(xm2, ym2)\n', (5528, 5538), False, 'from scipy.interpolate import interp1d\n'), ((5544, 5562), 'scipy.interpolate.interp1d', 'interp1d', (['xm3', 'ym3'], {}), '(xm3, ym3)\n', (5552, 5562), False, 'from scipy.interpolate import interp1d\n'), ((5568, 5586), 'scipy.interpolate.interp1d', 'interp1d', (['xm4', 'ym4'], {}), '(xm4, ym4)\n', (5576, 5586), False, 'from scipy.interpolate import interp1d\n'), ((5642, 5677), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 8)'}), '(1, 2, figsize=(15, 8))\n', (5654, 5677), True, 'import matplotlib.pyplot as plt\n'), ((6478, 6488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6486, 6488), True, 'import matplotlib.pyplot as plt\n'), ((9273, 9287), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9285, 9287), True, 'import pandas as pd\n'), ((11307, 11378), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(10, 8)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')\n", (11313, 11378), False, 'from matplotlib.pyplot import figure\n'), ((11379, 11446), 'matplotlib.pyplot.plot', 'plt.plot', (["df[df['exp'] == 1]['time']", "df[df['exp'] == 1]['strain3']"], {}), "(df[df['exp'] == 1]['time'], df[df['exp'] == 1]['strain3'])\n", (11387, 11446), True, 'import matplotlib.pyplot as plt\n'), ((11446, 11467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""daytime"""'], {}), "('daytime')\n", (11456, 11467), True, 
'import matplotlib.pyplot as plt\n'), ((11468, 11484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""4A"""'], {}), "('4A')\n", (11478, 11484), True, 'import matplotlib.pyplot as plt\n'), ((11485, 11515), 'matplotlib.pyplot.title', 'plt.title', (['"""Experiment 1: 4A """'], {}), "('Experiment 1: 4A ')\n", (11494, 11515), True, 'import matplotlib.pyplot as plt\n'), ((11516, 11526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11524, 11526), True, 'import matplotlib.pyplot as plt\n'), ((11586, 11657), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(10, 8)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')\n", (11592, 11657), False, 'from matplotlib.pyplot import figure\n'), ((11658, 11725), 'matplotlib.pyplot.plot', 'plt.plot', (["df[df['exp'] == 2]['time']", "df[df['exp'] == 2]['strain3']"], {}), "(df[df['exp'] == 2]['time'], df[df['exp'] == 2]['strain3'])\n", (11666, 11725), True, 'import matplotlib.pyplot as plt\n'), ((11725, 11746), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""daytime"""'], {}), "('daytime')\n", (11735, 11746), True, 'import matplotlib.pyplot as plt\n'), ((11747, 11763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""3A"""'], {}), "('3A')\n", (11757, 11763), True, 'import matplotlib.pyplot as plt\n'), ((11764, 11794), 'matplotlib.pyplot.title', 'plt.title', (['"""Experiment 2: 3A """'], {}), "('Experiment 2: 3A ')\n", (11773, 11794), True, 'import matplotlib.pyplot as plt\n'), ((11795, 11805), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11803, 11805), True, 'import matplotlib.pyplot as plt\n'), ((11838, 11909), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(10, 8)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')\n", (11844, 11909), False, 'from matplotlib.pyplot import figure\n'), ((11910, 11977), 
'matplotlib.pyplot.plot', 'plt.plot', (["df[df['exp'] == 3]['time']", "df[df['exp'] == 3]['strain3']"], {}), "(df[df['exp'] == 3]['time'], df[df['exp'] == 3]['strain3'])\n", (11918, 11977), True, 'import matplotlib.pyplot as plt\n'), ((11977, 11998), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""daytime"""'], {}), "('daytime')\n", (11987, 11998), True, 'import matplotlib.pyplot as plt\n'), ((11999, 12015), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""2B"""'], {}), "('2B')\n", (12009, 12015), True, 'import matplotlib.pyplot as plt\n'), ((12016, 12046), 'matplotlib.pyplot.title', 'plt.title', (['"""Experiment 3: 2B """'], {}), "('Experiment 3: 2B ')\n", (12025, 12046), True, 'import matplotlib.pyplot as plt\n'), ((12047, 12057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12055, 12057), True, 'import matplotlib.pyplot as plt\n'), ((12092, 12163), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(12, 8)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n", (12098, 12163), False, 'from matplotlib.pyplot import figure\n'), ((12164, 12231), 'matplotlib.pyplot.plot', 'plt.plot', (["df[df['exp'] == 4]['time']", "df[df['exp'] == 4]['strain3']"], {}), "(df[df['exp'] == 4]['time'], df[df['exp'] == 4]['strain3'])\n", (12172, 12231), True, 'import matplotlib.pyplot as plt\n'), ((12231, 12252), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""daytime"""'], {}), "('daytime')\n", (12241, 12252), True, 'import matplotlib.pyplot as plt\n'), ((12253, 12269), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""1A"""'], {}), "('1A')\n", (12263, 12269), True, 'import matplotlib.pyplot as plt\n'), ((12270, 12300), 'matplotlib.pyplot.title', 'plt.title', (['"""Experiment 4: 1A """'], {}), "('Experiment 4: 1A ')\n", (12279, 12300), True, 'import matplotlib.pyplot as plt\n'), ((12301, 12311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12309, 12311), True, 
'import matplotlib.pyplot as plt\n'), ((12356, 12405), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(15, 8)', 'sharex': '"""col"""'}), "(2, 1, figsize=(15, 8), sharex='col')\n", (12368, 12405), True, 'import matplotlib.pyplot as plt\n'), ((12733, 12743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12741, 12743), True, 'import matplotlib.pyplot as plt\n'), ((15971, 16006), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'figsize': '(15, 8)'}), '(5, 1, figsize=(15, 8))\n', (15983, 16006), True, 'import matplotlib.pyplot as plt\n'), ((16627, 16637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16635, 16637), True, 'import matplotlib.pyplot as plt\n'), ((16696, 16731), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'figsize': '(10, 8)'}), '(5, 1, figsize=(10, 8))\n', (16708, 16731), True, 'import matplotlib.pyplot as plt\n'), ((17380, 17390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17388, 17390), True, 'import matplotlib.pyplot as plt\n'), ((19817, 19853), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(2)'], {'figsize': '(12, 10)'}), '(5, 2, figsize=(12, 10))\n', (19829, 19853), True, 'import matplotlib.pyplot as plt\n'), ((21654, 21672), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21670, 21672), True, 'import matplotlib.pyplot as plt\n'), ((21736, 21746), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21744, 21746), True, 'import matplotlib.pyplot as plt\n'), ((24040, 24060), 'scipy.signal.find_peaks', 'find_peaks', (['(ds1 * -1)'], {}), '(ds1 * -1)\n', (24050, 24060), False, 'from scipy.signal import find_peaks\n'), ((24081, 24101), 'scipy.signal.find_peaks', 'find_peaks', (['(ds3 * -1)'], {}), '(ds3 * -1)\n', (24091, 24101), False, 'from scipy.signal import find_peaks\n'), ((24122, 24142), 'scipy.signal.find_peaks', 'find_peaks', (['(ds5 * -1)'], {}), '(ds5 * -1)\n', (24132, 24142), False, 'from scipy.signal 
import find_peaks\n'), ((24185, 24200), 'scipy.signal.find_peaks', 'find_peaks', (['ds2'], {}), '(ds2)\n', (24195, 24200), False, 'from scipy.signal import find_peaks\n'), ((24219, 24234), 'scipy.signal.find_peaks', 'find_peaks', (['ds4'], {}), '(ds4)\n', (24229, 24234), False, 'from scipy.signal import find_peaks\n'), ((25392, 25431), 'numpy.linspace', 'np.linspace', (['(0)', 'T', 'freq'], {'endpoint': '(False)'}), '(0, T, freq, endpoint=False)\n', (25403, 25431), True, 'import numpy as np\n'), ((25453, 25487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'figsize': '(6, 8)'}), '(5, 1, figsize=(6, 8))\n', (25465, 25487), True, 'import matplotlib.pyplot as plt\n'), ((25711, 25729), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25727, 25729), True, 'import matplotlib.pyplot as plt\n'), ((25793, 25803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25801, 25803), True, 'import matplotlib.pyplot as plt\n'), ((25893, 25925), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 8)'}), '(1, figsize=(15, 8))\n', (25905, 25925), True, 'import matplotlib.pyplot as plt\n'), ((26428, 26446), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26444, 26446), True, 'import matplotlib.pyplot as plt\n'), ((26510, 26520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26518, 26520), True, 'import matplotlib.pyplot as plt\n'), ((27329, 27370), 'pandas.DataFrame', 'pd.DataFrame', (['f[0]'], {'columns': "['force [N]']"}), "(f[0], columns=['force [N]'])\n", (27341, 27370), True, 'import pandas as pd\n'), ((27388, 27429), 'pandas.DataFrame', 'pd.DataFrame', (['f[4]'], {'columns': "['force [N]']"}), "(f[4], columns=['force [N]'])\n", (27400, 27429), True, 'import pandas as pd\n'), ((27502, 27529), 'pandas.concat', 'pd.concat', (['[x1, f1]'], {'axis': '(1)'}), '([x1, f1], axis=1)\n', (27511, 27529), True, 'import pandas as pd\n'), ((27538, 27565), 'pandas.concat', 'pd.concat', 
(['[x5, f5]'], {'axis': '(1)'}), '([x5, f5], axis=1)\n', (27547, 27565), True, 'import pandas as pd\n'), ((27622, 27650), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1000)'}), '(microseconds=1000)\n', (27631, 27650), False, 'from datetime import timedelta\n'), ((27932, 27974), 'pandas.DataFrame', 'pd.DataFrame', (['data_snew1'], {'columns': "['time']"}), "(data_snew1, columns=['time'])\n", (27944, 27974), True, 'import pandas as pd\n'), ((28020, 28073), 'pandas.concat', 'pd.concat', (["[data_splus11, data_s1['strain3']]"], {'axis': '(1)'}), "([data_splus11, data_s1['strain3']], axis=1)\n", (28029, 28073), True, 'import pandas as pd\n'), ((28120, 28162), 'pandas.DataFrame', 'pd.DataFrame', (['data_snew5'], {'columns': "['time']"}), "(data_snew5, columns=['time'])\n", (28132, 28162), True, 'import pandas as pd\n'), ((28209, 28264), 'pandas.concat', 'pd.concat', (["[data_splus51C, data_s5C['strain3']]"], {'axis': '(1)'}), "([data_splus51C, data_s5C['strain3']], axis=1)\n", (28218, 28264), True, 'import pandas as pd\n'), ((28377, 28416), 'pandas.merge', 'pd.merge', (['data_s1', 'data_m1'], {'on': "['time']"}), "(data_s1, data_m1, on=['time'])\n", (28385, 28416), True, 'import pandas as pd\n'), ((28430, 28474), 'pandas.merge', 'pd.merge', (['data_splus12', 'data_m1'], {'on': "['time']"}), "(data_splus12, data_m1, on=['time'])\n", (28438, 28474), True, 'import pandas as pd\n'), ((28704, 28745), 'pandas.merge', 'pd.merge', (['data_s5C', 'data_m5C'], {'on': "['time']"}), "(data_s5C, data_m5C, on=['time'])\n", (28712, 28745), True, 'import pandas as pd\n'), ((28760, 28806), 'pandas.merge', 'pd.merge', (['data_splus52C', 'data_m5C'], {'on': "['time']"}), "(data_splus52C, data_m5C, on=['time'])\n", (28768, 28806), True, 'import pandas as pd\n'), ((28982, 29017), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 6)'}), '(1, 2, figsize=(15, 6))\n', (28994, 29017), True, 'import matplotlib.pyplot as plt\n'), ((29372, 29382), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29380, 29382), True, 'import matplotlib.pyplot as plt\n'), ((29384, 29432), 'matplotlib.pyplot.plot', 'plt.plot', (['data_4A.iloc[:, 2]', 'data_4A.iloc[:, 1]'], {}), '(data_4A.iloc[:, 2], data_4A.iloc[:, 1])\n', (29392, 29432), True, 'import matplotlib.pyplot as plt\n'), ((29430, 29453), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Force [N]"""'], {}), "('Force [N]')\n", (29440, 29453), True, 'import matplotlib.pyplot as plt\n'), ((29454, 29477), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Strain 4A"""'], {}), "('Strain 4A')\n", (29464, 29477), True, 'import matplotlib.pyplot as plt\n'), ((29478, 29510), 'matplotlib.pyplot.title', 'plt.title', (['"""Loading & Unloading"""'], {}), "('Loading & Unloading')\n", (29487, 29510), True, 'import matplotlib.pyplot as plt\n'), ((29511, 29521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29519, 29521), True, 'import matplotlib.pyplot as plt\n'), ((29548, 29583), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 6)'}), '(1, 2, figsize=(15, 6))\n', (29560, 29583), True, 'import matplotlib.pyplot as plt\n'), ((29935, 29945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29943, 29945), True, 'import matplotlib.pyplot as plt\n'), ((29947, 29995), 'matplotlib.pyplot.plot', 'plt.plot', (['data_3C.iloc[:, 2]', 'data_3C.iloc[:, 1]'], {}), '(data_3C.iloc[:, 2], data_3C.iloc[:, 1])\n', (29955, 29995), True, 'import matplotlib.pyplot as plt\n'), ((29993, 30016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Force [N]"""'], {}), "('Force [N]')\n", (30003, 30016), True, 'import matplotlib.pyplot as plt\n'), ((30017, 30040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Strain 3C"""'], {}), "('Strain 3C')\n", (30027, 30040), True, 'import matplotlib.pyplot as plt\n'), ((30041, 30073), 'matplotlib.pyplot.title', 'plt.title', (['"""Loading & Unloading"""'], {}), "('Loading & Unloading')\n", (30050, 30073), True, 'import 
matplotlib.pyplot as plt\n'), ((30074, 30084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30082, 30084), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2806), 'pandas.concat', 'pd.concat', (['[d[expnr, 0], d[expnr, 1], d[expnr, 2], d[expnr, 3], d[expnr, 4]]'], {'axis': '(1)', 'join': '"""inner"""'}), "([d[expnr, 0], d[expnr, 1], d[expnr, 2], d[expnr, 3], d[expnr, 4]],\n axis=1, join='inner')\n", (2713, 2806), True, 'import pandas as pd\n'), ((2815, 2835), 'numpy.array', 'np.array', (['dfm[expnr]'], {}), '(dfm[expnr])\n', (2823, 2835), True, 'import numpy as np\n'), ((8572, 8610), 'pandas.DataFrame', 'pd.DataFrame', (['lines'], {'columns': 'col_names'}), '(lines, columns=col_names)\n', (8584, 8610), True, 'import pandas as pd\n'), ((10565, 10593), 'pandas.concat', 'pd.concat', (['[df, df_exp_full]'], {}), '([df, df_exp_full])\n', (10574, 10593), True, 'import pandas as pd\n'), ((10801, 10850), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(15, 8)', 'sharex': '"""col"""'}), "(3, 1, figsize=(15, 8), sharex='col')\n", (10813, 10850), True, 'import matplotlib.pyplot as plt\n'), ((11262, 11272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11270, 11272), True, 'import matplotlib.pyplot as plt\n'), ((14605, 14641), 'scipy.signal.find_peaks', 'find_peaks', (['ds[i]'], {'prominence': '(100000)'}), '(ds[i], prominence=100000)\n', (14615, 14641), False, 'from scipy.signal import find_peaks\n'), ((14712, 14751), 'scipy.signal.find_peaks', 'find_peaks', (['inv_ds[i]'], {'prominence': '(10000)'}), '(inv_ds[i], prominence=10000)\n', (14722, 14751), False, 'from scipy.signal import find_peaks\n'), ((16541, 16559), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16557, 16559), True, 'import matplotlib.pyplot as plt\n'), ((17294, 17312), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17310, 17312), True, 'import matplotlib.pyplot as plt\n'), ((22798, 22833), 
'scipy.signal.find_peaks', 'find_peaks', (['inv_f[i]'], {'prominence': '(10)'}), '(inv_f[i], prominence=10)\n', (22808, 22833), False, 'from scipy.signal import find_peaks\n'), ((2557, 2571), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2569, 2571), True, 'import pandas as pd\n'), ((10442, 10469), 'pandas.concat', 'pd.concat', (['[df_bl1, df_exp]'], {}), '([df_bl1, df_exp])\n', (10451, 10469), True, 'import pandas as pd\n'), ((19403, 19457), 'pandas.date_range', 'pd.date_range', (['time_start1', 'time_end1'], {'periods': 'freq[0]'}), '(time_start1, time_end1, periods=freq[0])\n', (19416, 19457), True, 'import pandas as pd\n'), ((19477, 19531), 'pandas.date_range', 'pd.date_range', (['time_start2', 'time_end2'], {'periods': 'freq[1]'}), '(time_start2, time_end2, periods=freq[1])\n', (19490, 19531), True, 'import pandas as pd\n'), ((19551, 19605), 'pandas.date_range', 'pd.date_range', (['time_start3', 'time_end3'], {'periods': 'freq[2]'}), '(time_start3, time_end3, periods=freq[2])\n', (19564, 19605), True, 'import pandas as pd\n'), ((19625, 19679), 'pandas.date_range', 'pd.date_range', (['time_start4', 'time_end4'], {'periods': 'freq[3]'}), '(time_start4, time_end4, periods=freq[3])\n', (19638, 19679), True, 'import pandas as pd\n'), ((19699, 19753), 'pandas.date_range', 'pd.date_range', (['time_start5', 'time_end5'], {'periods': 'freq[4]'}), '(time_start5, time_end5, periods=freq[4])\n', (19712, 19753), True, 'import pandas as pd\n'), ((26692, 26792), 'pandas.concat', 'pd.concat', (['[dt[1][ds_peaks[5, 1]:ds_peaks[4, 1]], ds[1][ds_peaks[5, 1]:ds_peaks[4, 1]]]'], {'axis': '(1)'}), '([dt[1][ds_peaks[5, 1]:ds_peaks[4, 1]], ds[1][ds_peaks[5, 1]:\n ds_peaks[4, 1]]], axis=1)\n', (26701, 26792), True, 'import pandas as pd\n'), ((26926, 27026), 'pandas.concat', 'pd.concat', (['[dt[5][ds_peaks[5, 5]:ds_peaks[4, 5]], ds[5][ds_peaks[5, 5]:ds_peaks[4, 5]]]'], {'axis': '(1)'}), '([dt[5][ds_peaks[5, 5]:ds_peaks[4, 5]], ds[5][ds_peaks[5, 5]:\n ds_peaks[4, 5]]], 
axis=1)\n', (26935, 27026), True, 'import pandas as pd\n'), ((27132, 27166), 'pandas.DataFrame', 'pd.DataFrame', (['x1'], {'columns': "['time']"}), "(x1, columns=['time'])\n", (27144, 27166), True, 'import pandas as pd\n'), ((27209, 27243), 'pandas.DataFrame', 'pd.DataFrame', (['x5'], {'columns': "['time']"}), "(x5, columns=['time'])\n", (27221, 27243), True, 'import pandas as pd\n'), ((28483, 28522), 'pandas.concat', 'pd.concat', (['[data_match11, data_match12]'], {}), '([data_match11, data_match12])\n', (28492, 28522), True, 'import pandas as pd\n'), ((28815, 28856), 'pandas.concat', 'pd.concat', (['[data_match51C, data_match52C]'], {}), '([data_match51C, data_match52C])\n', (28824, 28856), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
File file_util.py
@author:ZhengYuwei
"""
import os
import logging
import functools
import tensorflow as tf
from dataset.dataset_util import DatasetUtil
class FileUtil(object):
    """Build a tf.data.Dataset of (image, label) samples from a label file.

    The label file contains one sample per line:
        image_name label0,label1,label2,...
    """
    @staticmethod
    def _parse_string_line(string_line, root_path):
        """Parse one text line into an image path (joined with the image root
        directory) and its labels.

        :param string_line: one line of the label file: image_name label0 label1 label2 ...
        :param root_path: root directory of the images
        :return: DatasetV1Adapter<(image path Tensor(shape=(), dtype=string),
                 labels Tensor(shape=(?,), dtype=float32))>
        """
        strings = tf.string_split([string_line], delimiter=' ').values
        image_path = tf.string_join([root_path, strings[0]], separator=os.sep)
        labels = tf.string_to_number(strings[1:])
        return image_path, labels
    @staticmethod
    def _parse_image(image_path, _, image_size):
        """Read and decode the image behind an (image_path, labels) pair.

        :param image_path: image path, Tensor(shape=(), dtype=string)
        :param _: labels Tensor(shape=(?,), dtype=float32); unused because this
                  function only produces the image dataset
        :param image_size: target (height, width) the image is resized to
        :return: normalized image, Tensor(shape=(48, 144, ?), dtype=float32)
        """
        # Read and decode the image file.
        image = tf.read_file(image_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.resize_images(image, image_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # Converting to tf.float32 also rescales the pixels to [0, 1] (i.e. * 1/255).
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.reverse(image, axis=[2])  # decoded as RGB; convert to BGR
        return image
    @staticmethod
    def _parse_labels(_, labels, num_labels):
        """Split the label tensor of an (image_path, labels) pair into scalars.

        :param _: image path, Tensor(shape=(), dtype=string); unused because
                  this function only produces the label dataset
        :param labels: labels, Tensor(shape=(?,), dtype=float32)
        :param num_labels: number of labels per image (multi-label classification)
        :return: labels as DatasetV1Adapter<(label Tensor(shape=(), dtype=float32), ...)>
        """
        label_list = list()
        for label_index in range(num_labels):
            label_list.append(labels[label_index])
        return label_list
    @staticmethod
    def get_dataset(file_path, root_path, image_size, num_labels, batch_size, is_augment=True, is_test=False):
        """Read the label file and build an (image, labels) tf.data.Dataset.

        Each line of the label file has the format:
            image_name label0,label1,label2,label3,...

        :param file_path: path of the label file
        :param root_path: root directory of the images, joined with each image_name
        :param image_size: target size images are resized to
        :param num_labels: number of labels per image (multi-label classification)
        :param batch_size: batch size
        :param is_augment: whether to apply data augmentation to the images
        :param is_test: test phase flag; if True the resulting dataset also
                        contains the image paths
        :return: a tf.data.Dataset object
        """
        logging.info('利用标签文件、图片根目录生成tf.data数据集对象:')
        logging.info('1. 解析标签文件;')
        dataset = tf.data.TextLineDataset(file_path)
        dataset = DatasetUtil.shuffle_repeat(dataset, batch_size)
        dataset = dataset.map(functools.partial(FileUtil._parse_string_line, root_path=root_path),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        logging.info('2. 读取图片数据,构造image set和label set;')
        image_set = dataset.map(functools.partial(FileUtil._parse_image, image_size=image_size),
                                num_parallel_calls=tf.data.experimental.AUTOTUNE)
        labels_set = dataset.map(functools.partial(FileUtil._parse_labels, num_labels=num_labels),
                                 num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if is_augment:
            logging.info('2.1 image set数据增强;')
            image_set = DatasetUtil.augment_image(image_set)
        logging.info('3. image set数据白化;')
        image_set = image_set.map(lambda image: tf.image.per_image_standardization(image),
                                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if is_test:
            logging.info('4. 完成tf.data (image, label, path) 测试数据集构造;')
            path_set = dataset.map(lambda image_path, label: image_path,
                                   num_parallel_calls=tf.data.experimental.AUTOTUNE)
            dataset = tf.data.Dataset.zip((image_set, labels_set, path_set))
        else:
            logging.info('4. 完成tf.data (image, label) 训练数据集构造;')
            # Zip image and labels:
            # DatasetV1Adapter<shapes:((48,144,?), ((), ..., ())), types:(float32,(float32,...,flout32))>
            dataset = tf.data.Dataset.zip((image_set, labels_set))
        logging.info('5. 构造tf.data多epoch训练模式;')
        dataset = DatasetUtil.batch_prefetch(dataset, batch_size)
        return dataset
if __name__ == '__main__':
    import cv2
    import numpy as np
    import time
    # Enable eager mode so batches can be read, augmented and displayed directly.
    tf.enable_eager_execution()
    train_file_path = './test_sample/label.txt'  # label file
    image_root_path = './test_sample'  # image root directory
    train_batch = 100
    train_set = FileUtil.get_dataset(train_file_path, image_root_path, image_size=(48, 144), num_labels=10,
                                     batch_size=train_batch, is_augment=True)
    start = time.time()
    # BUG FIX: the dataset repeats indefinitely (DatasetUtil.shuffle_repeat),
    # so every iteration loop needs an explicit break. The original code had a
    # first preview loop with no break, which never terminated and made the
    # timed loop below unreachable; preview and inspection are now merged into
    # one bounded loop.
    for count, data in enumerate(train_set):
        print('一批(%d)图像 shape:' % train_batch, data[0].shape)
        for i in range(data[0].shape[0]):
            cv2.imshow('a', np.array(data[0][i]))
            cv2.waitKey(1)
        print('一批(%d)标签 shape:' % train_batch, len(data[1]))
        for i in range(len(data[1])):
            print(data[1][i])
        if count == 100:
            break
    print('耗时:', time.time() - start)
| [
"tensorflow.string_split",
"tensorflow.image.per_image_standardization",
"tensorflow.string_to_number",
"dataset.dataset_util.DatasetUtil.batch_prefetch",
"tensorflow.image.resize_images",
"functools.partial",
"tensorflow.reverse",
"cv2.waitKey",
"tensorflow.data.Dataset.zip",
"tensorflow.enable_e... | [((4853, 4880), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (4878, 4880), True, 'import tensorflow as tf\n'), ((5209, 5220), 'time.time', 'time.time', ([], {}), '()\n', (5218, 5220), False, 'import time\n'), ((774, 831), 'tensorflow.string_join', 'tf.string_join', (['[root_path, strings[0]]'], {'separator': 'os.sep'}), '([root_path, strings[0]], separator=os.sep)\n', (788, 831), True, 'import tensorflow as tf\n'), ((849, 881), 'tensorflow.string_to_number', 'tf.string_to_number', (['strings[1:]'], {}), '(strings[1:])\n', (868, 881), True, 'import tensorflow as tf\n'), ((1311, 1335), 'tensorflow.read_file', 'tf.read_file', (['image_path'], {}), '(image_path)\n', (1323, 1335), True, 'import tensorflow as tf\n'), ((1352, 1379), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {}), '(image)\n', (1372, 1379), True, 'import tensorflow as tf\n'), ((1396, 1489), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['image', 'image_size'], {'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(image, image_size, method=tf.image.ResizeMethod.\n NEAREST_NEIGHBOR)\n', (1418, 1489), True, 'import tensorflow as tf\n'), ((1544, 1597), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (1572, 1597), True, 'import tensorflow as tf\n'), ((1614, 1641), 'tensorflow.reverse', 'tf.reverse', (['image'], {'axis': '[2]'}), '(image, axis=[2])\n', (1624, 1641), True, 'import tensorflow as tf\n'), ((2842, 2885), 'logging.info', 'logging.info', (['"""利用标签文件、图片根目录生成tf.data数据集对象:"""'], {}), "('利用标签文件、图片根目录生成tf.data数据集对象:')\n", (2854, 2885), False, 'import logging\n'), ((2894, 2920), 'logging.info', 'logging.info', (['"""1. 解析标签文件;"""'], {}), "('1. 
解析标签文件;')\n", (2906, 2920), False, 'import logging\n'), ((2939, 2973), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['file_path'], {}), '(file_path)\n', (2962, 2973), True, 'import tensorflow as tf\n'), ((2992, 3039), 'dataset.dataset_util.DatasetUtil.shuffle_repeat', 'DatasetUtil.shuffle_repeat', (['dataset', 'batch_size'], {}), '(dataset, batch_size)\n', (3018, 3039), False, 'from dataset.dataset_util import DatasetUtil\n'), ((3227, 3275), 'logging.info', 'logging.info', (['"""2. 读取图片数据,构造image set和label set;"""'], {}), "('2. 读取图片数据,构造image set和label set;')\n", (3239, 3275), False, 'import logging\n'), ((3778, 3811), 'logging.info', 'logging.info', (['"""3. image set数据白化;"""'], {}), "('3. image set数据白化;')\n", (3790, 3811), False, 'import logging\n'), ((4604, 4643), 'logging.info', 'logging.info', (['"""5. 构造tf.data多epoch训练模式;"""'], {}), "('5. 构造tf.data多epoch训练模式;')\n", (4616, 4643), False, 'import logging\n'), ((4662, 4709), 'dataset.dataset_util.DatasetUtil.batch_prefetch', 'DatasetUtil.batch_prefetch', (['dataset', 'batch_size'], {}), '(dataset, batch_size)\n', (4688, 4709), False, 'from dataset.dataset_util import DatasetUtil\n'), ((700, 745), 'tensorflow.string_split', 'tf.string_split', (['[string_line]'], {'delimiter': '""" """'}), "([string_line], delimiter=' ')\n", (715, 745), True, 'import tensorflow as tf\n'), ((3070, 3137), 'functools.partial', 'functools.partial', (['FileUtil._parse_string_line'], {'root_path': 'root_path'}), '(FileUtil._parse_string_line, root_path=root_path)\n', (3087, 3137), False, 'import functools\n'), ((3308, 3371), 'functools.partial', 'functools.partial', (['FileUtil._parse_image'], {'image_size': 'image_size'}), '(FileUtil._parse_image, image_size=image_size)\n', (3325, 3371), False, 'import functools\n'), ((3488, 3552), 'functools.partial', 'functools.partial', (['FileUtil._parse_labels'], {'num_labels': 'num_labels'}), '(FileUtil._parse_labels, num_labels=num_labels)\n', (3505, 3552), False, 'import 
functools\n'), ((3673, 3707), 'logging.info', 'logging.info', (['"""2.1 image set数据增强;"""'], {}), "('2.1 image set数据增强;')\n", (3685, 3707), False, 'import logging\n'), ((3732, 3768), 'dataset.dataset_util.DatasetUtil.augment_image', 'DatasetUtil.augment_image', (['image_set'], {}), '(image_set)\n', (3757, 3768), False, 'from dataset.dataset_util import DatasetUtil\n'), ((4020, 4078), 'logging.info', 'logging.info', (['"""4. 完成tf.data (image, label, path) 测试数据集构造;"""'], {}), "('4. 完成tf.data (image, label, path) 测试数据集构造;')\n", (4032, 4078), False, 'import logging\n'), ((4259, 4313), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(image_set, labels_set, path_set)'], {}), '((image_set, labels_set, path_set))\n', (4278, 4313), True, 'import tensorflow as tf\n'), ((4340, 4392), 'logging.info', 'logging.info', (['"""4. 完成tf.data (image, label) 训练数据集构造;"""'], {}), "('4. 完成tf.data (image, label) 训练数据集构造;')\n", (4352, 4392), False, 'import logging\n'), ((4551, 4595), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(image_set, labels_set)'], {}), '((image_set, labels_set))\n', (4570, 4595), True, 'import tensorflow as tf\n'), ((5370, 5384), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5381, 5384), False, 'import cv2\n'), ((5597, 5611), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5608, 5611), False, 'import cv2\n'), ((5807, 5818), 'time.time', 'time.time', ([], {}), '()\n', (5816, 5818), False, 'import time\n'), ((3860, 3901), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['image'], {}), '(image)\n', (3894, 3901), True, 'import tensorflow as tf\n'), ((5336, 5356), 'numpy.array', 'np.array', (['data[0][i]'], {}), '(data[0][i])\n', (5344, 5356), True, 'import numpy as np\n'), ((5563, 5583), 'numpy.array', 'np.array', (['data[0][i]'], {}), '(data[0][i])\n', (5571, 5583), True, 'import numpy as np\n')] |
import unittest
from os.path import join
import numpy as np
from shapely.geometry import shape
from rastervision2.core.data import ClassConfig
from rastervision2.core import Box
from rastervision2.core.data import (
Scene, IdentityCRSTransformer,
SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig,
GeoJSONVectorSourceConfig, PolygonVectorOutputConfig)
from rastervision2.core.evaluation import SemanticSegmentationEvaluator
from rastervision2.pipeline import rv_config
from rastervision2.pipeline.file_system import file_to_json
from tests_v2.core.data.mock_raster_source import (MockRasterSource)
from tests_v2 import data_file_path
class TestSemanticSegmentationEvaluator(unittest.TestCase):
def setUp(self):
self.tmp_dir = rv_config.get_tmp_dir()
self.class_config = ClassConfig(names=['one', 'two'])
self.class_config.update()
self.class_config.ensure_null_class()
def tearDown(self):
self.tmp_dir.cleanup()
def get_scene(self, class_id):
# Make scene where ground truth is all set to class_id
# and predictions are set to half 0's and half 1's
scene_id = str(class_id)
rs = MockRasterSource(channel_order=[0, 1, 2], num_channels=3)
rs.set_raster(np.zeros((10, 10, 3)))
gt_rs = MockRasterSource(channel_order=[0], num_channels=1)
gt_arr = np.full((10, 10, 1), class_id)
gt_rs.set_raster(gt_arr)
gt_ls = SemanticSegmentationLabelSource(raster_source=gt_rs)
pred_rs = MockRasterSource(channel_order=[0], num_channels=1)
pred_arr = np.zeros((10, 10, 1))
pred_arr[5:10, :, :] = 1
pred_rs.set_raster(pred_arr)
pred_ls = SemanticSegmentationLabelSource(raster_source=pred_rs)
return Scene(scene_id, rs, gt_ls, pred_ls)
def test_evaluator(self):
output_uri = join(self.tmp_dir.name, 'out.json')
scenes = [self.get_scene(0), self.get_scene(1)]
evaluator = SemanticSegmentationEvaluator(self.class_config, output_uri, None)
evaluator.process(scenes, self.tmp_dir.name)
eval_json = file_to_json(output_uri)
exp_eval_json = file_to_json(data_file_path('expected-eval.json'))
self.assertDictEqual(eval_json, exp_eval_json)
def get_vector_scene(self, class_id, use_aoi=False):
gt_uri = data_file_path('{}-gt-polygons.geojson'.format(class_id))
pred_uri = data_file_path('{}-pred-polygons.geojson'.format(class_id))
scene_id = str(class_id)
rs = MockRasterSource(channel_order=[0, 1, 3], num_channels=3)
rs.set_raster(np.zeros((10, 10, 3)))
crs_transformer = IdentityCRSTransformer()
extent = Box.make_square(0, 0, 360)
config = RasterizedSourceConfig(
vector_source=GeoJSONVectorSourceConfig(uri=gt_uri, default_class_id=0),
rasterizer_config=RasterizerConfig(
background_class_id=1))
gt_rs = config.build(self.class_config, crs_transformer, extent)
gt_ls = SemanticSegmentationLabelSource(raster_source=gt_rs)
config = RasterizedSourceConfig(
vector_source=GeoJSONVectorSourceConfig(uri=pred_uri, default_class_id=0),
rasterizer_config=RasterizerConfig(
background_class_id=1))
pred_rs = config.build(self.class_config, crs_transformer, extent)
pred_ls = SemanticSegmentationLabelSource(raster_source=pred_rs)
pred_ls.vector_output = [
PolygonVectorOutputConfig(
uri=pred_uri,
denoise=0,
class_id=class_id)
]
if use_aoi:
aoi_uri = data_file_path('{}-aoi.geojson'.format(class_id))
aoi_geojson = file_to_json(aoi_uri)
aoi_polygons = [shape(aoi_geojson['features'][0]['geometry'])]
return Scene(scene_id, rs, gt_ls, pred_ls, aoi_polygons)
return Scene(scene_id, rs, gt_ls, pred_ls)
def test_vector_evaluator(self):
output_uri = join(self.tmp_dir.name, 'raster-out.json')
vector_output_uri = join(self.tmp_dir.name, 'vector-out.json')
scenes = [self.get_vector_scene(0), self.get_vector_scene(1)]
evaluator = SemanticSegmentationEvaluator(
self.class_config, output_uri, vector_output_uri)
evaluator.process(scenes, self.tmp_dir.name)
vector_eval_json = file_to_json(vector_output_uri)
exp_vector_eval_json = file_to_json(data_file_path('expected-vector-eval.json'))
# NOTE: The precision and recall values found in the file
# `expected-vector-eval.json` are equal to fractions of the
# form (n-1)/n for n <= 7 which can be seen to be (and have
# been manually verified to be) correct.
self.assertDictEqual(vector_eval_json, exp_vector_eval_json)
def test_vector_evaluator_with_aoi(self):
output_uri = join(self.tmp_dir.name, 'raster-out.json')
vector_output_uri = join(self.tmp_dir.name, 'vector-out.json')
scenes = [self.get_vector_scene(0, use_aoi=True)]
evaluator = SemanticSegmentationEvaluator(
self.class_config, output_uri, vector_output_uri)
evaluator.process(scenes, self.tmp_dir.name)
vector_eval_json = file_to_json(vector_output_uri)
exp_vector_eval_json = file_to_json(
data_file_path('expected-vector-eval-with-aoi.json'))
# NOTE: The precision and recall values found in the file
# `expected-vector-eval.json` are equal to fractions of the
# form (n-1)/n for n <= 7 which can be seen to be (and have
# been manually verified to be) correct.
self.assertDictEqual(vector_eval_json, exp_vector_eval_json)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.full",
"rastervision2.core.data.Scene",
"rastervision2.core.Box.make_square",
"rastervision2.core.data.ClassConfig",
"rastervision2.pipeline.file_system.file_to_json",
"numpy.zeros",
"tests_v2.data_file_path",
"rastervision2.core.data.IdentityCRSTransformer",
"rastervision2... | [((5815, 5830), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5828, 5830), False, 'import unittest\n'), ((779, 802), 'rastervision2.pipeline.rv_config.get_tmp_dir', 'rv_config.get_tmp_dir', ([], {}), '()\n', (800, 802), False, 'from rastervision2.pipeline import rv_config\n'), ((832, 865), 'rastervision2.core.data.ClassConfig', 'ClassConfig', ([], {'names': "['one', 'two']"}), "(names=['one', 'two'])\n", (843, 865), False, 'from rastervision2.core.data import ClassConfig\n'), ((1207, 1264), 'tests_v2.core.data.mock_raster_source.MockRasterSource', 'MockRasterSource', ([], {'channel_order': '[0, 1, 2]', 'num_channels': '(3)'}), '(channel_order=[0, 1, 2], num_channels=3)\n', (1223, 1264), False, 'from tests_v2.core.data.mock_raster_source import MockRasterSource\n'), ((1327, 1378), 'tests_v2.core.data.mock_raster_source.MockRasterSource', 'MockRasterSource', ([], {'channel_order': '[0]', 'num_channels': '(1)'}), '(channel_order=[0], num_channels=1)\n', (1343, 1378), False, 'from tests_v2.core.data.mock_raster_source import MockRasterSource\n'), ((1396, 1426), 'numpy.full', 'np.full', (['(10, 10, 1)', 'class_id'], {}), '((10, 10, 1), class_id)\n', (1403, 1426), True, 'import numpy as np\n'), ((1476, 1528), 'rastervision2.core.data.SemanticSegmentationLabelSource', 'SemanticSegmentationLabelSource', ([], {'raster_source': 'gt_rs'}), '(raster_source=gt_rs)\n', (1507, 1528), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((1548, 1599), 'tests_v2.core.data.mock_raster_source.MockRasterSource', 'MockRasterSource', ([], {'channel_order': '[0]', 'num_channels': '(1)'}), '(channel_order=[0], num_channels=1)\n', (1564, 1599), False, 'from tests_v2.core.data.mock_raster_source import MockRasterSource\n'), ((1619, 1640), 'numpy.zeros', 'np.zeros', (['(10, 10, 1)'], {}), '((10, 10, 1))\n', 
(1627, 1640), True, 'import numpy as np\n'), ((1729, 1783), 'rastervision2.core.data.SemanticSegmentationLabelSource', 'SemanticSegmentationLabelSource', ([], {'raster_source': 'pred_rs'}), '(raster_source=pred_rs)\n', (1760, 1783), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((1800, 1835), 'rastervision2.core.data.Scene', 'Scene', (['scene_id', 'rs', 'gt_ls', 'pred_ls'], {}), '(scene_id, rs, gt_ls, pred_ls)\n', (1805, 1835), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((1888, 1923), 'os.path.join', 'join', (['self.tmp_dir.name', '"""out.json"""'], {}), "(self.tmp_dir.name, 'out.json')\n", (1892, 1923), False, 'from os.path import join\n'), ((2000, 2066), 'rastervision2.core.evaluation.SemanticSegmentationEvaluator', 'SemanticSegmentationEvaluator', (['self.class_config', 'output_uri', 'None'], {}), '(self.class_config, output_uri, None)\n', (2029, 2066), False, 'from rastervision2.core.evaluation import SemanticSegmentationEvaluator\n'), ((2140, 2164), 'rastervision2.pipeline.file_system.file_to_json', 'file_to_json', (['output_uri'], {}), '(output_uri)\n', (2152, 2164), False, 'from rastervision2.pipeline.file_system import file_to_json\n'), ((2554, 2611), 'tests_v2.core.data.mock_raster_source.MockRasterSource', 'MockRasterSource', ([], {'channel_order': '[0, 1, 3]', 'num_channels': '(3)'}), '(channel_order=[0, 1, 3], num_channels=3)\n', (2570, 2611), False, 'from tests_v2.core.data.mock_raster_source import MockRasterSource\n'), ((2684, 2708), 'rastervision2.core.data.IdentityCRSTransformer', 'IdentityCRSTransformer', ([], {}), '()\n', (2706, 2708), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, 
SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((2726, 2752), 'rastervision2.core.Box.make_square', 'Box.make_square', (['(0)', '(0)', '(360)'], {}), '(0, 0, 360)\n', (2741, 2752), False, 'from rastervision2.core import Box\n'), ((3057, 3109), 'rastervision2.core.data.SemanticSegmentationLabelSource', 'SemanticSegmentationLabelSource', ([], {'raster_source': 'gt_rs'}), '(raster_source=gt_rs)\n', (3088, 3109), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((3420, 3474), 'rastervision2.core.data.SemanticSegmentationLabelSource', 'SemanticSegmentationLabelSource', ([], {'raster_source': 'pred_rs'}), '(raster_source=pred_rs)\n', (3451, 3474), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((3951, 3986), 'rastervision2.core.data.Scene', 'Scene', (['scene_id', 'rs', 'gt_ls', 'pred_ls'], {}), '(scene_id, rs, gt_ls, pred_ls)\n', (3956, 3986), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((4046, 4088), 'os.path.join', 'join', (['self.tmp_dir.name', '"""raster-out.json"""'], {}), "(self.tmp_dir.name, 'raster-out.json')\n", (4050, 4088), False, 'from os.path import join\n'), ((4117, 4159), 'os.path.join', 'join', (['self.tmp_dir.name', '"""vector-out.json"""'], {}), "(self.tmp_dir.name, 'vector-out.json')\n", (4121, 4159), False, 'from os.path import join\n'), ((4250, 4329), 'rastervision2.core.evaluation.SemanticSegmentationEvaluator', 'SemanticSegmentationEvaluator', (['self.class_config', 'output_uri', 
'vector_output_uri'], {}), '(self.class_config, output_uri, vector_output_uri)\n', (4279, 4329), False, 'from rastervision2.core.evaluation import SemanticSegmentationEvaluator\n'), ((4423, 4454), 'rastervision2.pipeline.file_system.file_to_json', 'file_to_json', (['vector_output_uri'], {}), '(vector_output_uri)\n', (4435, 4454), False, 'from rastervision2.pipeline.file_system import file_to_json\n'), ((4943, 4985), 'os.path.join', 'join', (['self.tmp_dir.name', '"""raster-out.json"""'], {}), "(self.tmp_dir.name, 'raster-out.json')\n", (4947, 4985), False, 'from os.path import join\n'), ((5014, 5056), 'os.path.join', 'join', (['self.tmp_dir.name', '"""vector-out.json"""'], {}), "(self.tmp_dir.name, 'vector-out.json')\n", (5018, 5056), False, 'from os.path import join\n'), ((5135, 5214), 'rastervision2.core.evaluation.SemanticSegmentationEvaluator', 'SemanticSegmentationEvaluator', (['self.class_config', 'output_uri', 'vector_output_uri'], {}), '(self.class_config, output_uri, vector_output_uri)\n', (5164, 5214), False, 'from rastervision2.core.evaluation import SemanticSegmentationEvaluator\n'), ((5308, 5339), 'rastervision2.pipeline.file_system.file_to_json', 'file_to_json', (['vector_output_uri'], {}), '(vector_output_uri)\n', (5320, 5339), False, 'from rastervision2.pipeline.file_system import file_to_json\n'), ((1287, 1308), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {}), '((10, 10, 3))\n', (1295, 1308), True, 'import numpy as np\n'), ((2202, 2238), 'tests_v2.data_file_path', 'data_file_path', (['"""expected-eval.json"""'], {}), "('expected-eval.json')\n", (2216, 2238), False, 'from tests_v2 import data_file_path\n'), ((2634, 2655), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {}), '((10, 10, 3))\n', (2642, 2655), True, 'import numpy as np\n'), ((3521, 3590), 'rastervision2.core.data.PolygonVectorOutputConfig', 'PolygonVectorOutputConfig', ([], {'uri': 'pred_uri', 'denoise': '(0)', 'class_id': 'class_id'}), '(uri=pred_uri, denoise=0, class_id=class_id)\n', 
(3546, 3590), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((3769, 3790), 'rastervision2.pipeline.file_system.file_to_json', 'file_to_json', (['aoi_uri'], {}), '(aoi_uri)\n', (3781, 3790), False, 'from rastervision2.pipeline.file_system import file_to_json\n'), ((3885, 3934), 'rastervision2.core.data.Scene', 'Scene', (['scene_id', 'rs', 'gt_ls', 'pred_ls', 'aoi_polygons'], {}), '(scene_id, rs, gt_ls, pred_ls, aoi_polygons)\n', (3890, 3934), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((4499, 4542), 'tests_v2.data_file_path', 'data_file_path', (['"""expected-vector-eval.json"""'], {}), "('expected-vector-eval.json')\n", (4513, 4542), False, 'from tests_v2 import data_file_path\n'), ((5397, 5449), 'tests_v2.data_file_path', 'data_file_path', (['"""expected-vector-eval-with-aoi.json"""'], {}), "('expected-vector-eval-with-aoi.json')\n", (5411, 5449), False, 'from tests_v2 import data_file_path\n'), ((2821, 2878), 'rastervision2.core.data.GeoJSONVectorSourceConfig', 'GeoJSONVectorSourceConfig', ([], {'uri': 'gt_uri', 'default_class_id': '(0)'}), '(uri=gt_uri, default_class_id=0)\n', (2846, 2878), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((2910, 2949), 'rastervision2.core.data.RasterizerConfig', 'RasterizerConfig', ([], {'background_class_id': '(1)'}), '(background_class_id=1)\n', (2926, 2949), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, 
PolygonVectorOutputConfig\n'), ((3178, 3237), 'rastervision2.core.data.GeoJSONVectorSourceConfig', 'GeoJSONVectorSourceConfig', ([], {'uri': 'pred_uri', 'default_class_id': '(0)'}), '(uri=pred_uri, default_class_id=0)\n', (3203, 3237), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((3269, 3308), 'rastervision2.core.data.RasterizerConfig', 'RasterizerConfig', ([], {'background_class_id': '(1)'}), '(background_class_id=1)\n', (3285, 3308), False, 'from rastervision2.core.data import Scene, IdentityCRSTransformer, SemanticSegmentationLabelSource, RasterizedSourceConfig, RasterizerConfig, GeoJSONVectorSourceConfig, PolygonVectorOutputConfig\n'), ((3819, 3864), 'shapely.geometry.shape', 'shape', (["aoi_geojson['features'][0]['geometry']"], {}), "(aoi_geojson['features'][0]['geometry'])\n", (3824, 3864), False, 'from shapely.geometry import shape\n')] |
'''
Copyright (c) 2020, 2021 <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
@author: <NAME>, <NAME>
@contact: <EMAIL>, <EMAIL>
'''
import sys,argparse
import os,glob
import numpy as np
import pandas as pd
from scipy import stats
import re,bisect
import random,time
import cooler
import string
def get_local_interaction_from_matrix(matrixfile,tmpdir,chrom_len,genomic_distance):
# for each chr, extract data from .hic file
coolfile = cooler.Cooler(matrixfile)
# check the resolution
bin_start = coolfile.bins()[:1].start
bin_end = coolfile.bins()[:1].end
resolution = int(bin_end - bin_start)
check_bins = int(genomic_distance/resolution)
# check genome assembly
try:
assert max(coolfile.chroms()["length"][:]) == chrom_len['chr1']
except AssertionError:
print('\n****\nERROR! the length of chr1 ({}) does not equal to {}.'.format(max(coolfile.chroms()["length"][:]),chrom_len['chr1']))
print('Please make sure you are using the accurate genome assembly: hg38/mm10.\n****\n')
raise
# ==== read start and end pos for each chrom
chroms = coolfile.chroms()["name"][:]
# chroms = [i for i in chroms if 'chr'+i.lstrip('chr') in chrom_len.keys()]
chr_local_interaction = {}
chr_ord_id = {}
for chr in chroms:
bins_chr = coolfile.bins().fetch(chr)
chr_start_ID = bins_chr.index[0]
chr_start_pos = bins_chr.start[chr_start_ID]
chr_end_ID = bins_chr.index[-1]
chr_end_pos = bins_chr.start[chr_end_ID]
# initiate the local interaction dict
chr_ord_id[chr] = [chr_start_ID,chr_start_pos,chr_end_ID,chr_end_pos]
chr_local_interaction[chr] = {}
for bin in np.arange(chr_start_ID,chr_end_ID+1):
chr_local_interaction[chr][int(bin)] = [0]*check_bins
# == Split the contact matrix pixel records into equally sized chunks to save memory
pixels_len = coolfile.pixels().shape[0]
chunksize=2e7
total_chunks = int(np.ceil(pixels_len/chunksize))
for chunk in np.arange(total_chunks):
chunk_start = int(chunk*chunksize)
chunk_end = int(min(chunk_start+chunksize,pixels_len))#;print(chunk_start,chunk_end)
pixels_chunk = coolfile.pixels()[chunk_start:chunk_end]
# ==== keep only check_bins-limited diagonal pixels
bin_shift = pixels_chunk['bin2_id']-pixels_chunk['bin1_id']
filtered_pixels = pixels_chunk[(bin_shift>=0)&(bin_shift<check_bins)]
print('== processing {}/{} '.format(chunk+1,total_chunks))
for chr in chroms:
chr_start_ID = chr_ord_id[chr][0]
chr_end_ID = chr_ord_id[chr][2]
pixels_chr = filtered_pixels[(filtered_pixels['bin1_id']>=chr_start_ID)&(filtered_pixels['bin1_id']<=chr_end_ID)]
for pixel_id in pixels_chr.index:
leftID = pixels_chr.loc[pixel_id,'bin1_id']
rightID = pixels_chr.loc[pixel_id,'bin2_id']
score = pixels_chr.loc[pixel_id,'count']
chr_local_interaction[chr][leftID][rightID-leftID] = score
# ==== write out the local interaction, for double check
# flag= os.path.basename(matrixfile).split('.cool')[0]
# flag = '{}_{}'.format(flag,''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6)))
# for chr in chroms:
# if 'chr'+chr.lstrip('chr') in chrom_len.keys():
# chr_start_id = chr_ord_id[chr][0]
# chr_start_pos = chr_ord_id[chr][1]
# outfile = tmpdir+os.sep+'tmp_{}_chr{}_res_{}_view_region_{}.csv'.format(flag,chr.lstrip('chr'),resolution,genomic_distance)
# outf = open(outfile,'w')
# outf.write('{}\t{}\n'.format('dis','\t'.join(map(str,np.arange(0,genomic_distance,resolution)))))
# for view_id in chr_local_interaction[chr].keys():
# view_pos = chr_start_pos+(view_id-chr_start_id)*resolution
# outf.write('{}\t{}\n'.format(view_pos,'\t'.join(map(str,chr_local_interaction[chr][view_id]))));
# outf.close()
# save the local interaction
interaction_dfs = {} # interaction dataframe
for chr in chroms:
if 'chr'+chr.lstrip('chr') in chrom_len.keys():
chr_start_id = chr_ord_id[chr][0]
chr_start_pos = chr_ord_id[chr][1]
chr_key='chr{}'.format(chr.lstrip('chr'))
interaction_dfs[chr_key] = pd.DataFrame.from_dict(chr_local_interaction[chr],orient='index')
columns = np.arange(0,genomic_distance,resolution)
index = [chr_start_pos+(view_id-chr_start_id)*resolution for view_id in interaction_dfs[chr_key].index]
interaction_dfs[chr_key].columns = columns
interaction_dfs[chr_key].index = index
return interaction_dfs,resolution
def main(args):
args.infile='a6010_r5000.cool'
args.infile = 'pd9_r50000.cool'
os.makedirs(args.outdir,exist_ok=True)
chrom_len = GenomeData.species_chrom_lengths[args.species]
matrixfile = args.infile
tmpdir = args.outdir
genomic_distance = args.genomicDistance
resolution = 50000
# for input ord/matrix file, get the local interaction for each chrom
flag,resolution = get_local_interaction_from_matrix(matrixfile,tmpdir,chrom_len,genomic_distance)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--infile', action = 'store', type = str,dest = 'infile', help = 'input matrix file', metavar = '<dir>')
parser.add_argument('-o','--outdir', action = 'store', type = str,dest = 'outdir', help = 'outdir of ,default: current dir', metavar = '<dir>',default='./')
parser.add_argument('-s','--species', action = 'store', type = str,dest = 'species', help = 'species used to choose correct chromosome, e.g., hg38 or mm10', metavar = '<str>',required=True)
parser.add_argument('-v', '--genomicDistance', action = 'store', type = int,dest = 'genomicDistance', help = 'genomic distance for local interaction', metavar = '<int>',default=200000)
args = parser.parse_args()
if(len(sys.argv))<0:
parser.print_help()
sys.exit(1)
main(args)
| [
"numpy.ceil",
"os.makedirs",
"argparse.ArgumentParser",
"pandas.DataFrame.from_dict",
"cooler.Cooler",
"numpy.arange",
"sys.exit"
] | [((546, 571), 'cooler.Cooler', 'cooler.Cooler', (['matrixfile'], {}), '(matrixfile)\n', (559, 571), False, 'import cooler\n'), ((2163, 2186), 'numpy.arange', 'np.arange', (['total_chunks'], {}), '(total_chunks)\n', (2172, 2186), True, 'import numpy as np\n'), ((5073, 5112), 'os.makedirs', 'os.makedirs', (['args.outdir'], {'exist_ok': '(True)'}), '(args.outdir, exist_ok=True)\n', (5084, 5112), False, 'import os, glob\n'), ((5530, 5555), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5553, 5555), False, 'import sys, argparse\n'), ((1826, 1865), 'numpy.arange', 'np.arange', (['chr_start_ID', '(chr_end_ID + 1)'], {}), '(chr_start_ID, chr_end_ID + 1)\n', (1835, 1865), True, 'import numpy as np\n'), ((2115, 2146), 'numpy.ceil', 'np.ceil', (['(pixels_len / chunksize)'], {}), '(pixels_len / chunksize)\n', (2122, 2146), True, 'import numpy as np\n'), ((6333, 6344), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6341, 6344), False, 'import sys, argparse\n'), ((4564, 4630), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['chr_local_interaction[chr]'], {'orient': '"""index"""'}), "(chr_local_interaction[chr], orient='index')\n", (4586, 4630), True, 'import pandas as pd\n'), ((4652, 4694), 'numpy.arange', 'np.arange', (['(0)', 'genomic_distance', 'resolution'], {}), '(0, genomic_distance, resolution)\n', (4661, 4694), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
c = 1E-10 #m
V_0 = 8E-15 #J
m = 9.109E-31
hbar = 1.0546E-34 #J s
z_0 = (c/hbar)*np.sqrt(2*m*V_0)
max_iter = 1000
def find_pairs(f, step, a, b):
x = a
pairs = []
while (x + step < b):
if (f(x+step)/f(x) < 0):
pairs.append([x, x+step])
x += step
return pairs
def bisection(f, pairs, tolerance, max_iter):
zeros = []
for pair in pairs:
mid = (pair[1]-pair[0])/2 + pair[0]
iter = 1
while (abs(f(mid)) > tolerance and iter < max_iter):
if (f(mid)/f(pair[0]) <0): pair[1] = mid
else: pair[0] = mid
mid = (pair[1]-pair[0])/2 + pair[0]
iter += 1
if (iter < 1000):
zeros.append(mid)
return zeros
def symmetric(x):
if (x ==0):
return 1
else:
return np.sqrt((z_0/x)**2-1) - np.tan(x)
def antisymmetric(x):
if (x == 0):
return 1
else:
return np.sqrt((z_0/x)**2-1) + np.cos(x)/np.sin(x)
x = np.linspace(0,15,1000)
pairs = find_pairs(symmetric, 0.1, 0, 15)
#print(pairs)
zeros = bisection(symmetric, pairs, 1E-10, 1000)
#print(zeros)
Energies = []
for z in zeros:
Energies.append(hbar**2/(2*m*c**2)*z**2 - V_0)
print(Energies)
pairs = find_pairs(antisymmetric, 0.1, 0, 15)
#print(pairs)
zeros = bisection(antisymmetric, pairs, 1E-10, 1000)
#print(zeros)
for z in zeros:
Energies.append(hbar**2/(2*m*c**2)*z**2 - V_0)
print(Energies)
def Infinite(n):
return n**2*np.pi**2*hbar**2/(8*m*c**2) - V_0
print("infinite", Infinite(1), Infinite(2), Infinite(3), Infinite(4), Infinite(5), Infinite(6), Infinite(7))
x = np.linspace(-15,15,5)
plt.plot(x, Energies)
plt.plot(x, (Infinite(1), Infinite(2), Infinite(3), Infinite(4), Infinite(5)))
plt.plot(x, x)
plt.ylim(-.0000000000000083, -.0000000000000076)
plt.savefig("infiniteandfinite.pdf")
#search bt 0 and z0 after changing V0!!
#change graph limits | [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.tan",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((1031, 1055), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(1000)'], {}), '(0, 15, 1000)\n', (1042, 1055), True, 'import numpy as np\n'), ((1666, 1689), 'numpy.linspace', 'np.linspace', (['(-15)', '(15)', '(5)'], {}), '(-15, 15, 5)\n', (1677, 1689), True, 'import numpy as np\n'), ((1688, 1709), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Energies'], {}), '(x, Energies)\n', (1696, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1803), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {}), '(x, x)\n', (1797, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1832), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-8.3e-15)', '(-7.6e-15)'], {}), '(-8.3e-15, -7.6e-15)\n', (1812, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1889), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""infiniteandfinite.pdf"""'], {}), "('infiniteandfinite.pdf')\n", (1864, 1889), True, 'import matplotlib.pyplot as plt\n'), ((132, 152), 'numpy.sqrt', 'np.sqrt', (['(2 * m * V_0)'], {}), '(2 * m * V_0)\n', (139, 152), True, 'import numpy as np\n'), ((866, 893), 'numpy.sqrt', 'np.sqrt', (['((z_0 / x) ** 2 - 1)'], {}), '((z_0 / x) ** 2 - 1)\n', (873, 893), True, 'import numpy as np\n'), ((890, 899), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (896, 899), True, 'import numpy as np\n'), ((982, 1009), 'numpy.sqrt', 'np.sqrt', (['((z_0 / x) ** 2 - 1)'], {}), '((z_0 / x) ** 2 - 1)\n', (989, 1009), True, 'import numpy as np\n'), ((1006, 1015), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1012, 1015), True, 'import numpy as np\n'), ((1016, 1025), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1022, 1025), True, 'import numpy as np\n')] |
import numpy as np
def mean_tau_w(tau_a, w_a):
return np.average( tau_a / w_a, weights=w_a)
def var_tau_w(tau_a, w_a, av):
return np.sum( w_a*(tau_a/w_a - av)**2) / (np.sum(w_a) )
def check_moments_w(tau_a, w_a):
_av = mean_tau_w(tau_a, w_a)
_var = var_tau_w(tau_a, w_a, _av)
return _av, _var
def loop_check_moments_w(dt_l, dw_dict, exclude_empty_waits=True):
var_l = []
av_l = []
x_l = []
for dt in sorted(dt_l):
dw_df = dw_dict[dt]
if np.any(dw_df.wait_T.values > 0):
if exclude_empty_waits:
m = dw_df.weight > 0
_dw = dw_df[m]
else:
_dw = dw_df
_exp = check_moments_w(_dw.wait_T,
_dw.weight)
var_l.append(_exp[1])
av_l.append(_exp[0])
x_l.append(dt)
return np.column_stack((x_l, av_l, var_l))
| [
"numpy.any",
"numpy.average",
"numpy.sum",
"numpy.column_stack"
] | [((59, 95), 'numpy.average', 'np.average', (['(tau_a / w_a)'], {'weights': 'w_a'}), '(tau_a / w_a, weights=w_a)\n', (69, 95), True, 'import numpy as np\n'), ((888, 923), 'numpy.column_stack', 'np.column_stack', (['(x_l, av_l, var_l)'], {}), '((x_l, av_l, var_l))\n', (903, 923), True, 'import numpy as np\n'), ((141, 178), 'numpy.sum', 'np.sum', (['(w_a * (tau_a / w_a - av) ** 2)'], {}), '(w_a * (tau_a / w_a - av) ** 2)\n', (147, 178), True, 'import numpy as np\n'), ((177, 188), 'numpy.sum', 'np.sum', (['w_a'], {}), '(w_a)\n', (183, 188), True, 'import numpy as np\n'), ((496, 527), 'numpy.any', 'np.any', (['(dw_df.wait_T.values > 0)'], {}), '(dw_df.wait_T.values > 0)\n', (502, 527), True, 'import numpy as np\n')] |
# coding: utf-8
# # Neurodesign comparison of design generators
#
# In this notebook, we will compare 3 methods to generate an experimental design:
# - a design optimised using the genetic algorithm
# - a design optimised using simulations
# - a randomly drawn design
#
# We will do so using simulations: what is the resulting observed power when we simulate experiments according to the three designs.
# In[1]:
from neurodesign import optimisation,experiment
import matplotlib.pyplot as plt
from scipy.stats import t
import seaborn as sns
import pandas as pd
import numpy as np
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u'load_ext rpy2.ipython')
# Number of optimiser iterations and number of simulated experiments.
cycles = 1000
sims = 10000
# ## Optimise designs
# First we define the experiment. We will optimise an experiment with a TR of 2 seconds
# and 450 trials of 1 second each. There are 3 stimulus types; the contrasts of interest
# are each stimulus versus baseline, plus the difference between the first and third
# stimulus. We assume an autoregressive temporal autocorrelation of 0.3.
#
# We sample ITI's from a truncated exponential distribution with minimum 0.3 seconds and maximum 4 seconds, and the mean is 1 second.
# In[2]:
# define the experiment
EXP = experiment(
    TR=2,
    n_trials=450,
    P = [0.25,0.25,0.25],
    C = [[1,0,0],[0,1,0],[0,0,1],[1,0,-1]],
    n_stimuli = 3,
    rho = 0.3,
    resolution=0.1,
    stim_duration=1,
    ITImodel = "exponential",
    ITImin = 0.3,
    ITImean = 1,
    ITImax=4
)
# In[3]:
# A heavy pre-run estimates the maximum attainable estimation/detection
# efficiencies (FeMax/FdMax), which are used below to normalise the scores.
POP_Max = optimisation(
    experiment=EXP,
    weights=[0,0.5,0.25,0.25],
    preruncycles = cycles,
    cycles = 2,
    optimisation='GA'
)
POP_Max.optimise()
# In[4]:
EXP.FeMax = POP_Max.exp.FeMax
EXP.FdMax = POP_Max.exp.FdMax
# Below we define two populations of designs. We will optimise one using the genetic
# algorithm, and the other using randomly drawn designs (simulation).
#
# Both use the weight vector [0, 0.5, 0.25, 0.25] (detection power plus trial-frequency
# and confounding criteria) and run for `cycles` iterations.
# In[5]:
POP_GA = optimisation(
    experiment=EXP,
    weights=[0,0.5,0.25,0.25],
    preruncycles = 2,
    cycles = cycles,
    seed=1,
    outdes=5,
    I=10,
    folder='/tmp/',
    optimisation='GA'
)
POP_RN = optimisation(
    experiment=EXP,
    weights=[0,0.5,0.25,0.25],
    preruncycles = 2,
    cycles = cycles,
    seed=100,
    outdes=5,
    I=50,
    G=10,
    folder='/tmp/',
    optimisation='simulation'
)
# In[6]:
POP_GA.optimise()
# In[7]:
POP_RN.optimise()
# Below, we show how the efficiency scores improve over cycles for both algorithms, although the Genetic Algorithm clearly improves faster and reaches a higher plateau.
# In[8]:
plt.plot(POP_GA.optima,label='Genetic Algorithm')
plt.plot(POP_RN.optima,label='Simulation')
plt.legend()
plt.savefig("output/test_scores.pdf")
# Below, we repeat the random design generator, but we search only 100 designs and one generation. As such, this is a random design.
# In[9]:
# 1 gen
POP_JO = optimisation(
    experiment=EXP,
    weights=[0,0.5,0.25,0.25],
    preruncycles = 1,
    cycles = 1,
    seed=1,
    outdes=5,
    G=100,
    folder='/tmp/',
    optimisation='simulation'
)
POP_JO.optimise()
# In[10]:
# collect scores and take the median design
scores = [x.F for x in POP_JO.designs]
# Bug fix: the previous exact-equality lookup,
#   np.where(scores == np.median(scores))[0][0],
# raises IndexError whenever np.median interpolates between the two middle
# values (even number of designs, here G=100), because the interpolated
# median is then not an element of `scores`. Taking the middle element of
# the sorted order is robust and coincides with the median for odd counts.
median_idx = int(np.argsort(scores)[len(scores) // 2])
rnd_median = POP_JO.designs[median_idx]
# get the 90 percent prediction interval of the random-design scores
BTI_l = np.percentile(scores,5)
BTI_u = np.percentile(scores,95)
# In[11]:
print("Optimisation score - random: %s \nOptimisation score - genetic algorithm: %s \nOptimisation score - simulation (90 percent PI): %s-%s"%(POP_RN.optima[::-1][0],
                                                                    POP_GA.optima[::-1][0],BTI_l,BTI_u))
# Let's look at the resulting experimental designs.
# In[12]:
des = np.array([POP_GA.bestdesign.Xconv,POP_RN.bestdesign.Xconv,rnd_median.Xconv])
labels = ['Genetic Algorithm','Simulation','Median random design']
plt.figure(figsize=(10,7))
for ind,label in enumerate(labels):
    plt.subplot(3,1,ind+1)
    plt.plot(des[ind,:,:])
    plt.title(label)
    plt.tick_params(axis = 'x',which = 'both', bottom = 'off', labelbottom='off')
plt.savefig("output/designs.pdf")
# In[13]:
# Stack the two optimised designs together with all 100 random designs.
des = np.array([POP_GA.bestdesign.Xconv,POP_RN.bestdesign.Xconv]+[x.Xconv for x in POP_JO.designs])
# ## Simulate data
#
# We continue with the best designs from the two algorithms and the random designs. Below, we simulate data in one voxel that is significantly related to the task. We assume beta values of (0.5, 0, -0.5).
# In[ ]:
# create datatables: Y has shape (timepoints, simulations, designs),
# same Gaussian noise vector shared across designs within a simulation.
tp = des.shape[1]
Y = np.zeros([tp,sims,des.shape[0]])
for i in range(sims):
    rnd = np.random.normal(0,1,tp)
    for lb in range(Y.shape[2]):
        Y[:,i,lb] = np.dot(des[lb,:,:],np.array([0.5,0,-0.5]))+rnd
# We analyse the data using `R` below: per design and simulation, fit the
# full design (main-effect t-value) and the stimulus-1-minus-2 difference
# regressor (contrast t-value).
# In[ ]:
get_ipython().run_cell_magic(u'R', u'-i des,Y,sims -o tvals_main,tvals_diff', u'tvals_main <- array(NA,dim=c(sims,dim(Y)[3]))\ntvals_diff <- array(NA,dim=c(sims,dim(Y)[3]))\nfor (method in 1:dim(Y)[3]){\n    for (sim in 1:sims){\n        dif <- des[method,,1]-des[method,,2]\n        fit <- lm(Y[,sim,method]~des[method,,])\n        tvals_main[sim,method] <- summary(fit)$coef[2,3]\n        fit <- lm(Y[,sim,method]~dif)\n        tvals_diff[sim,method] <- summary(fit)$coef[2,3]\n    }\n}')
# This is what the distributions for the two contrasts look like.
# In[ ]:
nms = ['Main effect','Contrast effect']
plt.figure(figsize=(18,4))
dists = [0,1,median_idx]
for idx,tv in enumerate([tvals_main,tvals_diff]):
    plt.subplot(1,2,idx+1)
    for idy,method in enumerate(labels):
        sns.distplot(tv[:,dists[idy]],label=method)
    plt.title(nms[idx])
    plt.legend()
plt.savefig("output/distributions.pdf")
# ## Observed power
# In[ ]:
# We're assuming a single threshold on a single test, a representative simplification.
threshold = t.ppf(0.95,des.shape[1]-2)
nms = ['main effect','contrast effect']
out = {label:[] for label in labels}
# Columns 0 and 1 are the GA and simulation designs; columns 2.. are the
# 100 random designs, summarised by a 90 percent prediction interval.
for idx,tv in enumerate([tvals_main,tvals_diff]):
    for idy,method in enumerate(labels):
        if idy < 2:
            power = np.mean(tv[:,idy]>threshold)
            out[method].append(power)
            print("The power for the %s with %s: %f"%(nms[idx],method,power))
        else:
            powers = [np.mean(tv[:,k]>threshold) for k in range(2,tv.shape[1])]
            out[method].append(powers)
            print("The 90 percent PI for the %s with a randomly drawn design: %f-%f"%(nms[idx],
                                                       np.percentile(powers,5),np.percentile(powers,95)))
| [
"neurodesign.optimisation",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.median",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"seaborn.dist... | [((1283, 1505), 'neurodesign.experiment', 'experiment', ([], {'TR': '(2)', 'n_trials': '(450)', 'P': '[0.25, 0.25, 0.25]', 'C': '[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, -1]]', 'n_stimuli': '(3)', 'rho': '(0.3)', 'resolution': '(0.1)', 'stim_duration': '(1)', 'ITImodel': '"""exponential"""', 'ITImin': '(0.3)', 'ITImean': '(1)', 'ITImax': '(4)'}), "(TR=2, n_trials=450, P=[0.25, 0.25, 0.25], C=[[1, 0, 0], [0, 1, 0\n ], [0, 0, 1], [1, 0, -1]], n_stimuli=3, rho=0.3, resolution=0.1,\n stim_duration=1, ITImodel='exponential', ITImin=0.3, ITImean=1, ITImax=4)\n", (1293, 1505), False, 'from neurodesign import optimisation, experiment\n'), ((1574, 1687), 'neurodesign.optimisation', 'optimisation', ([], {'experiment': 'EXP', 'weights': '[0, 0.5, 0.25, 0.25]', 'preruncycles': 'cycles', 'cycles': '(2)', 'optimisation': '"""GA"""'}), "(experiment=EXP, weights=[0, 0.5, 0.25, 0.25], preruncycles=\n cycles, cycles=2, optimisation='GA')\n", (1586, 1687), False, 'from neurodesign import optimisation, experiment\n'), ((2049, 2201), 'neurodesign.optimisation', 'optimisation', ([], {'experiment': 'EXP', 'weights': '[0, 0.5, 0.25, 0.25]', 'preruncycles': '(2)', 'cycles': 'cycles', 'seed': '(1)', 'outdes': '(5)', 'I': '(10)', 'folder': '"""/tmp/"""', 'optimisation': '"""GA"""'}), "(experiment=EXP, weights=[0, 0.5, 0.25, 0.25], preruncycles=2,\n cycles=cycles, seed=1, outdes=5, I=10, folder='/tmp/', optimisation='GA')\n", (2061, 2201), False, 'from neurodesign import optimisation, experiment\n'), ((2251, 2423), 'neurodesign.optimisation', 'optimisation', ([], {'experiment': 'EXP', 'weights': '[0, 0.5, 0.25, 0.25]', 'preruncycles': '(2)', 'cycles': 'cycles', 'seed': '(100)', 'outdes': '(5)', 'I': '(50)', 'G': '(10)', 'folder': '"""/tmp/"""', 'optimisation': '"""simulation"""'}), "(experiment=EXP, weights=[0, 0.5, 0.25, 0.25], preruncycles=2,\n cycles=cycles, seed=100, outdes=5, I=50, G=10, folder='/tmp/',\n optimisation='simulation')\n", (2263, 2423), False, 'from 
neurodesign import optimisation, experiment\n'), ((2705, 2755), 'matplotlib.pyplot.plot', 'plt.plot', (['POP_GA.optima'], {'label': '"""Genetic Algorithm"""'}), "(POP_GA.optima, label='Genetic Algorithm')\n", (2713, 2755), True, 'import matplotlib.pyplot as plt\n'), ((2755, 2798), 'matplotlib.pyplot.plot', 'plt.plot', (['POP_RN.optima'], {'label': '"""Simulation"""'}), "(POP_RN.optima, label='Simulation')\n", (2763, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2810), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2808, 2810), True, 'import matplotlib.pyplot as plt\n'), ((2811, 2848), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/test_scores.pdf"""'], {}), "('output/test_scores.pdf')\n", (2822, 2848), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3174), 'neurodesign.optimisation', 'optimisation', ([], {'experiment': 'EXP', 'weights': '[0, 0.5, 0.25, 0.25]', 'preruncycles': '(1)', 'cycles': '(1)', 'seed': '(1)', 'outdes': '(5)', 'G': '(100)', 'folder': '"""/tmp/"""', 'optimisation': '"""simulation"""'}), "(experiment=EXP, weights=[0, 0.5, 0.25, 0.25], preruncycles=1,\n cycles=1, seed=1, outdes=5, G=100, folder='/tmp/', optimisation=\n 'simulation')\n", (3025, 3174), False, 'from neurodesign import optimisation, experiment\n'), ((3428, 3452), 'numpy.percentile', 'np.percentile', (['scores', '(5)'], {}), '(scores, 5)\n', (3441, 3452), True, 'import numpy as np\n'), ((3460, 3485), 'numpy.percentile', 'np.percentile', (['scores', '(95)'], {}), '(scores, 95)\n', (3473, 3485), True, 'import numpy as np\n'), ((3778, 3856), 'numpy.array', 'np.array', (['[POP_GA.bestdesign.Xconv, POP_RN.bestdesign.Xconv, rnd_median.Xconv]'], {}), '([POP_GA.bestdesign.Xconv, POP_RN.bestdesign.Xconv, rnd_median.Xconv])\n', (3786, 3856), True, 'import numpy as np\n'), ((3922, 3949), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (3932, 3949), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4176), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/designs.pdf"""'], {}), "('output/designs.pdf')\n", (4154, 4176), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4296), 'numpy.array', 'np.array', (['([POP_GA.bestdesign.Xconv, POP_RN.bestdesign.Xconv] + [x.Xconv for x in\n POP_JO.designs])'], {}), '([POP_GA.bestdesign.Xconv, POP_RN.bestdesign.Xconv] + [x.Xconv for\n x in POP_JO.designs])\n', (4204, 4296), True, 'import numpy as np\n'), ((4575, 4609), 'numpy.zeros', 'np.zeros', (['[tp, sims, des.shape[0]]'], {}), '([tp, sims, des.shape[0]])\n', (4583, 4609), True, 'import numpy as np\n'), ((5426, 5453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 4)'}), '(figsize=(18, 4))\n', (5436, 5453), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5682, 5684), True, 'import matplotlib.pyplot as plt\n'), ((5685, 5724), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/distributions.pdf"""'], {}), "('output/distributions.pdf')\n", (5696, 5724), True, 'import matplotlib.pyplot as plt\n'), ((5857, 5886), 'scipy.stats.t.ppf', 't.ppf', (['(0.95)', '(des.shape[1] - 2)'], {}), '(0.95, des.shape[1] - 2)\n', (5862, 5886), False, 'from scipy.stats import t\n'), ((3989, 4015), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(ind + 1)'], {}), '(3, 1, ind + 1)\n', (4000, 4015), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4040), 'matplotlib.pyplot.plot', 'plt.plot', (['des[ind, :, :]'], {}), '(des[ind, :, :])\n', (4024, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4043, 4059), 'matplotlib.pyplot.title', 'plt.title', (['label'], {}), '(label)\n', (4052, 4059), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4136), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '"""off"""', 'labelbottom': '"""off"""'}), "(axis='x', which='both', bottom='off', labelbottom='off')\n", (4079, 4136), True, 
'import matplotlib.pyplot as plt\n'), ((4641, 4667), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'tp'], {}), '(0, 1, tp)\n', (4657, 4667), True, 'import numpy as np\n'), ((5532, 5558), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(idx + 1)'], {}), '(1, 2, idx + 1)\n', (5543, 5558), True, 'import matplotlib.pyplot as plt\n'), ((5652, 5671), 'matplotlib.pyplot.title', 'plt.title', (['nms[idx]'], {}), '(nms[idx])\n', (5661, 5671), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5649), 'seaborn.distplot', 'sns.distplot', (['tv[:, dists[idy]]'], {'label': 'method'}), '(tv[:, dists[idy]], label=method)\n', (5616, 5649), True, 'import seaborn as sns\n'), ((6092, 6123), 'numpy.mean', 'np.mean', (['(tv[:, idy] > threshold)'], {}), '(tv[:, idy] > threshold)\n', (6099, 6123), True, 'import numpy as np\n'), ((3345, 3362), 'numpy.median', 'np.median', (['scores'], {}), '(scores)\n', (3354, 3362), True, 'import numpy as np\n'), ((4738, 4762), 'numpy.array', 'np.array', (['[0.5, 0, -0.5]'], {}), '([0.5, 0, -0.5])\n', (4746, 4762), True, 'import numpy as np\n'), ((6273, 6302), 'numpy.mean', 'np.mean', (['(tv[:, k] > threshold)'], {}), '(tv[:, k] > threshold)\n', (6280, 6302), True, 'import numpy as np\n'), ((6484, 6508), 'numpy.percentile', 'np.percentile', (['powers', '(5)'], {}), '(powers, 5)\n', (6497, 6508), True, 'import numpy as np\n'), ((6508, 6533), 'numpy.percentile', 'np.percentile', (['powers', '(95)'], {}), '(powers, 95)\n', (6521, 6533), True, 'import numpy as np\n')] |
import itertools
import logging
import random
from functools import lru_cache, partial, reduce
from itertools import product

import GPy
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy
# Alternative computation using sklearn's kernels
import sklearn.gaussian_process
from GPy.inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
from GPy.models.gp_regression import likelihoods
from GPy.util import diag
from GPy.util.linalg import pdinv, dpotrs, tdot

from common import Config
from disjoint_set import DisjointSet
from myAcquisitionLCB import MyAcquisitionLCB
from datasets import ComponentFunction, SyntheticComponentFunction
class KernelWrap(object):
    """Hashable wrapper around a kernel plus the training data, used as the
    cache key for ``cached_apply_X``.

    Identity is based on the kernel *object* (its ``id``) together with its
    active dimensions and current hyperparameters, so the cache is bypassed
    whenever the hyperparameters change.
    """

    def __init__(self, kernel, data):
        self.kernel = kernel
        self.active_dims = kernel.active_dims
        self.data = data

    def _key(self):
        """Identifying tuple: kernel identity plus current hyperparameters."""
        ls_wrap = self.kernel.lengthscale
        if type(ls_wrap) == np.ndarray:
            # ndarrays are unhashable; freeze into a tuple.
            ls_wrap = tuple(ls_wrap)
        return (id(self.kernel), tuple(self.kernel.active_dims), self.kernel.variance, ls_wrap)

    def __eq__(self, other):
        # Bug fix: the previous implementation compared hash values, so any
        # hash collision made two unrelated cache keys compare equal (and
        # lru_cache would then serve the wrong cached result). Compare the
        # identifying tuples directly instead.
        if not isinstance(other, KernelWrap):
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self):
        # Must stay consistent with __eq__ (both derive from _key()).
        return hash(self._key())

    def __call__(self, *args):
        # Delegate evaluation to the wrapped kernel.
        return self.kernel(*args)
@lru_cache(maxsize=1024)
def cached_apply_X(kernel):
    """Evaluate *kernel* on the active columns of its training inputs, memoised.

    *kernel* is a ``KernelWrap``; the cache is cleared by callers whenever
    the kernel hyperparameters are re-estimated.
    """
    active_columns = kernel.data.X[:, kernel.active_dims]
    return kernel(active_columns)
#from profilehooks import profile
# Optimizes a single GP kernel function with parameters
class KernelOptimizer(object):
    """Placeholder for a per-kernel hyperparameter optimiser (not implemented)."""
    pass
# Specifically for RBF Kernel
class RBFOptimizer(KernelOptimizer):
    """Placeholder for an RBF-specific kernel optimiser (not implemented)."""
    pass
# Optimizes the entire graph structure
class FunctionOptimizer(object):
    """Placeholder for an optimiser over the whole function structure (not implemented)."""
    pass
class GraphOptimizer(object):
    """Optimises the dependency-graph hypothesis and the per-dimension kernel
    parameters of a graph_function, optionally with random restarts.

    Subclasses must provide ``HypothesisType()`` (the hypothesis class to
    instantiate) and ``_optimize_hypotheses`` (the structure search).
    """
    def __init__(self, graphSamplingNumIter, lengthscaleNumIter, cycles, fully_optimize_lengthscales, p, M, max_group_size, sigma2, opt_restart, param_exploration):
        self.graphSamplingNumIter = graphSamplingNumIter
        self.cycles = cycles
        self.fully_optimize_lengthscales = fully_optimize_lengthscales
        self.p = p
        self.M = M
        self.max_group_size = max_group_size
        #self.sigma2 = 1e-8
        # Jitter added for numerical stability of the Cholesky factorisations.
        self.sigma2 = sigma2 + 1e-8
        assert(self.M!=0)
        assert(self.max_group_size!=0)
        self.context = Context(self.fully_optimize_lengthscales, self.p, self.M, self.max_group_size, self.sigma2, self.cycles, lengthscaleNumIter, opt_restart, param_exploration)
        # TODO some hack
        # Counter used only to number the saved graph images.
        self.my_learnt_graph_iter_count = 0
    def optimize(self, X, Y_vect, graph_function):
        """Search for the best decomposition graph (and parameters) given the
        data, then write graph/parameters/kernels back into graph_function.
        Also saves a picture of the learnt graph as a side effect."""
        if self.context.opt_restart==None:
            # Normal mode: single run from the current parameters.
            cached_apply_X.cache_clear()
            # gen_candidates=20 ??
            h = self.HypothesisType()(graph_function, self.context, Data(X, Y_vect))
            h.likelihood = h._compute_dataLogLikelihood(groupSize=-1, alpha=-1)
            h = self._optimize_hypotheses(h)
        else:
            dimensional_parameters = np.array(graph_function.dimensional_parameters)
            hypothesis_candidates = []
            # Restart mode: perturb the parameters and keep the best candidate.
            for opt_i in range(self.context.opt_restart):
                _perturbed_dim_param = np.random.normal(dimensional_parameters, dimensional_parameters*self.context.param_exploration)
                _perturbed_dim_param = np.array(list(map(self.context.kern_respect_bounds, _perturbed_dim_param)))
                graph_function.dimensional_parameters = _perturbed_dim_param
                cached_apply_X.cache_clear()
                h = self.HypothesisType()(graph_function, self.context, Data(X, Y_vect))
                h.likelihood = h._compute_dataLogLikelihood(groupSize=-1, alpha=-1)
                h = self._optimize_hypotheses(h)
                best_graph = h.make_graph()
                logging.info("Candidate graph : {} - {}".format(h.likelihood, best_graph.edges()))
                hypothesis_candidates.append(h)
            # Pick the candidate with the highest data log-likelihood.
            h = max(hypothesis_candidates, key=lambda h: h.likelihood)
        best_graph = h.make_graph()
        best_dim_params = h.dimensional_parameters
        nx.draw(best_graph, cmap = plt.get_cmap('jet'), with_labels=True)
        plt.savefig(Config().learnt_graphs_file('{:05d}.png'.format(self.my_learnt_graph_iter_count)))
        plt.clf()
        self.my_learnt_graph_iter_count += 1
        logging.info("New graph : {}".format(best_graph.edges()))
        # Perform the update
        graph_function.graph = best_graph
        graph_function.dimensional_parameters = best_dim_params
        graph_function.kernels = graph_function._make_kernels(best_dim_params, graph_function.make_fn_decompositions())
    # Update only parameters
    def optimize_parameters(self, X, Y_vect, graph_function):
        """Re-estimate only the kernel parameters (graph is left untouched),
        optionally with random restarts, and write them back."""
        if self.context.opt_restart==None:
            # Normal mode
            h = self.HypothesisType()(graph_function, self.context, Data(X, Y_vect))
            h.likelihood = h._compute_dataLogLikelihood(groupSize=-1, alpha=-1)
            h.optimize_dimensional_parameters(self.context.lengthscaleNumIter)
        else:
            dimensional_parameters = np.array(graph_function.dimensional_parameters)
            hypothesis_candidates = []
            # Restart mode: perturb the parameters and keep the best candidate.
            for opt_i in range(self.context.opt_restart):
                _perturbed_dim_param = np.random.normal(dimensional_parameters, dimensional_parameters*self.context.param_exploration)
                _perturbed_dim_param = np.array(list(map(self.context.kern_respect_bounds, _perturbed_dim_param)))
                graph_function.dimensional_parameters = _perturbed_dim_param
                h = self.HypothesisType()(graph_function, self.context, Data(X, Y_vect))
                h.likelihood = h._compute_dataLogLikelihood(groupSize=-1, alpha=-1)
                h.optimize_dimensional_parameters(self.context.lengthscaleNumIter)
                hypothesis_candidates.append(h)
            # Pick the candidate with the highest data log-likelihood.
            h = max(hypothesis_candidates, key=lambda h: h.likelihood)
        # Bug fix: best_dim_params was previously assigned only inside the
        # `else` branch, so the `opt_restart is None` path raised NameError
        # on the update below. Hoisted out of the if/else.
        best_dim_params = h.dimensional_parameters
        # Perform the update
        graph_function.dimensional_parameters = best_dim_params
        graph_function.kernels = graph_function._make_kernels(best_dim_params, graph_function.make_fn_decompositions())
    def _optimize_hypotheses(self, hypothesis_graph):
        raise NotImplementedError
    def HypothesisType(self):
        raise NotImplementedError
class Context(object):
    """Shared configuration bag for the graph/parameter optimisers."""

    def __init__(self, fully_optimize_lengthscales, p, M, max_group_size, sigma2, cycles, lengthscaleNumIter, opt_restart, param_exploration):
        # Whether the lengthscales are re-optimised while scoring hypotheses.
        self.fully_optimize_lengthscales = fully_optimize_lengthscales
        # Prior probability of an edge being present.
        self.p = p
        # Number of clusters.
        self.M = M
        # Maximum size of each cluster.
        self.max_group_size = max_group_size
        # Stability jitter for the data likelihood.
        self.sigma2 = sigma2
        # Whether cycles are assumed allowed for the prior.
        self.cycles = cycles
        self.param_exploration = param_exploration
        self.gen_candidates = 20
        # Smallest admissible lengthscale value.
        self.ls_min_limit = 1e-4
        # sqrt(0.1): keeps the variance at or above 0.1.
        self.var_min_limit = 0.31622776601683794
        self.var_max_limit = 1e5
        # Number of samples taken for the parameters.
        self.lengthscaleNumIter = lengthscaleNumIter
        self.ln_limits = (1e-2, 1e5)
        self.var_limits = (0.31622776601683794, 1e5)
        self.opt_restart = opt_restart

    def kern_respect_bounds(self, param):
        """Clamp a (lengthscale, variance) pair into the allowed box."""
        ls, var = param
        ls_lo, ls_hi = self.ln_limits
        var_lo, var_hi = self.var_limits
        clipped_ls = min(max(ls_lo, ls), ls_hi)
        clipped_var = min(max(var_lo, var), var_hi)
        return (clipped_ls, clipped_var)
class Data(object):
    """Thin container for observations: inputs X of shape (n, dim) and targets Y.

    Also exposes Y_r, the targets reshaped into an (n, 1) column vector.
    """

    def __init__(self, X, Y):
        self.X = X
        self.Y = Y
        self.dim = X.shape[1]
        self.n = X.shape[0]
        self.Y_r = Y.reshape(self.n, 1)
# There are different hypothesis representations
# ==============================================
# Base class to deal with the parameters and other misc stuff
class Hypothesis(object):
    """Base class for a decomposition hypothesis: a set of additive kernels
    (one per variable group) plus per-dimension (lengthscale, variance)
    parameters, scored by the GP marginal log-likelihood of the data.

    Subclasses define the structure representation (graph vs. group labels)
    via make_fn_decompositions_sorted() / make_graph().
    """
    def __init__(self, graph_function, context, data):
        self.ls_wrap = graph_function.ls_wrap
        self.scipy_opt = graph_function.scipy_opt
        # One (lengthscale, variance) pair per input dimension.
        self.dimensional_parameters = np.array(graph_function.dimensional_parameters)
        self.likelihood = 0.
        self.context = context
        self.data = data
        self._sk_kernel_class = graph_function._sk_kernel_class
        self._sk_kwargs = graph_function._sk_kwargs
        self.kernels = self._make_kernels(self.dimensional_parameters, graph_function.make_fn_decompositions())
    def has_cycle(self):
        # NOTE(review): returns True when the cycle basis is *empty*, i.e. the
        # name reads inverted relative to the implementation — confirm callers.
        return len(nx.cycle_basis(self.make_graph())) == 0
    def optimize_dimensional_parameters(self, dim_n_iter, groupSize = -1, alpha = 1):
        """Entry point for parameter optimisation; delegates to the
        gradient-based optimiser."""
        #return self.optimize_dimensional_old(dim_n_iter, groupSize, alpha)
        return self.optimize_dimensional_grads(dim_n_iter, groupSize, alpha)
    def optimize_dimensional_grads(self, dim_n_iter, groupSize = -1, alpha = 1):
        """Optimise all (lengthscale, variance) pairs jointly with a bounded
        gradient-based scipy optimiser; updates self.dimensional_parameters,
        the kernels and self.likelihood in place and returns the likelihood."""
        logging.info(cached_apply_X.cache_info())
        cached_apply_X.cache_clear()
        #_max_val = np.min(dim_n_iter * self.data.dim * self.context.param_n_iter,1000)
        _max_val = dim_n_iter * self.data.dim
        target_n = np.log10(_max_val)
        # Iteration budget grows exponentially with the data size n,
        # saturating at _max_val when n reaches 1000.
        iters_decay = lambda t: 1e0 * np.exp((target_n-0)/1000.0*np.log(10)*t)
        iters = int(iters_decay(self.data.n))
        dim_ls, dim_var = map(np.array, zip(*self.dimensional_parameters))
        # Pack as [all lengthscales, all variances] for the flat optimiser.
        dim_param0 = np.concatenate( (dim_ls, dim_var) )
        # One lengthscale per dimension
        dim = len(self.dimensional_parameters)
        # NOTE(review): fn_decomps is computed but never used below.
        fn_decomps = self.make_fn_decompositions_sorted()
        # There are accuracy problems with lengthscale < 1e-2
        # bounds = [(1e-2, 1e5)] * dim + [(1e-1, 1e5)] * dim
        bounds = [(1e-2, 1e5)] * dim + [(0.31622776601683794, 1e5)] * dim
        def dim_param_obj_min_f_df(dim_param):
            # Minimiser convention: negate both value and gradient.
            f, df = self.phi_f_df_cholesky_sk(dim_param[:dim], dim_param[dim:], self.kernels)
            return -f, -df
        # Cannot use the other 2 params as they differ algo to algo
        opt_x, _, _ = self.scipy_opt(dim_param_obj_min_f_df, approx_grad=False, x0=dim_param0, bounds=bounds, disp=0, maxfun=iters)
        dim_ls = opt_x[:dim]
        dim_var = opt_x[dim:]
        # Check if the dim param changed
        _dimensional_parameters = np.array(list(map(np.array, zip(dim_ls, dim_var))))
        if np.allclose(_dimensional_parameters, self.dimensional_parameters):
            return self.likelihood
        self.dimensional_parameters = _dimensional_parameters
        self._update_kernels(dim_ls, dim_var, self.kernels)
        # Include Groupsize, need to recompute the likelihood
        updated_likelihood = self._compute_dataLogLikelihood(groupSize, alpha)
        # Sanity check that it improved.
        if not np.isclose(updated_likelihood, self.likelihood):
            assert(updated_likelihood > self.likelihood)
        self.likelihood = updated_likelihood
        return self.likelihood
        #return self._compute_dataLogLikelihood(groupSize, alpha)
    def apply_X(self, kernel):
        """Evaluate the kernel on the training inputs via the shared cache."""
        return cached_apply_X(KernelWrap(kernel, self.data))
    def _compute_dataLogLikelihood(self, groupSize, alpha):
        """GP marginal log-likelihood of the data under the additive kernel,
        optionally with a CRP-style log(groupSize + alpha) group term."""
        #K = reduce(operator.add, [ kernel.K(self.data.X) for kernel in kernels ])
        K = reduce(np.add, map(self.apply_X, self.kernels.values()))
        #logp = self.phi_full(self.data.X, self.data.Y, K)
        logp = self.phi_cholesky(self.data.X, self.data.Y, K)
        # This is reserved for non overlap
        if groupSize >= 0: # in the case of Gibbs Sampling
            logp += np.log(groupSize + alpha)
        return logp
    def phi_f_df_cholesky_sk(self, dim_ls, dim_var, kernels):
        """Return (log marginal likelihood, gradient) w.r.t. the flat
        per-dimension [lengthscales, variances] vector, using sklearn kernels
        with eval_gradient=True. Mutates the kernels' hyperparameters."""
        # Noise inside
        # NOTE(review): noise_var is unused; the jitter comes from context.sigma2.
        noise_var = 0
        #likelihood = likelihoods.Gaussian(variance=noise_var)
        X = self.data.X
        y = self.data.Y
        Ky = np.zeros((X.shape[0],X.shape[0]))
        dK_dparams = []
        for var_order, k in kernels.items():
            _var_order = list(var_order)
            ls = self.ls_wrap(dim_ls[_var_order])
            _dim_var = dim_var[_var_order]
            # Component variance aggregates the per-dimension variances.
            variance = np.sqrt(sum( _dim_var**2 ))
            k._dim_var = _dim_var
            k.set_params(k1__constant_value=variance, k2__length_scale=ls)
            k.variance = variance
            k.lengthscale = ls
            K_part, dK_dparam = k(X[:,k.active_dims], eval_gradient=True)
            Ky += K_part
            dK_dparams.append(dK_dparam)
        # Add the stability jitter on the diagonal: Ky = K + sigma2 I.
        np.fill_diagonal(Ky, Ky.diagonal() + self.context.sigma2)
        LW = np.linalg.cholesky(Ky)
        c = np.linalg.inv(LW)
        Wi = np.dot(c.T,c)
        W_logdet = 2 * np.sum(np.log(LW.diagonal()))
        alpha = scipy.linalg.cho_solve((LW, True), y)
        n = X.shape[0]
        log_2_pi = np.log(2*np.pi)*n/2.0
        log_marginal = -0.5*(W_logdet + np.dot(y, alpha))
        # dL/dK = 0.5 * (alpha alpha^T - Ky^{-1})
        dL_dK = 0.5 * (np.einsum('i,k->ik', alpha, alpha, dtype=np.float64) - Wi)
        grad_dim = np.zeros(self.data.dim*2)
        # Views into the flat gradient: first half lengthscales, second half variances.
        grad_dim_ls = grad_dim[:self.data.dim]
        grad_dim_var = grad_dim[self.data.dim:]
        for k, dK_dparam in zip(kernels.values(), dK_dparams):
            dL_dparam = np.einsum('ij,jik->k', dL_dK, dK_dparam)
            # Chain rule from the sklearn log-parameterisation back to the
            # per-dimension variance/lengthscale parameters.
            dL_dsigma = dL_dparam[0] * k._dim_var / k.variance ** 2
            grad_dim_var[k.active_dims] += dL_dsigma
            dL_dls = dL_dparam[1:] / k.lengthscale
            grad_dim_ls[k.active_dims] += dL_dls
        # Fix Strange problems by numerical instability
        if log_marginal > log_2_pi:
            log_marginal = log_2_pi
            # Cannot afford to increase the L anymore, so make the gradient really small and in the opposite dir
            grad_dim = grad_dim * -1e-8
        return log_marginal, grad_dim
    # Also known as logp
    def phi_cholesky(self, X, y, Ky):
        """GP marginal log-likelihood (up to the constant n/2*log(2*pi) term)
        via a Cholesky factorisation of Ky. Mutates Ky's diagonal in place."""
        # Ky = K + sigma^2 I
        np.fill_diagonal(Ky, Ky.diagonal() + self.context.sigma2)
        # We decompose the matrix for constant use
        LW = np.linalg.cholesky(Ky)
        # We skip the computation of the inverse and solve for alpha
        # alpha = inv(Ky) * y
        alpha = scipy.linalg.cho_solve((LW, True), y)
        # Determinant using choleskey
        # W_logdet = log|Ky|
        # log(det(Ky)) = log(det(LW) * det(LW_t)) = 2 log(det(L)) = 2 log(Product(diag(L))) = 2 sum( log (dia(L)) )
        W_logdet = 2 * np.sum(np.log(LW.diagonal()))
        # term3 - ignore to save computation
        n = X.shape[0]
        log_2_pi = np.log(2*np.pi)*n/2.0
        # finally compute term1 + term2 w/o term3
        # We note that term3 is not useful as its the same throughout.
        log_marginal = -0.5*(np.dot(y, alpha) + W_logdet)
        #assert(log_marginal <= log_2_pi)
        # Fix Strange problems by numerical instability
        if log_marginal > log_2_pi:
            log_marginal = log_2_pi
        return log_marginal
    # Decide if optimizing lengthscale is needed
    def evaluate(self, groupSize = -1, alpha = 1):
        """Score this hypothesis, optionally re-optimising the parameters
        first; stores and returns the resulting log-likelihood."""
        if self.context.fully_optimize_lengthscales:
            self.likelihood = self.optimize_dimensional_parameters(self.context.lengthscaleNumIter, groupSize, alpha)
        else:
            self.likelihood = self._compute_dataLogLikelihood(groupSize, alpha)
        return self.likelihood
    def dimension(self):
        # Number of input dimensions (one parameter pair per dimension).
        return len(self.dimensional_parameters)
    def make_fn_decompositions_sorted(self):
        raise NotImplementedError
    def make_graph(self):
        raise NotImplementedError
    def clone(self):
        """Shallow copy sharing context/data, with kernels left unset (the
        caller rebuilds them for the mutated structure)."""
        h = type(self).__new__(self.__class__)
        h.likelihood = 0.
        h.dimensional_parameters = self.dimensional_parameters.copy()
        h.context = self.context
        h.data = self.data
        h.kernels = None
        h._sk_kernel_class = self._sk_kernel_class
        h._sk_kwargs = self._sk_kwargs
        h.ls_wrap = self.ls_wrap
        h.scipy_opt = self.scipy_opt
        return h
    def _make_kernels(self, dimensional_parameters, fn_decompositions, prev_kernels={}):
        """Build one sklearn product kernel per variable group, reusing any
        kernel whose group already exists in prev_kernels.

        NOTE: prev_kernels has a mutable default; it is only read here, never
        mutated, so the shared default is safe.
        """
        dim_ls, dim_var = map(np.array, zip(*dimensional_parameters))
        nActiveVar = sum(map(len, fn_decompositions))
        kernels = {}
        for var_order in fn_decompositions:
            if var_order in prev_kernels:
                # Structure unchanged for this group: reuse the old kernel.
                kernels[var_order] = prev_kernels[var_order]
                continue
            d = len(var_order)
            # Prevent the values from going heywire
            #ls = normalize(np.clip(dimensional_parameters[var_order], 1e-03, 1))
            _var_order = list(var_order)
            ls = self.ls_wrap(dim_ls[_var_order])
            _dim_var = dim_var[_var_order]
            variance = np.sqrt(sum( _dim_var**2 ))
            #var = float(d) / nActiveVar
            logging.debug("Fn={}, ls={}, variance={}".format(var_order, ls, variance))
            kernel = variance * self._sk_kernel_class(ls, **self._sk_kwargs)
            kernel._dim_var = _dim_var
            kernel.active_dims = _var_order
            kernel.variance = variance
            kernel.lengthscale = ls
            kernels[var_order] = kernel
        return kernels
    def _update_kernels(self, dim_ls, dim_var, kernels):
        """Push new per-dimension parameters into the existing kernels in place."""
        for var_order, kernel in kernels.items():
            _var_order = list(var_order)
            ls = self.ls_wrap(dim_ls[_var_order])
            _dim_var = dim_var[_var_order]
            variance = np.sqrt(sum( _dim_var**2 ))
            kernel._dim_var = _dim_var
            kernel.set_params(k1__constant_value=variance, k2__length_scale=ls)
            kernel.variance = variance
            kernel.lengthscale = ls
        return kernels
# We operate everything based an the adjacency matrix, as underlying structure is graph
class HypothesisGraph(Hypothesis):
    """Hypothesis represented as an undirected dependency graph; the additive
    decomposition is the set of maximal cliques of that graph. The structure
    lives in the boolean adjacency matrix self.Z."""
    def __init__(self, graph_function, context, data):
        self.Z = nx.to_numpy_matrix(graph_function.graph, dtype=bool)
        super().__init__(graph_function, context, data)
    # Maybe can be in base?
    def make_fn_decompositions_sorted(self):
        # One sorted tuple of variable indices per maximal clique.
        return [ tuple(sorted(ea_decomp)) for ea_decomp in nx.find_cliques(self.make_graph()) ]
    def flip_edge(self, i, j):
        """Return (clone with edge (i, j) toggled, whether the edge existed
        before the flip). self is left unmodified."""
        Z = self.Z.copy()
        Z[i,j] = not Z[i,j]
        Z[j,i] = Z[i,j]
        # Update for edge on likihood
        # WHY?? TODO
        # Update edge perturbation, TODO merge?
        is_edge_set = self.Z[i,j] == 1
        '''
        if is_edge_set:
            likelihood = np.log(self.context.p)
        else:
            likelihood = np.log(1-self.context.p)
        '''
        h = self.clone()
        #h.likelihood = likelihood
        h.Z = Z
        # Reuse the kernels of unchanged cliques from this hypothesis.
        h.kernels = h._make_kernels(h.dimensional_parameters, h.make_fn_decompositions_sorted(), self.kernels)
        return h, is_edge_set
    def mutate_edge(self, i_del, j_del, i_add, j_add):
        """Return a clone with edge (i_add, j_add) added and edge
        (i_del, j_del) removed. self is left unmodified."""
        Z = self.Z.copy()
        Z[i_add,j_add] = True
        Z[j_add,i_add] = True
        Z[i_del,j_del] = False
        Z[j_del,i_del] = False
        # Update for edge on likihood
        # WHY?? TODO
        # Update edge perturbation, TODO merge?
        #likelihood = np.log(self.context.p)
        h = self.clone()
        #h.likelihood = likelihood
        h.Z = Z
        # Reuse the kernels of unchanged cliques from this hypothesis.
        h.kernels = h._make_kernels(h.dimensional_parameters, h.make_fn_decompositions_sorted(), self.kernels)
        return h
    def make_graph(self):
        # Rebuild a networkx graph from the adjacency matrix.
        return nx.from_numpy_matrix(self.Z)
# We operate everything based on adj list
class HypothesisNonGraph(Hypothesis):
    """Hypothesis represented by a flat group-label vector self.z (one group
    id per variable); the decomposition is the partition induced by z, so
    groups never overlap."""
    def __init__(self, graph_function, context, data):
        self.z = self.getZFromGraph(graph_function.graph)
        super().__init__(graph_function, context, data)
    def getZFromGraph(self, G, M=0):
        """Derive group labels from a graph: each maximal clique gets its own
        group id. NOTE(review): a node in several cliques keeps the label of
        the last clique visited."""
        z = np.zeros(len(G.nodes()))
        cliques = nx.find_cliques(G)
        group = 0
        for c in cliques:
            for n in c:
                z[n] = group
            group += 1
        '''
        if M > 0:
            assert(group <= M) # The number of groups must not exceed M
        '''
        return z
    def make_fn_decompositions_sorted(self):
        """Turn the label vector into a list of sorted index tuples, one per
        non-empty group; unlabeled dimensions become singleton groups."""
        M = int(max(self.z)+1)
        decomp = []
        #values = set(np.array(z).flatten())
        for m in range(M):
            A = []
            for j, z_j in enumerate(self.z):
                #assert(z_j < M) # the constraint on the number of groups must be fullfilled
                if z_j == m:
                    A.append(j)
            if len(A) > 0:
                decomp.append(tuple(sorted(A)))
        # Fixed missing nodes
        activated_set = reduce(set.union, map(set, map(list, decomp)))
        leftover = set(range(self.data.dim)) - activated_set
        decomp += [ tuple([i]) for i in leftover ]
        return decomp
    def make_graph(self):
        decomp = self.make_fn_decompositions_sorted()
        return self.getGraphFromDecomposition(decomp)
    def getGroupSize(self, decomp, j):
        """Size of the group containing variable j, or 0 if j is in no group."""
        for d in decomp:
            if j in d:
                return len(d)
        return 0
    def getGraphFromDecomposition(self, decomp):
        """Build a graph in which every group of the decomposition is a clique."""
        graph = nx.Graph()
        edges = []
        single_nodes = []
        for v in decomp:
            if len(v) > 1:
                all_pairs = list(itertools.combinations(v, 2))
                for pair in all_pairs:
                    edges.append(pair)
            elif len(v) == 1:
                single_nodes.append(v[0])
        graph.add_edges_from(edges)
        graph.add_nodes_from(single_nodes)
        return graph
    def update_group(self, j, m, omega=0.):
        """Return a clone with variable j reassigned to group m; the clone's
        likelihood is initialised to omega (Gumbel noise in the sampler)."""
        z = self.z.copy()
        z[j] = m
        h = self.clone()
        h.likelihood = omega
        h.z = z
        # Reuse the kernels of unchanged groups from this hypothesis.
        h.kernels = h._make_kernels(h.dimensional_parameters, h.make_fn_decompositions_sorted(), self.kernels)
        return h
    def maxGroupSize(self, decomp):
        """Size of the largest group in the decomposition."""
        m = 0
        for d in decomp:
            s = len(d)
            if s > m:
                m = s
        return m
class GraphNonOverlap(GraphOptimizer):
    """Optimizer over non-overlapping decompositions: samples each
    dimension's group assignment z_j with the Gumbel-max trick."""

    def __init__(self, cycles, max_group_size, **kwargs):
        # When cycles are disallowed, cap the group size at 2 (a clique of
        # three or more nodes would contain a cycle).
        if not cycles:
            logging.info("Overriding max_group_size to 2 as no cycles are reported for GraphNonOverlap.")
            max_group_size = 2
        GraphOptimizer.__init__(self, cycles=cycles, max_group_size=max_group_size, **kwargs)

    def _optimize_hypotheses(self, h_0):
        """Coordinate-wise resampling of group assignments.

        Repeatedly sweeps over the dimensions in random order; for each
        dimension j it evaluates every candidate group m and keeps the one
        with the highest (Gumbel-perturbed) likelihood. Stops after
        ``graphSamplingNumIter`` evaluations and returns the best hypothesis.
        """
        dim = h_0.dimension()
        fully_optimize_lengthscales = self.context.fully_optimize_lengthscales
        best_h = h_0
        h_prev = h_0
        count_i=0
        while True:
            h = h_prev
            dimensional_parameter_new = None
            for j in np.random.permutation(dim): # Sample z_j from p(z_j = m | z_-j, D) \propto exp(phi_m) using Gumbel's trick
                omega = np.random.gumbel(0.0, 1.0, dim)
                decomp = h.make_fn_decompositions_sorted()
                size = h.getGroupSize(decomp, j)
                # Partial evaluation of each candidate group m.
                z_j_new = -1
                best_val = -np.inf
                for m in range(self.context.M): # select z_j = argmax_{i <= M} phi_i + omega_i
                    h = h.update_group(j, m, omega[m])
                    decomp = h.make_fn_decompositions_sorted()
                    # Skip candidates that would exceed the group-size cap.
                    if h.maxGroupSize(decomp) > self.context.max_group_size:
                        continue
                    h.evaluate(groupSize=size)
                    count_i += 1
                    if h.likelihood > best_val:
                        best_val = h.likelihood
                        z_j_new = m
                        if fully_optimize_lengthscales:
                            dimensional_parameter_new = np.copy(h.dimensional_parameter)
                    if count_i >= self.graphSamplingNumIter:
                        break
                h = h.update_group(j, z_j_new)
                if fully_optimize_lengthscales:
                    h.dimensional_parameters = dimensional_parameter_new
                if count_i >= self.graphSamplingNumIter:
                    break
            # Re-evaluate without the group-size bias before comparing.
            h.evaluate()
            if h.likelihood > best_h.likelihood:
                best_h = h
                if fully_optimize_lengthscales:
                    best_dimensional_parameter = dimensional_parameter_new.copy()
            h_prev = h
            if count_i >= self.graphSamplingNumIter:
                break
        if not fully_optimize_lengthscales:
            best_h.optimize_dimensional_parameters(self.context.lengthscaleNumIter)
        else:
            # NOTE(review): if no sweep ever improved on h_0 while
            # fully_optimize_lengthscales is set, `best_dimensional_parameter`
            # is unbound here — confirm this path cannot be reached.
            best_h.dimensional_parameters = best_dimensional_parameter
        return best_h

    def HypothesisType(self):
        """Hypothesis class this optimizer operates on."""
        return HypothesisNonGraph
class GraphOverlap(GraphOptimizer):
    """Optimizer over (possibly overlapping) graphs: greedily flips single
    edges and keeps a pool of all evaluated hypotheses."""

    def _optimize_hypotheses(self, h_0):
        """Edge-flip hill climbing with restarts from the best unsampled graph.

        Runs until ``graphSamplingNumIter`` distinct hypotheses have been
        evaluated; returns the one with the highest likelihood.
        """
        logging.info("Running GraphOverlap")
        dim = h_0.dimension()
        h_prev = h_0
        h = h_0
        best_h = h_0
        hypotheses_set = set([ h_0 ])
        sampled_hypotheses_set = set([ h_0 ])
        # All unordered node pairs (i, j) with j < i, initially shuffled.
        all_edges = [ (i, j) for i in np.random.permutation(dim) for j in np.random.permutation(i) ]
        while len(hypotheses_set) < self.graphSamplingNumIter:
            np.random.shuffle(all_edges)
            for i, j in all_edges:
                h_prev = h
                h, is_edge_set = h.flip_edge(i, j)
                # Check if the graph satisfies the prior cycle condition.
                # NOTE(review): the `or h.has_cycle()` term ACCEPTS a cyclic
                # graph even when cycles are forbidden — looks inverted, confirm.
                if not (not self.context.cycles and is_edge_set) or h.has_cycle():
                    # New hypothesis: evaluate once and remember it.
                    if not h in hypotheses_set:
                        h.evaluate()
                        hypotheses_set.add(h)
                    # Keep the flip only if it did not decrease the likelihood.
                    if h.likelihood < h_prev.likelihood :
                        # Worse than the previous graph -> flip the edge back.
                        h = h_prev
                else:
                    # Illegal operation, reset back to the previous graph.
                    h = h_prev
                if len(hypotheses_set) >= self.graphSamplingNumIter:
                    break
            if h.likelihood > best_h.likelihood:
                best_h = h
            # Choose as next sample the graph with the highest likelihood that
            # has not yet been selected; keep h if it is still unsampled.
            if not h in sampled_hypotheses_set:
                unsampled_hypotheses_set = hypotheses_set - sampled_hypotheses_set
                h = max(unsampled_hypotheses_set, key=lambda h:h.likelihood)
                sampled_hypotheses_set.add(h_prev)
        # Full lengthscale optimization was skipped per-step, so do it once here.
        if not self.context.fully_optimize_lengthscales:
            best_h.optimize_dimensional_parameters(self.context.lengthscaleNumIter)
        return best_h

    def HypothesisType(self):
        """Hypothesis class this optimizer operates on."""
        return HypothesisGraph
class Tree(GraphOptimizer):
    """Optimizer restricted to tree-structured graphs: first grows a
    spanning tree, then mutates it by swapping one edge at a time."""

    def HypothesisType(self):
        """Hypothesis class this optimizer operates on."""
        return HypothesisGraph

    def _optimize_hypotheses(self, h_0):
        """Two-phase search over spanning trees.

        Phase 1 adds edges (cycle-free, via a disjoint-set) until the graph
        has dim-1 edges. Phase 2 removes a random edge and reconnects the two
        components with a random candidate edge. Stops after
        ``graphSamplingNumIter`` evaluated hypotheses; returns the best one.
        """
        logging.info("Running Tree")
        dim = h_0.dimension()
        h_prev = h_0
        h = h_0
        best_h = h_0
        hypotheses_set = set([ h_0 ])
        sampled_hypotheses_set = set([ h_0 ])
        # All unordered node pairs (i, j) with j < i.
        all_edges = [ (i, j) for i in np.random.permutation(dim) for j in np.random.permutation(i) ]
        while len(hypotheses_set) < self.graphSamplingNumIter:
            if len(h.make_graph().edges()) < dim - 1:
                # Phase 1: grow a spanning forest edge by edge.
                edges = h.make_graph().edges()
                disjoint_set = DisjointSet()
                for i, j in edges:
                    disjoint_set.union(i,j)
                np.random.shuffle(all_edges)
                for i, j in all_edges:
                    # Same-component check rules out cycles.
                    if not disjoint_set.connected(i, j):
                        h_prev = h
                        h, _ = h.flip_edge(i, j)
                        # New hypothesis: evaluate once and remember it.
                        if not h in hypotheses_set:
                            h.evaluate()
                            hypotheses_set.add(h)
                        if h.likelihood < h_prev.likelihood:
                            # Worse than the previous graph -> flip the edge back.
                            h = h_prev
                        else:
                            disjoint_set.union(i, j)
                        if len(hypotheses_set) >= self.graphSamplingNumIter:
                            break
                        if len(h.make_graph().edges()) >= dim - 1:
                            break
            else:
                # Phase 2: the tree is complete; mutate it edge-by-edge.
                graph_orig = h.make_graph()
                edges = list(graph_orig.edges())
                h_prev = h
                h_orig = h
                # Generate candidates from the original hypothesis.
                for i in range(self.context.gen_candidates):
                    graph_copy = graph_orig.copy()
                    # Remove a random edge, splitting the tree in two components.
                    i_del, j_del = edges[np.random.choice(len(edges))]
                    graph_copy.remove_edge(i_del, j_del)
                    comp_1, comp_2 = list(nx.descendants(graph_copy, i_del)) + [i_del], list(nx.descendants(graph_copy, j_del)) + [j_del]
                    candidate_edges = set(product(comp_1, comp_2))
                    # Avoid re-adding the removed edge, unless it is the only option.
                    if len(candidate_edges) > 1:
                        candidate_edges.remove((i_del, j_del))
                    candidate_edges = list(candidate_edges)
                    i_add, j_add = candidate_edges[np.random.choice(len(candidate_edges))]
                    h_prev = h
                    h = h_orig.mutate_edge(i_del, j_del, i_add, j_add)
                    # New hypothesis: evaluate once and remember it.
                    if not h in hypotheses_set:
                        h.evaluate()
                        hypotheses_set.add(h)
                    if h.likelihood < h_prev.likelihood:
                        # Keep the mutation only if it improved the likelihood.
                        h = h_prev
                    if len(hypotheses_set) >= self.graphSamplingNumIter:
                        break
            if h.likelihood > best_h.likelihood:
                best_h = h
            # Choose as next sample the graph with the highest likelihood that
            # has not yet been selected; keep h if it is still unsampled.
            if not h in sampled_hypotheses_set:
                unsampled_hypotheses_set = hypotheses_set - sampled_hypotheses_set
                h = max(unsampled_hypotheses_set, key=lambda h:h.likelihood)
                sampled_hypotheses_set.add(h_prev)
        # Full lengthscale optimization was skipped per-step, so do it once here.
        if not self.context.fully_optimize_lengthscales:
            best_h.optimize_dimensional_parameters(self.context.lengthscaleNumIter)
        return best_h
#from profilehooks import profile
import operator
from functools import reduce
class GraphFunction(object):
    """A dependency graph together with per-dimension kernel parameters.

    Builds one (fixed) GPy kernel per clique of the graph and exposes
    helpers to rebuild/update those kernels when parameters change.
    """

    def __init__(self, graph, initial_kernel_params):
        self.graph = graph
        # One (lengthscale, variance) pair per input dimension.
        self.dimensional_parameters = [ (initial_kernel_params['lengthscale'], initial_kernel_params['variance']) for i in range(self.dimension()) ]
        # Kernel classes are configurable via dotted paths; GPy/sklearn RBF
        # are the defaults.
        if 'gpy_kernel' in initial_kernel_params:
            self._gpy_kernel_class = self.locate(initial_kernel_params['gpy_kernel'])
        else:
            self._gpy_kernel_class = GPy.kern.RBF
        self._sk_kwargs = {}
        if 'sk_kernel' in initial_kernel_params:
            self._sk_kernel_class = self.locate(initial_kernel_params['sk_kernel'])
            if 'sk_kwargs' in initial_kernel_params:
                self._sk_kwargs = initial_kernel_params['sk_kwargs']
        else:
            self._sk_kernel_class = sklearn.gaussian_process.kernels.RBF
        # ARD (default): keep per-dimension lengthscales; otherwise collapse
        # a group's lengthscales into a single sum.
        self.is_ard = True
        if 'ard' in initial_kernel_params:
            self.is_ard = initial_kernel_params['ard']
        if self.is_ard:
            self.ls_wrap = lambda ord_dim_ls: ord_dim_ls
        else:
            self.ls_wrap = lambda ord_dim_ls: sum(ord_dim_ls)
        # Scipy optimizer used for hyper-parameter fitting; fmin_tnc by
        # default, overridable via a dotted path.
        self.scipy_opt = scipy.optimize.fmin_tnc
        if 'scipy_opt' in initial_kernel_params:
            self.scipy_opt = self.locate(initial_kernel_params['scipy_opt'])
        fn_decompositions = self.make_fn_decompositions()
        self.kernels = self._make_kernels(self.dimensional_parameters, fn_decompositions)

    def dimension(self):
        """Number of input dimensions (= nodes in the graph)."""
        return self.graph.number_of_nodes()

    def make_decomposition(self, model):
        """Return (decompositions, additive GPy kernel, component functions)."""
        fn_decompositions = self.make_fn_decompositions()
        cfns = self.make_cfns(self.kernels, model)
        return (fn_decompositions, GPy.kern.Add(self.kernels.values()), cfns)

    def make_fn_decompositions(self):
        """One sorted index tuple per maximal clique of the graph."""
        return [ tuple(sorted(ea_decomp)) for ea_decomp in nx.find_cliques(self.graph) ]

    def locate(self, path):
        """Dynamically import and return the object at dotted *path*.

        Raises ImportError when the module exists but lacks the attribute.
        """
        (modulename, classname) = path.rsplit('.', 1)
        m = __import__(modulename, globals(), locals(), [classname])
        if not hasattr(m, classname):
            raise ImportError(f'Could not locate "{path}".')
        return getattr(m, classname)

    def _make_kernels(self, dimensional_parameters, fn_decompositions, prev_kernels=None):
        """Build one fixed kernel per decomposition tuple.

        Kernels already present in *prev_kernels* are reused unchanged.
        BUG FIX: *prev_kernels* previously defaulted to a shared mutable
        ``{}``; a ``None`` sentinel is used instead (same behavior for
        existing callers). The unused local ``nActiveVar`` was removed.
        """
        if prev_kernels is None:
            prev_kernels = {}
        dim_ls, dim_var = map(np.array, zip(*dimensional_parameters))
        kernels = {}
        for var_order in fn_decompositions:
            if var_order in prev_kernels:
                kernels[var_order] = prev_kernels[var_order]
                continue
            d = len(var_order)
            _var_order = list(var_order)
            ls = self.ls_wrap(dim_ls[_var_order])
            _dim_var = dim_var[_var_order]
            # Group variance: L2 norm of the per-dimension variances.
            variance = np.sqrt(sum( _dim_var**2 ))
            logging.debug("Fn={}, ls={}, variance={}".format(var_order, ls, variance))
            kernel = self._gpy_kernel_class(input_dim=d, lengthscale=ls, variance=variance, active_dims=var_order, ARD=self.is_ard, name="_"+"_".join(map(str,var_order)) )
            kernel._dim_var = _dim_var
            kernel.fix()
            kernels[var_order] = kernel
        return kernels

    def _update_kernels(self, dim_ls, dim_var, kernels):
        """Write new lengthscales/variances into existing kernels in place."""
        for var_order, kernel in kernels.items():
            _var_order = list(var_order)
            new_ls = self.ls_wrap(dim_ls[_var_order])
            _dim_var = dim_var[_var_order]
            kernel.lengthscale = new_ls
            kernel.variance = np.sqrt(sum( _dim_var**2 ))
            kernel._dim_var = _dim_var
        return kernels

    def make_cfns(self, kernels, model):
        """Wrap each kernel in an LCB acquisition and bundle them."""
        fn_decomp_lookup = {}
        for var, kernel in kernels.items():
            acqu = MyAcquisitionLCB(model, kernel, var)
            fn_decomp_lookup[var] = acqu
        return ComponentFunction(fn_decomp_lookup)
class OptimalGraphFunction(GraphFunction):
    """GraphFunction variant for synthetic benchmarks.

    Decomposes the graph into isolated nodes plus individual edges and
    builds fixed RBF kernels that all share the first dimension's parameters.
    """

    def make_cfns(self, kernels, model):
        """Wrap each kernel in an LCB acquisition, keyed by its index tuple."""
        fn_decomp_lookup = {}
        for var, kernel in kernels.items():
            acqu = MyAcquisitionLCB(model, kernel, var)
            fn_decomp_lookup[var] = acqu
        return SyntheticComponentFunction(self.graph, fn_decomp_lookup)

    def make_fn_decompositions(self):
        """Singletons for isolated nodes plus one sorted pair per edge."""
        return [ tuple(sorted([v])) for v in nx.isolates(self.graph)] + [ tuple(sorted(e)) for e in self.graph.edges() ]

    def _make_kernels(self, dimensional_parameters, fn_decompositions, prev_kernels=None):
        """Build one fixed RBF per decomposition tuple.

        BUG FIX: *prev_kernels* previously defaulted to a shared mutable
        ``{}``; a ``None`` sentinel is used instead. The parameter is kept
        for interface compatibility but is not consulted here.
        """
        ls, var = dimensional_parameters[0]
        kernels = {}
        for var_order in fn_decompositions:
            logging.info("Fn={}, ls={}, variance={}".format(var_order, ls, var))
            kernel = GPy.kern.RBF(input_dim=len(var_order), lengthscale=ls, variance=var, active_dims=var_order, name="_"+"_".join(map(str,var_order)))
            kernel.fix()
            kernels[var_order] = kernel
        return kernels
| [
"datasets.ComponentFunction",
"matplotlib.pyplot.clf",
"numpy.allclose",
"numpy.einsum",
"numpy.isclose",
"numpy.random.normal",
"numpy.copy",
"scipy.linalg.cho_solve",
"disjoint_set.DisjointSet",
"networkx.to_numpy_matrix",
"itertools.product",
"numpy.log10",
"myAcquisitionLCB.MyAcquisition... | [((1362, 1385), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1024)'}), '(maxsize=1024)\n', (1371, 1385), False, 'from functools import lru_cache\n'), ((4890, 4899), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4897, 4899), True, 'import matplotlib.pyplot as plt\n'), ((9403, 9450), 'numpy.array', 'np.array', (['graph_function.dimensional_parameters'], {}), '(graph_function.dimensional_parameters)\n', (9411, 9450), True, 'import numpy as np\n'), ((10437, 10455), 'numpy.log10', 'np.log10', (['_max_val'], {}), '(_max_val)\n', (10445, 10455), True, 'import numpy as np\n'), ((10722, 10755), 'numpy.concatenate', 'np.concatenate', (['(dim_ls, dim_var)'], {}), '((dim_ls, dim_var))\n', (10736, 10755), True, 'import numpy as np\n'), ((11689, 11754), 'numpy.allclose', 'np.allclose', (['_dimensional_parameters', 'self.dimensional_parameters'], {}), '(_dimensional_parameters, self.dimensional_parameters)\n', (11700, 11754), True, 'import numpy as np\n'), ((13311, 13345), 'numpy.zeros', 'np.zeros', (['(X.shape[0], X.shape[0])'], {}), '((X.shape[0], X.shape[0]))\n', (13319, 13345), True, 'import numpy as np\n'), ((14010, 14032), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['Ky'], {}), '(Ky)\n', (14028, 14032), True, 'import numpy as np\n'), ((14045, 14062), 'numpy.linalg.inv', 'np.linalg.inv', (['LW'], {}), '(LW)\n', (14058, 14062), True, 'import numpy as np\n'), ((14076, 14090), 'numpy.dot', 'np.dot', (['c.T', 'c'], {}), '(c.T, c)\n', (14082, 14090), True, 'import numpy as np\n'), ((14169, 14206), 'scipy.linalg.cho_solve', 'scipy.linalg.cho_solve', (['(LW, True)', 'y'], {}), '((LW, True), y)\n', (14191, 14206), False, 'import scipy\n'), ((14441, 14468), 'numpy.zeros', 'np.zeros', (['(self.data.dim * 2)'], {}), '(self.data.dim * 2)\n', (14449, 14468), True, 'import numpy as np\n'), ((15469, 15491), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['Ky'], {}), '(Ky)\n', (15487, 15491), True, 'import numpy as np\n'), 
((15608, 15645), 'scipy.linalg.cho_solve', 'scipy.linalg.cho_solve', (['(LW, True)', 'y'], {}), '((LW, True), y)\n', (15630, 15645), False, 'import scipy\n'), ((19424, 19476), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['graph_function.graph'], {'dtype': 'bool'}), '(graph_function.graph, dtype=bool)\n', (19442, 19476), True, 'import networkx as nx\n'), ((20938, 20966), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['self.Z'], {}), '(self.Z)\n', (20958, 20966), True, 'import networkx as nx\n'), ((21309, 21327), 'networkx.find_cliques', 'nx.find_cliques', (['G'], {}), '(G)\n', (21324, 21327), True, 'import networkx as nx\n'), ((22635, 22645), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (22643, 22645), True, 'import networkx as nx\n'), ((26740, 26776), 'logging.info', 'logging.info', (['"""Running GraphOverlap"""'], {}), "('Running GraphOverlap')\n", (26752, 26776), False, 'import logging\n'), ((29336, 29364), 'logging.info', 'logging.info', (['"""Running Tree"""'], {}), "('Running Tree')\n", (29348, 29364), False, 'import logging\n'), ((38859, 38894), 'datasets.ComponentFunction', 'ComponentFunction', (['fn_decomp_lookup'], {}), '(fn_decomp_lookup)\n', (38876, 38894), False, 'from datasets import ComponentFunction, SyntheticComponentFunction\n'), ((39166, 39222), 'datasets.SyntheticComponentFunction', 'SyntheticComponentFunction', (['self.graph', 'fn_decomp_lookup'], {}), '(self.graph, fn_decomp_lookup)\n', (39192, 39222), False, 'from datasets import ComponentFunction, SyntheticComponentFunction\n'), ((3196, 3243), 'numpy.array', 'np.array', (['graph_function.dimensional_parameters'], {}), '(graph_function.dimensional_parameters)\n', (3204, 3243), True, 'import numpy as np\n'), ((5811, 5858), 'numpy.array', 'np.array', (['graph_function.dimensional_parameters'], {}), '(graph_function.dimensional_parameters)\n', (5819, 5858), True, 'import numpy as np\n'), ((12172, 12219), 'numpy.isclose', 'np.isclose', (['updated_likelihood', 'self.likelihood'], 
{}), '(updated_likelihood, self.likelihood)\n', (12182, 12219), True, 'import numpy as np\n'), ((13029, 13054), 'numpy.log', 'np.log', (['(groupSize + alpha)'], {}), '(groupSize + alpha)\n', (13035, 13054), True, 'import numpy as np\n'), ((14651, 14691), 'numpy.einsum', 'np.einsum', (['"""ij,jik->k"""', 'dL_dK', 'dK_dparam'], {}), "('ij,jik->k', dL_dK, dK_dparam)\n", (14660, 14691), True, 'import numpy as np\n'), ((23770, 23873), 'logging.info', 'logging.info', (['"""Overriding max_group_size to 2 as no cycles are reported for GraphNonOverlap."""'], {}), "(\n 'Overriding max_group_size to 2 as no cycles are reported for GraphNonOverlap.'\n )\n", (23782, 23873), False, 'import logging\n'), ((24390, 24416), 'numpy.random.permutation', 'np.random.permutation', (['dim'], {}), '(dim)\n', (24411, 24416), True, 'import numpy as np\n'), ((27128, 27156), 'numpy.random.shuffle', 'np.random.shuffle', (['all_edges'], {}), '(all_edges)\n', (27145, 27156), True, 'import numpy as np\n'), ((38766, 38802), 'myAcquisitionLCB.MyAcquisitionLCB', 'MyAcquisitionLCB', (['model', 'kernel', 'var'], {}), '(model, kernel, var)\n', (38782, 38802), False, 'from myAcquisitionLCB import MyAcquisitionLCB\n'), ((39073, 39109), 'myAcquisitionLCB.MyAcquisitionLCB', 'MyAcquisitionLCB', (['model', 'kernel', 'var'], {}), '(model, kernel, var)\n', (39089, 39109), False, 'from myAcquisitionLCB import MyAcquisitionLCB\n'), ((3432, 3534), 'numpy.random.normal', 'np.random.normal', (['dimensional_parameters', '(dimensional_parameters * self.context.param_exploration)'], {}), '(dimensional_parameters, dimensional_parameters * self.\n context.param_exploration)\n', (3448, 3534), True, 'import numpy as np\n'), ((4740, 4759), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (4752, 4759), True, 'import matplotlib.pyplot as plt\n'), ((6048, 6150), 'numpy.random.normal', 'np.random.normal', (['dimensional_parameters', '(dimensional_parameters * self.context.param_exploration)'], {}), 
'(dimensional_parameters, dimensional_parameters * self.\n context.param_exploration)\n', (6064, 6150), True, 'import numpy as np\n'), ((14249, 14266), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (14255, 14266), True, 'import numpy as np\n'), ((14312, 14328), 'numpy.dot', 'np.dot', (['y', 'alpha'], {}), '(y, alpha)\n', (14318, 14328), True, 'import numpy as np\n'), ((14354, 14406), 'numpy.einsum', 'np.einsum', (['"""i,k->ik"""', 'alpha', 'alpha'], {'dtype': 'np.float64'}), "('i,k->ik', alpha, alpha, dtype=np.float64)\n", (14363, 14406), True, 'import numpy as np\n'), ((15979, 15996), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15985, 15996), True, 'import numpy as np\n'), ((16152, 16168), 'numpy.dot', 'np.dot', (['y', 'alpha'], {}), '(y, alpha)\n', (16158, 16168), True, 'import numpy as np\n'), ((24520, 24551), 'numpy.random.gumbel', 'np.random.gumbel', (['(0.0)', '(1.0)', 'dim'], {}), '(0.0, 1.0, dim)\n', (24536, 24551), True, 'import numpy as np\n'), ((26989, 27015), 'numpy.random.permutation', 'np.random.permutation', (['dim'], {}), '(dim)\n', (27010, 27015), True, 'import numpy as np\n'), ((27025, 27049), 'numpy.random.permutation', 'np.random.permutation', (['i'], {}), '(i)\n', (27046, 27049), True, 'import numpy as np\n'), ((29585, 29611), 'numpy.random.permutation', 'np.random.permutation', (['dim'], {}), '(dim)\n', (29606, 29611), True, 'import numpy as np\n'), ((29621, 29645), 'numpy.random.permutation', 'np.random.permutation', (['i'], {}), '(i)\n', (29642, 29645), True, 'import numpy as np\n'), ((29862, 29875), 'disjoint_set.DisjointSet', 'DisjointSet', ([], {}), '()\n', (29873, 29875), False, 'from disjoint_set import DisjointSet\n'), ((29972, 30000), 'numpy.random.shuffle', 'np.random.shuffle', (['all_edges'], {}), '(all_edges)\n', (29989, 30000), True, 'import numpy as np\n'), ((36657, 36684), 'networkx.find_cliques', 'nx.find_cliques', (['self.graph'], {}), '(self.graph)\n', (36672, 36684), True, 'import 
networkx as nx\n'), ((4799, 4807), 'common.Config', 'Config', ([], {}), '()\n', (4805, 4807), False, 'from common import Config\n'), ((22776, 22804), 'itertools.combinations', 'itertools.combinations', (['v', '(2)'], {}), '(v, 2)\n', (22798, 22804), False, 'import itertools\n'), ((39307, 39330), 'networkx.isolates', 'nx.isolates', (['self.graph'], {}), '(self.graph)\n', (39318, 39330), True, 'import networkx as nx\n'), ((32015, 32038), 'itertools.product', 'product', (['comp_1', 'comp_2'], {}), '(comp_1, comp_2)\n', (32022, 32038), False, 'from itertools import product\n'), ((10521, 10531), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (10527, 10531), True, 'import numpy as np\n'), ((25568, 25600), 'numpy.copy', 'np.copy', (['h.dimensional_parameter'], {}), '(h.dimensional_parameter)\n', (25575, 25600), True, 'import numpy as np\n'), ((31856, 31889), 'networkx.descendants', 'nx.descendants', (['graph_copy', 'i_del'], {}), '(graph_copy, i_del)\n', (31870, 31889), True, 'import networkx as nx\n'), ((31907, 31940), 'networkx.descendants', 'nx.descendants', (['graph_copy', 'j_del'], {}), '(graph_copy, j_del)\n', (31921, 31940), True, 'import networkx as nx\n')] |
import os
import numpy as np
import tflowtools as TFT
import tensorflow as tf
import fileinput
import random
import mnist_basics as mnist
# Seed every RNG in use (Python, NumPy, TensorFlow) so runs are reproducible.
random.seed(123)
np.random.seed(123)
tf.set_random_seed(123)
def replaceSeparator(file, out_path="data_sets/gamma.txt.bak", threshold=0.5):
    """Copy a random subset of lines from *file* to *out_path*.

    Each line is kept when a uniform draw exceeds *threshold* (so roughly
    a ``1 - threshold`` fraction survives). NOTE: despite the name, no
    separator replacement is performed — the function only subsamples lines.

    Fixes: the parameter ``file`` is no longer shadowed by the opened file
    handle, and the previously hard-coded output path is now a parameter
    (default unchanged for existing callers).
    """
    with open(file, "r") as src, open(out_path, "w") as out:
        for line in src:
            if random.uniform(0, 1) > threshold:
                out.write(line)
def max_label(file):
    """Return the largest class label in *file*.

    The label is the last comma-separated column of each line. Returns 0
    for an empty file.

    Fixes: the file handle is now closed via a context manager and the
    unused ``lines`` accumulator was removed.
    """
    separator = ','
    max_length = 0
    with open(file) as f:
        for line in f:
            label = int(line.split(separator)[-1].strip())
            if label > max_length:
                max_length = label
    return max_length
def normalized(a, axis=-1, order=2):
    """Scale *a* so each vector along *axis* has unit *order*-norm.

    Vectors with zero norm are left unchanged (their norm is substituted
    with 1 to avoid division by zero).
    """
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    safe_norms = np.where(norms == 0, 1, norms)
    return a / np.expand_dims(safe_norms, axis)
def load_data(file, cfrac):
    """Load a comma-separated data set as [features, one_hot_label] pairs.

    Each line is kept with probability *cfrac*. The last column is a
    1-based integer label converted to a one-hot vector (length = largest
    label in the file); the remaining columns are features, z-score
    normalized with the mean/std of the kept subset.

    Fix: the file handle was never closed — it is now managed by ``with``.
    NOTE(review): a constant feature column gives std 0 and a division by
    zero here — confirm the data sets used cannot contain one.
    """
    one_hot_length = max_label(file)
    separator = ','
    features = []
    labels = []
    with open(file) as f:
        for line in f:
            if random.random() <= cfrac:
                feature = [float(v) for v in line.split(separator)[:-1]]
                label = TFT.int_to_one_hot(int(line.split(separator)[-1].strip()) - 1, one_hot_length)
                features.append(feature)
                labels.append([float(v) for v in label])
    means = np.mean(features, axis=0)
    std = np.std(features, axis=0)
    lines = []
    for n in range(len(features)):
        for k in range(len(features[n])):
            features[n][k] = (features[n][k] - means[k]) / std[k]
        lines.append([features[n], labels[n]])
    return lines
def get_valid_mnist():
    """Load all flat MNIST cases as [features, one_hot_label] pairs."""
    images, raw_labels = mnist.load_all_flat_cases()
    return [
        [images[i], TFT.int_to_one_hot(int(raw_labels[i]), 10)]
        for i in range(len(raw_labels))
    ]
def quickrun(operators, grabbed_vars=None, dir='probeview', session=None, feed_dict=None, step=1, show_interval=1):
    """Run *operators* (and *grabbed_vars*) once in a TF session.

    Reuses *session* when given, otherwise opens a fresh initialized
    session logging to *dir*. Returns (operator results, grabbed values,
    session) so the caller can keep the session alive across steps.

    NOTE(review): when grabbed_vars is None it is still included in the
    sess.run fetch list — presumably callers always supply it; confirm.
    """
    sess = session if session else TFT.gen_initialized_session(dir=dir)
    results = sess.run([operators, grabbed_vars], feed_dict=feed_dict)
    # Only display results every `show_interval`-th step.
    if show_interval and (step % show_interval) == 0:
        TFT.show_results(results[1], grabbed_vars, dir)
    return results[0], results[1], sess
def gradient_descent(filename, size_features=9, steps=1000, tvect=None, learning_rate=0.5, showint=10):
    """Train a one-layer tanh network on the data set in *filename*.

    BUG FIXES:
    - ``load_data`` requires a case fraction and returns a list of
      [features, label] pairs; the old single-argument call raised a
      TypeError and the tuple unpacking was wrong.
    - ``if tvect`` raised "truth value is ambiguous" for array arguments;
      ``is not None`` is used instead (same behavior for the default None).
    """
    data = load_data(filename, 1.0)
    features = np.array([pair[0] for pair in data])
    labels = [pair[1] for pair in data]
    print("X SHAPE", features.shape)
    target = tvect if tvect is not None else np.array(labels[0])
    # Trainable weights and bias.
    w = tf.Variable(np.random.uniform(-0.1, 0.1, size=(size_features, size_features)), name='weights')
    print("W SHAPE:", w.shape)
    b = tf.Variable(np.zeros((1, size_features)), name='bias')
    # NOTE(review): batch size 214 is hard-coded (glass data set size?) — confirm.
    x = tf.placeholder(tf.float64, shape=(214, size_features), name='input')
    print("X SHAPE:", x.shape)
    y = tf.tanh(tf.matmul(x, w) + b, name='out-softplus')
    error = tf.reduce_mean(tf.square(target - y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_operator = optimizer.minimize(error)
    feeder = {x: features}
    sess = TFT.gen_initialized_session()
    for step in range(steps):
        quickrun([training_operator], [w, b, y, error], session=sess, feed_dict=feeder, step=step, show_interval=showint)
    TFT.close_session(sess)
#print(gradient_descent('data_sets/glass.txt')) | [
"numpy.random.seed",
"tensorflow.matmul",
"numpy.mean",
"numpy.linalg.norm",
"mnist_basics.load_all_flat_cases",
"tflowtools.gen_initialized_session",
"numpy.std",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"random.seed",
"tflowtools.show_results",
"random.random",
"tensorflow.t... | [((139, 155), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (150, 155), False, 'import random\n'), ((156, 175), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (170, 175), True, 'import numpy as np\n'), ((176, 199), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(123)'], {}), '(123)\n', (194, 199), True, 'import tensorflow as tf\n'), ((1448, 1473), 'numpy.mean', 'np.mean', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (1455, 1473), True, 'import numpy as np\n'), ((1484, 1508), 'numpy.std', 'np.std', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (1490, 1508), True, 'import numpy as np\n'), ((1752, 1779), 'mnist_basics.load_all_flat_cases', 'mnist.load_all_flat_cases', ([], {}), '()\n', (1777, 1779), True, 'import mnist_basics as mnist\n'), ((2942, 3010), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '(214, size_features)', 'name': '"""input"""'}), "(tf.float64, shape=(214, size_features), name='input')\n", (2956, 3010), True, 'import tensorflow as tf\n'), ((3168, 3216), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3201, 3216), True, 'import tensorflow as tf\n'), ((3325, 3354), 'tflowtools.gen_initialized_session', 'TFT.gen_initialized_session', ([], {}), '()\n', (3352, 3354), True, 'import tflowtools as TFT\n'), ((3512, 3535), 'tflowtools.close_session', 'TFT.close_session', (['sess'], {}), '(sess)\n', (3529, 3535), True, 'import tflowtools as TFT\n'), ((808, 838), 'numpy.linalg.norm', 'np.linalg.norm', (['a', 'order', 'axis'], {}), '(a, order, axis)\n', (822, 838), True, 'import numpy as np\n'), ((873, 897), 'numpy.expand_dims', 'np.expand_dims', (['l2', 'axis'], {}), '(l2, axis)\n', (887, 897), True, 'import numpy as np\n'), ((2124, 2160), 'tflowtools.gen_initialized_session', 'TFT.gen_initialized_session', ([], {'dir': 'dir'}), '(dir=dir)\n', (2151, 2160), True, 'import 
tflowtools as TFT\n'), ((2295, 2342), 'tflowtools.show_results', 'TFT.show_results', (['results[1]', 'grabbed_vars', 'dir'], {}), '(results[1], grabbed_vars, dir)\n', (2311, 2342), True, 'import tflowtools as TFT\n'), ((2642, 2661), 'numpy.array', 'np.array', (['labels[0]'], {}), '(labels[0])\n', (2650, 2661), True, 'import numpy as np\n'), ((2722, 2787), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': '(size_features, size_features)'}), '(-0.1, 0.1, size=(size_features, size_features))\n', (2739, 2787), True, 'import numpy as np\n'), ((2890, 2918), 'numpy.zeros', 'np.zeros', (['(1, size_features)'], {}), '((1, size_features))\n', (2898, 2918), True, 'import numpy as np\n'), ((3128, 3149), 'tensorflow.square', 'tf.square', (['(target - y)'], {}), '(target - y)\n', (3137, 3149), True, 'import tensorflow as tf\n'), ((1096, 1111), 'random.random', 'random.random', ([], {}), '()\n', (1109, 1111), False, 'import random\n'), ((3058, 3073), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (3067, 3073), True, 'import tensorflow as tf\n'), ((371, 391), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (385, 391), False, 'import random\n')] |
from typing import (
Optional
)
import numpy as np
def rolling_window(
        array: np.ndarray,
        size: int,
        shift: Optional[int] = None,
        stride: int = 1
) -> np.ndarray:
    """Build a strided view of `size`-element rolling windows over `array`.

        |-------- size:3 --------|
        |- stride:1 -|           |
        |            |           |
        1            2           3  --------|---
                                         shift:2
        3            4           5  --------|---

        5            6           7

    The result is a zero-copy view: row k starts `k * shift` elements into
    `array`, and consecutive elements within a row are `stride` apart.
    A trailing window shorter than `size` is dropped.
    """
    shift = shift or size
    window_step = shift * stride
    span = len(array) - size
    n_windows, remainder = divmod(span, window_step)
    n_windows += 1
    if remainder:
        # Drop the tail that would only yield a partial window.
        array = array[:-remainder]
    elem_stride = array.strides[0]
    return np.lib.stride_tricks.as_strided(
        array,
        # The first input dimension is consumed by the window axis.
        shape=(n_windows, size) + array.shape[1:],
        strides=(elem_stride * shift, elem_stride * stride) + array.strides[1:]
    )
def max_lines_error(max_lines):
    """Build (not raise) the ValueError reported for a non-positive max_lines."""
    message = f'max_lines must be positive, but got `{max_lines}`'
    return ValueError(message)
| [
"numpy.lib.stride_tricks.as_strided"
] | [((889, 1044), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['array'], {'shape': '((steps, size) + array.shape[1:])', 'strides': '((item_stride * shift, item_stride * stride) + array.strides[1:])'}), '(array, shape=(steps, size) + array.shape[1:\n ], strides=(item_stride * shift, item_stride * stride) + array.strides[1:])\n', (920, 1044), True, 'import numpy as np\n')] |
import itertools
import pathlib
import tempfile
import slab
import numpy
tmpdir = pathlib.Path(tempfile.gettempdir())
def test_low_high_pass():
    """Low-/high-pass filters must attenuate the stop band by at least 25 dB."""
    for _ in range(10):
        noise = slab.Sound.whitenoise(duration=2.0)
        for kind, fir in itertools.product(["lp", "hp"], [False, True]):
            cutoff = numpy.random.uniform(100, 2000)
            n_taps = numpy.random.randint(500, 5000)
            band_filter = slab.Filter.band(frequency=cutoff, length=n_taps, kind=kind, fir=fir)
            filtered = band_filter.apply(noise)
            spectrum, freqs = filtered.spectrum(show=False)
            cut_idx = numpy.abs(freqs - cutoff).argmin()
            # The stop band is below the cutoff for "hp", above it for "lp".
            stop_band = spectrum[:cut_idx] if kind == "hp" else spectrum[cut_idx:]
            assert stop_band.max() < -25
def test_band_pass_stop():
    """Band-pass/band-stop filters must attenuate the stop band by 30 dB."""
    noise = slab.Sound.whitenoise(duration=2.0)
    for kind, fir in itertools.product(["bp", "bs"], [False, True]):
        low_edge = numpy.random.uniform(100, 1000)
        high_edge = low_edge + numpy.random.uniform(100, 1000)
        n_taps = numpy.random.randint(500, 5000)
        band_filter = slab.Filter.band(frequency=(low_edge, high_edge), length=n_taps, kind=kind, fir=fir)
        filtered = band_filter.apply(noise)
        spectrum, freqs = filtered.spectrum(show=False)
        lo_idx = numpy.abs(freqs - low_edge).argmin()
        hi_idx = numpy.abs(freqs - high_edge).argmin()
        if kind == "bp":
            # Stop band = everything outside the edges.
            stop_band = numpy.concatenate([spectrum[:lo_idx], spectrum[hi_idx:]])
        else:
            # Stop band = everything between the edges.
            stop_band = spectrum[lo_idx:hi_idx]
        assert stop_band.max() < -30
def test_custom_band():
    """Filters with custom gain profiles must suppress the zero-gain bands."""
    sound = slab.Sound.whitenoise(duration=2.0, samplerate=44100)
    freqs = numpy.array([100., 800., 2000., 4300., 8000., 14500., 18000.])
    gains = [
        [0., 1., 0., 1., 0., 1., 0.],
        [1., 0., 1, 0., 1., 0., 1.],
        [0., 1., 0., 0., 1., 1., 0.],
        [1., 0., 1., 0., 0., 1., 0.]
    ]
    for _ in range(10):
        for fir, gain in itertools.product([True, False], gains):
            # Jitter the band edges a little on every round.
            freqs += numpy.random.uniform(1, 10, 7)
            freqs.sort()
            n_taps = numpy.random.randint(500, 5000)
            band_filter = slab.Filter.band(frequency=list(freqs), gain=gain, length=n_taps, fir=fir, samplerate=sound.samplerate)
            w, h = band_filter.tf(show=False)
            suppressed = freqs[numpy.where(numpy.array(gain) == 0.0)]
            indices = [numpy.abs(w - f).argmin() for f in suppressed]
            assert max(h[indices]) < -20
def test_cos_filterbank():
for i in range(10):
sound = slab.Sound.whitenoise(duration=1.0, samplerate=44100)
length = numpy.random.randint(1000, 5000)
low_cutoff = numpy.random.randint(0, 500)
high_cutoff = numpy.random.choice([numpy.random.randint(5000, 15000), None])
pass_bands = False
n_filters = []
for bandwidth in numpy.linspace(0.1, 0.9, 9):
fbank = slab.Filter.cos_filterbank(length, bandwidth, low_cutoff, high_cutoff, pass_bands, sound.samplerate)
n_filters.append(fbank.n_filters)
filtsound = fbank.apply(sound)
assert filtsound.n_channels == fbank.n_filters
assert filtsound.n_samples == sound.n_samples
assert all([n_filters[i] >= n_filters[i+1] for i in range(len(n_filters)-1)])
bandwidth = numpy.random.uniform(0.1, 0.9)
pass_bands = True
fbank = slab.Filter.cos_filterbank(sound.n_samples, bandwidth, low_cutoff, high_cutoff, pass_bands,
sound.samplerate)
filtsound = fbank.apply(sound)
collapsed = slab.Filter.collapse_subbands(filtsound, fbank)
numpy.testing.assert_almost_equal(sound.data, collapsed.data, decimal=-1)
def test_center_freqs():
for i in range(100):
low_cutoff = numpy.random.randint(0, 500)
high_cutoff = numpy.random.choice([numpy.random.randint(5000, 20000)])
bandwidth1 = numpy.random.uniform(0.1, 0.7)
pass_bands = False
center_freqs1, bandwidth2, _ = slab.Filter._center_freqs(low_cutoff, high_cutoff, bandwidth1, pass_bands)
assert numpy.abs(bandwidth1 - bandwidth2) < 0.3
fbank = slab.Filter.cos_filterbank(5000, bandwidth1, low_cutoff, high_cutoff, pass_bands, 44100)
center_freqs2 = fbank.filter_bank_center_freqs()
assert numpy.abs(slab.Filter._erb2freq(center_freqs1[1:]) - center_freqs2[1:]).max() < 40
assert numpy.abs(center_freqs1 - slab.Filter._freq2erb(center_freqs2)).max() < 1
def test_equalization():
for i in range(10):
sound = slab.Sound.pinknoise(samplerate=44100)
filt = slab.Filter.band(frequency=[100., 800., 2000., 4300., 8000., 14500., 18000.],
gain=[0., 1., 0., 1., 0., 1., 0.], samplerate=sound.samplerate)
filtered = filt.apply(sound)
fbank = slab.Filter.equalizing_filterbank(sound, filtered, low_cutoff=200, high_cutoff=16000)
equalized = fbank.apply(sound)
Z_equalized, _ = equalized.spectrum(show=False)
Z_sound, _ = sound.spectrum(show=False)
Z_filtered, _ = filtered.spectrum(show=False)
# The difference between spectra should be smaller after equalization
assert numpy.abs(Z_sound-Z_filtered).sum() / numpy.abs(Z_sound-Z_equalized).sum() > 2
def test_load_save():
for kind, freq in zip(["lp", "hp", "bs", "bp"], [
numpy.random.uniform(100, 2000),
numpy.random.uniform(100, 2000),
(0+numpy.random.uniform(100, 2000), 2000+numpy.random.uniform(100, 2000)),
(0 + numpy.random.uniform(100, 2000), 2000 + numpy.random.uniform(100, 2000))
]):
for fir in (True, False):
filt = slab.Filter.band(kind=kind, frequency=freq, fir=fir)
filt.save(tmpdir/"filt.npy")
loaded = slab.Filter.load(tmpdir/"filt.npy")
numpy.testing.assert_equal(filt.data, loaded.data)
numpy.testing.assert_equal(filt.times, loaded.times)
assert filt.fir == loaded.fir
assert filt.n_frequencies == loaded.n_frequencies
assert filt.n_taps == loaded.n_taps
| [
"slab.Filter.load",
"numpy.abs",
"slab.Sound.whitenoise",
"slab.Filter.equalizing_filterbank",
"numpy.random.randint",
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"itertools.product",
"numpy.testing.assert_equal",
"slab.Filter.collapse_subbands",
"slab.Filter._center_freqs",
"numpy.... | [((95, 116), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (114, 116), False, 'import tempfile\n'), ((852, 887), 'slab.Sound.whitenoise', 'slab.Sound.whitenoise', ([], {'duration': '(2.0)'}), '(duration=2.0)\n', (873, 887), False, 'import slab\n'), ((909, 955), 'itertools.product', 'itertools.product', (["['bp', 'bs']", '[False, True]'], {}), "(['bp', 'bs'], [False, True])\n", (926, 955), False, 'import itertools\n'), ((1704, 1757), 'slab.Sound.whitenoise', 'slab.Sound.whitenoise', ([], {'duration': '(2.0)', 'samplerate': '(44100)'}), '(duration=2.0, samplerate=44100)\n', (1725, 1757), False, 'import slab\n'), ((1770, 1839), 'numpy.array', 'numpy.array', (['[100.0, 800.0, 2000.0, 4300.0, 8000.0, 14500.0, 18000.0]'], {}), '([100.0, 800.0, 2000.0, 4300.0, 8000.0, 14500.0, 18000.0])\n', (1781, 1839), False, 'import numpy\n'), ((185, 220), 'slab.Sound.whitenoise', 'slab.Sound.whitenoise', ([], {'duration': '(2.0)'}), '(duration=2.0)\n', (206, 220), False, 'import slab\n'), ((246, 292), 'itertools.product', 'itertools.product', (["['lp', 'hp']", '[False, True]'], {}), "(['lp', 'hp'], [False, True])\n", (263, 292), False, 'import itertools\n'), ((983, 1014), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(1000)'], {}), '(100, 1000)\n', (1003, 1014), False, 'import numpy\n'), ((1109, 1140), 'numpy.random.randint', 'numpy.random.randint', (['(500)', '(5000)'], {}), '(500, 5000)\n', (1129, 1140), False, 'import numpy\n'), ((1156, 1259), 'slab.Filter.band', 'slab.Filter.band', ([], {'frequency': '(lower_edge_freq, higher_edge_freq)', 'length': 'length', 'kind': 'kind', 'fir': 'fir'}), '(frequency=(lower_edge_freq, higher_edge_freq), length=\n length, kind=kind, fir=fir)\n', (1172, 1259), False, 'import slab\n'), ((2052, 2091), 'itertools.product', 'itertools.product', (['[True, False]', 'gains'], {}), '([True, False], gains)\n', (2069, 2091), False, 'import itertools\n'), ((2643, 2696), 'slab.Sound.whitenoise', 
'slab.Sound.whitenoise', ([], {'duration': '(1.0)', 'samplerate': '(44100)'}), '(duration=1.0, samplerate=44100)\n', (2664, 2696), False, 'import slab\n'), ((2714, 2746), 'numpy.random.randint', 'numpy.random.randint', (['(1000)', '(5000)'], {}), '(1000, 5000)\n', (2734, 2746), False, 'import numpy\n'), ((2768, 2796), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(500)'], {}), '(0, 500)\n', (2788, 2796), False, 'import numpy\n'), ((2957, 2984), 'numpy.linspace', 'numpy.linspace', (['(0.1)', '(0.9)', '(9)'], {}), '(0.1, 0.9, 9)\n', (2971, 2984), False, 'import numpy\n'), ((3419, 3449), 'numpy.random.uniform', 'numpy.random.uniform', (['(0.1)', '(0.9)'], {}), '(0.1, 0.9)\n', (3439, 3449), False, 'import numpy\n'), ((3492, 3605), 'slab.Filter.cos_filterbank', 'slab.Filter.cos_filterbank', (['sound.n_samples', 'bandwidth', 'low_cutoff', 'high_cutoff', 'pass_bands', 'sound.samplerate'], {}), '(sound.n_samples, bandwidth, low_cutoff,\n high_cutoff, pass_bands, sound.samplerate)\n', (3518, 3605), False, 'import slab\n'), ((3704, 3751), 'slab.Filter.collapse_subbands', 'slab.Filter.collapse_subbands', (['filtsound', 'fbank'], {}), '(filtsound, fbank)\n', (3733, 3751), False, 'import slab\n'), ((3760, 3833), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['sound.data', 'collapsed.data'], {'decimal': '(-1)'}), '(sound.data, collapsed.data, decimal=-1)\n', (3793, 3833), False, 'import numpy\n'), ((3907, 3935), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(500)'], {}), '(0, 500)\n', (3927, 3935), False, 'import numpy\n'), ((4036, 4066), 'numpy.random.uniform', 'numpy.random.uniform', (['(0.1)', '(0.7)'], {}), '(0.1, 0.7)\n', (4056, 4066), False, 'import numpy\n'), ((4133, 4207), 'slab.Filter._center_freqs', 'slab.Filter._center_freqs', (['low_cutoff', 'high_cutoff', 'bandwidth1', 'pass_bands'], {}), '(low_cutoff, high_cutoff, bandwidth1, pass_bands)\n', (4158, 4207), False, 'import slab\n'), ((4280, 4372), 
'slab.Filter.cos_filterbank', 'slab.Filter.cos_filterbank', (['(5000)', 'bandwidth1', 'low_cutoff', 'high_cutoff', 'pass_bands', '(44100)'], {}), '(5000, bandwidth1, low_cutoff, high_cutoff,\n pass_bands, 44100)\n', (4306, 4372), False, 'import slab\n'), ((4680, 4718), 'slab.Sound.pinknoise', 'slab.Sound.pinknoise', ([], {'samplerate': '(44100)'}), '(samplerate=44100)\n', (4700, 4718), False, 'import slab\n'), ((4734, 4899), 'slab.Filter.band', 'slab.Filter.band', ([], {'frequency': '[100.0, 800.0, 2000.0, 4300.0, 8000.0, 14500.0, 18000.0]', 'gain': '[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]', 'samplerate': 'sound.samplerate'}), '(frequency=[100.0, 800.0, 2000.0, 4300.0, 8000.0, 14500.0, \n 18000.0], gain=[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], samplerate=sound.\n samplerate)\n', (4750, 4899), False, 'import slab\n'), ((4961, 5050), 'slab.Filter.equalizing_filterbank', 'slab.Filter.equalizing_filterbank', (['sound', 'filtered'], {'low_cutoff': '(200)', 'high_cutoff': '(16000)'}), '(sound, filtered, low_cutoff=200,\n high_cutoff=16000)\n', (4994, 5050), False, 'import slab\n'), ((318, 349), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (338, 349), False, 'import numpy\n'), ((371, 402), 'numpy.random.randint', 'numpy.random.randint', (['(500)', '(5000)'], {}), '(500, 5000)\n', (391, 402), False, 'import numpy\n'), ((422, 494), 'slab.Filter.band', 'slab.Filter.band', ([], {'frequency': 'edge_freq', 'length': 'length', 'kind': 'kind', 'fir': 'fir'}), '(frequency=edge_freq, length=length, kind=kind, fir=fir)\n', (438, 494), False, 'import slab\n'), ((1060, 1091), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(1000)'], {}), '(100, 1000)\n', (1080, 1091), False, 'import numpy\n'), ((1521, 1568), 'numpy.concatenate', 'numpy.concatenate', (['[Z[0:low_idx], Z[high_idx:]]'], {}), '([Z[0:low_idx], Z[high_idx:]])\n', (1538, 1568), False, 'import numpy\n'), ((2114, 2144), 'numpy.random.uniform', 'numpy.random.uniform', (['(1)', 
'(10)', '(7)'], {}), '(1, 10, 7)\n', (2134, 2144), False, 'import numpy\n'), ((2191, 2222), 'numpy.random.randint', 'numpy.random.randint', (['(500)', '(5000)'], {}), '(500, 5000)\n', (2211, 2222), False, 'import numpy\n'), ((3006, 3110), 'slab.Filter.cos_filterbank', 'slab.Filter.cos_filterbank', (['length', 'bandwidth', 'low_cutoff', 'high_cutoff', 'pass_bands', 'sound.samplerate'], {}), '(length, bandwidth, low_cutoff, high_cutoff,\n pass_bands, sound.samplerate)\n', (3032, 3110), False, 'import slab\n'), ((4223, 4257), 'numpy.abs', 'numpy.abs', (['(bandwidth1 - bandwidth2)'], {}), '(bandwidth1 - bandwidth2)\n', (4232, 4257), False, 'import numpy\n'), ((5502, 5533), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5522, 5533), False, 'import numpy\n'), ((5543, 5574), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5563, 5574), False, 'import numpy\n'), ((5806, 5858), 'slab.Filter.band', 'slab.Filter.band', ([], {'kind': 'kind', 'frequency': 'freq', 'fir': 'fir'}), '(kind=kind, frequency=freq, fir=fir)\n', (5822, 5858), False, 'import slab\n'), ((5921, 5958), 'slab.Filter.load', 'slab.Filter.load', (["(tmpdir / 'filt.npy')"], {}), "(tmpdir / 'filt.npy')\n", (5937, 5958), False, 'import slab\n'), ((5969, 6019), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['filt.data', 'loaded.data'], {}), '(filt.data, loaded.data)\n', (5995, 6019), False, 'import numpy\n'), ((6032, 6084), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['filt.times', 'loaded.times'], {}), '(filt.times, loaded.times)\n', (6058, 6084), False, 'import numpy\n'), ((1363, 1397), 'numpy.abs', 'numpy.abs', (['(freqs - lower_edge_freq)'], {}), '(freqs - lower_edge_freq)\n', (1372, 1397), False, 'import numpy\n'), ((1426, 1461), 'numpy.abs', 'numpy.abs', (['(freqs - higher_edge_freq)'], {}), '(freqs - higher_edge_freq)\n', (1435, 1461), False, 'import numpy\n'), ((2840, 2873), 
'numpy.random.randint', 'numpy.random.randint', (['(5000)', '(15000)'], {}), '(5000, 15000)\n', (2860, 2873), False, 'import numpy\n'), ((3979, 4012), 'numpy.random.randint', 'numpy.random.randint', (['(5000)', '(20000)'], {}), '(5000, 20000)\n', (3999, 4012), False, 'import numpy\n'), ((611, 639), 'numpy.abs', 'numpy.abs', (['(freqs - edge_freq)'], {}), '(freqs - edge_freq)\n', (620, 639), False, 'import numpy\n'), ((5587, 5618), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5607, 5618), False, 'import numpy\n'), ((5625, 5656), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5645, 5656), False, 'import numpy\n'), ((5672, 5703), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5692, 5703), False, 'import numpy\n'), ((5712, 5743), 'numpy.random.uniform', 'numpy.random.uniform', (['(100)', '(2000)'], {}), '(100, 2000)\n', (5732, 5743), False, 'import numpy\n'), ((2434, 2451), 'numpy.array', 'numpy.array', (['gain'], {}), '(gain)\n', (2445, 2451), False, 'import numpy\n'), ((2480, 2499), 'numpy.abs', 'numpy.abs', (['(w - freq)'], {}), '(w - freq)\n', (2489, 2499), False, 'import numpy\n'), ((5337, 5368), 'numpy.abs', 'numpy.abs', (['(Z_sound - Z_filtered)'], {}), '(Z_sound - Z_filtered)\n', (5346, 5368), False, 'import numpy\n'), ((5375, 5407), 'numpy.abs', 'numpy.abs', (['(Z_sound - Z_equalized)'], {}), '(Z_sound - Z_equalized)\n', (5384, 5407), False, 'import numpy\n'), ((4451, 4491), 'slab.Filter._erb2freq', 'slab.Filter._erb2freq', (['center_freqs1[1:]'], {}), '(center_freqs1[1:])\n', (4472, 4491), False, 'import slab\n'), ((4565, 4601), 'slab.Filter._freq2erb', 'slab.Filter._freq2erb', (['center_freqs2'], {}), '(center_freqs2)\n', (4586, 4601), False, 'import slab\n')] |
from pioneer.common import platform as platform_utils
from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate
from pioneer.common import clouds
from pioneer.das.api.samples import FastTrace, Echo
from pioneer.das.view.windows import Window
from pioneer.common.gui.qml import backend_qtquick5
from enum import Enum
from PyQt5.QtCore import QObject
import copy
import matplotlib.pyplot as plt
import numpy as np
COLORS = plt.cm.rainbow(np.linspace(0,1,20))
class TracesWindow(Window):
def __init__(self, window, platform, synchronized, ds_name):
super(TracesWindow, self).__init__(window, platform)
self.window.setTitle(ds_name)
self.synchronized = synchronized
self.viewport = self.window.viewport
self.controls = self.window.controls
self.ds_name = ds_name
self.backend = self.window.findChild(QObject, "figure")
self.figure = self.backend.getFigure()
self.ax = [self.figure.add_subplot(s) for s in [211, 212]]
self.datasource = self.platform[self.ds_name]
sensor_name, sensor_pos, trr_ds_type = platform_utils.parse_datasource_name(self.ds_name)
self.ech_ds_name = f'{sensor_name}_{sensor_pos}_ech'
self.has_echoes = self.ech_ds_name in self.platform.datasource_names()
self.virtual_ech_ds_name = f'{sensor_name}_{sensor_pos}_ech-{trr_ds_type}'
self.has_virtual_echoes = self.virtual_ech_ds_name in self.platform.datasource_names()
if self.has_virtual_echoes:
self.window.useVirtualEchoes.visible = True
self.helper = None
self.image = None
self.hover_coords = None
self.hovering = False
self.selection = []
self.trace_processing = None
self.drawn_traces = []
if self.datasource.sensor.static_noise is None or self.datasource.sensor.static_noise == 0:
self.window.removeStaticVisible = False
def connect(self):
self.add_connection(self.window.cursorChanged.connect(self._update))
self.add_connection(self.window.selectionChanged.connect(self._update))
self.add_connection(self.window.addToSelectionSubmit.clicked.connect(self._add_to_selection))
self.add_connection(self.window.useVirtualEchoes.clicked.connect(self._update))
self.add_connection(self.window.imageTypeChanged.connect(self._update))
self.add_connection(self.window.showRawChanged.connect(self._update))
self.add_connection(self.window.showHighFastTraceChanged.connect(self._update))
self.add_connection(self.window.showLowFastTraceChanged.connect(self._update))
self.add_connection(self.window.traceProcessingChanged.connect(self._update))
self.add_connection(self.window.desaturateChanged.connect(self._update_trace_processing))
self.add_connection(self.window.removeStaticChanged.connect(self._update_trace_processing))
self.add_connection(self.window.realignChanged.connect(self._update_trace_processing))
self.add_connection(self.window.zeroBaselineChanged.connect(self._update_trace_processing))
self.add_connection(self.window.cutoffChanged.connect(self._update_trace_processing))
self.add_connection(self.window.smoothTraceChanged.connect(self._update_trace_processing))
self.backend.canvas.mpl_connect('motion_notify_event', self._on_hover)
self.backend.canvas.mpl_connect('button_press_event', self._on_click)
self._update_trace_processing()
self._update()
def _update(self):
cursor = int(self.window['cursor'])
self.trace_sample = self.datasource[cursor]
if self.window.traceProcessing:
self.trace_processed = self.trace_sample.processed(self.trace_processing)
self.selection = self.window.selection
if isinstance(self.trace_sample, FastTrace):
self.window.fastTraceSelectionVisible = True
self.window.traceProcessingVisible = self.window.traceProcessing
self._update_image()
self._update_plots()
self.backend.draw()
def _update_image(self):
if self.window.useVirtualEchoes.checked:
self.echo_sample = self.platform[self.virtual_ech_ds_name].get_at_timestamp(self.trace_sample.timestamp)
elif self.has_echoes:
self.echo_sample = self.platform[self.ech_ds_name].get_at_timestamp(self.trace_sample.timestamp)
else:
self.echo_sample = self._placeholder_echo_sample()
if self.window.imageType == 'distance':
image = self.echo_sample.distance_img(options='max_amplitude')
elif self.window.imageType == 'width':
image = self.echo_sample.other_field_img('widths')
elif self.window.imageType == 'skew':
image = self.echo_sample.other_field_img('skews')
else:
image = self.echo_sample.amplitude_img()
if self.image is None:
self.image = self.ax[0].imshow(image, extent=[0, image.shape[1], image.shape[0], 0])
else:
self.image.set_data(image)
self.image.set_clim(image.min(), image.max())
if self.helper is None:
self.helper = backend_qtquick5.MPLImageHelper(image, self.ax[0], offset = 0)
self.helper.image_coord_to_channel_index = self.echo_sample.image_coord_to_channel_index
self.helper.channel_index_to_image_coord = self.echo_sample.channel_index_to_image_coord
def _update_plots(self):
self._clear_plots()
if len(self.selection) > 0:
for coords in self.selection:
row, col = coords
index = self.helper.image_coord_to_channel_index(row, col)
color = COLORS[index%len(COLORS)]
marker_style = dict(color=color, marker='s', markersize=4, markerfacecolor=color, markeredgecolor = 'white')
self.ax[0].plot(col+.5, row+.5, **marker_style)
self.draw_traces(index, color)
self._update_plot_range()
self._update_legend()
self.backend.draw()
def _update_legend(self):
try: self.ax[1].get_legend().remove()
except: pass
if len(self.drawn_traces) > 0:
self.ax[1].legend()
def _update_plot_range(self):
if len(self.drawn_traces) > 0:
plot_range_min = min([trace.min() for trace in self.drawn_traces])
plot_range_max = max([trace.max() for trace in self.drawn_traces])
if plot_range_max <= plot_range_min:
return self.ax[1].set_ylim(-0.1,1.1)
diff = float(plot_range_max) - float(plot_range_min)
plot_range_min -= diff*0.1
plot_range_max += diff*0.1
self.ax[1].set_ylim(plot_range_min, plot_range_max)
def _update_trace_processing(self):
list_trace_processing = []
if self.window.desaturate:
list_trace_processing.append(Desaturate(self.datasource.sensor.saturation_calibration))
if self.window.removeStatic:
list_trace_processing.append(RemoveStaticNoise(self.datasource.sensor.static_noise))
if self.window.realign:
list_trace_processing.append(Realign())
if self.window.zeroBaseline:
list_trace_processing.append(ZeroBaseline())
if self.window.cutoff:
list_trace_processing.append(Clip())
if self.window.smoothTrace:
list_trace_processing.append(Smooth())
self.trace_processing = TraceProcessingCollection(list_trace_processing)
self._update()
def _on_hover(self, event):
if event.inaxes == self.ax[0]:
try: col, row = self.helper.to_indices(event.xdata, event.ydata)
except:
self.hover_coords = None
return 0
index = self.helper.image_coord_to_channel_index(row, col)
if [row, col] == self.hover_coords:
return 0
self.hover_coords = [row, col]
self.hovering = True
# Marker on the currently hovered channel
color = 'r'
marker_style = dict(color=color, marker='o', markersize=4, markerfacecolor=color, markeredgecolor = 'white')
self.ax[0].plot(col+.5, row+.5, **marker_style)
# Amplitude and distance of echo in hovered channel
ech_idx = np.where(self.echo_sample.indices == index)[0]
self.ax[0].set_title(f'amp:{self.echo_sample.amplitudes[ech_idx]}, dst:{self.echo_sample.distances[ech_idx]}')
len_drawn_traces = len(self.drawn_traces)
self.draw_traces(index, color)
self._update_plot_range()
self._update_legend()
self.backend.draw()
del self.ax[0].lines[-1]
for _ in self.drawn_traces[len_drawn_traces:]:
del self.ax[1].lines[-1]
self.drawn_traces = self.drawn_traces[:len_drawn_traces]
elif self.hovering:
self._update_plot_range()
self._update_legend()
self.backend.draw()
self.hover_coords = None
self.ax[0].set_title(f'')
else:
self.hover_coords = None
def _on_click(self, event):
if self.hover_coords is not None:
if self.hover_coords not in self.selection:
self.selection.append(self.hover_coords)
else:
self.selection.remove(self.hover_coords)
self.window.selection = self.selection
# self.hover_coords = None
self._update_plots()
def _add_to_selection(self):
try:
channel = int(self.window.addToSelection)
row, col = self.helper.channel_index_to_image_coord(channel)
self.selection.append([row, col])
self.window.selection = self.selection
except: pass
self._update_plots()
def draw_traces(self, index, color):
if self.window.showRaw:
if isinstance(self.trace_sample, FastTrace):
if self.window.showHighFastTrace:
trace_high = self.trace_sample.raw['high']['data'][index]
self.ax[1].plot(trace_high, color=color, label=f'Raw(High): {index}')
self.drawn_traces.append(trace_high)
if self.window.showLowFastTrace:
trace_low = self.trace_sample.raw['low']['data'][index]
self.ax[1].plot(trace_low, color=color, ls=':', label=f'Raw(Low): {index}')
self.drawn_traces.append(trace_low)
else:
trace_raw = self.trace_sample.raw['data'][index]
self.ax[1].plot(trace_raw, color=color, label=f'Raw: {index}')
self.drawn_traces.append(trace_raw)
if self.window.traceProcessing:
if isinstance(self.trace_sample, FastTrace):
if self.window.showHighFastTrace:
trace_processed_high = self.trace_processed['high']['data'][index]
self.ax[1].plot(trace_processed_high, color=color, ls='--', label=f'Processed(High): {index}')
self.drawn_traces.append(trace_processed_high)
if self.window.showLowFastTrace:
trace_processed_low = self.trace_processed['low']['data'][index]
self.ax[1].plot(trace_processed_low, color=color, ls='-.', label=f'Processed(Low): {index}')
self.drawn_traces.append(trace_processed_low)
else:
trace_processed = self.trace_processed['data'][index]
self.ax[1].plot(trace_processed, color=color, ls='--', label=f'Processed: {index}')
self.drawn_traces.append(trace_processed)
def _clear_plots(self):
for ax in self.ax:
del ax.lines[:]
self.drawn_traces = []
self._update_legend()
def _placeholder_echo_sample(self):
if isinstance(self.trace_sample, FastTrace):
traces_raw = self.trace_sample.raw['high']
else:
traces_raw = self.trace_sample.raw
v, h = self.trace_sample.specs['v'], self.trace_sample.specs['h']
vv, hh = np.mgrid[0:v, 0:h]
coords_img = np.stack((vv,hh, np.arange(0, v*h).reshape(v, h)), axis=2)[...,2]
coords_img_tf = np.flipud(coords_img)
indices = coords_img_tf.flatten()
amplitudes = np.max(traces_raw['data'], axis=1)[indices]
distances = np.argmax(traces_raw['data'], axis=1)[indices]*traces_raw['distance_scaling']
try:
distances += traces_raw['time_base_delays'][indices]
except:
distances += traces_raw['time_base_delays']
raw = clouds.to_echo_package(
indices = np.array(indices, 'u4'),
distances = np.array(distances, 'f4'),
amplitudes = np.array(amplitudes, 'f4'),
timestamp = self.trace_sample.timestamp,
specs = self.trace_sample.specs
)
return Echo(self.trace_sample.index, self.trace_sample.datasource, raw, self.trace_sample.timestamp) | [
"pioneer.common.trace_processing.RemoveStaticNoise",
"pioneer.das.api.samples.Echo",
"numpy.argmax",
"pioneer.common.trace_processing.TraceProcessingCollection",
"pioneer.common.trace_processing.ZeroBaseline",
"numpy.flipud",
"pioneer.common.trace_processing.Desaturate",
"pioneer.common.trace_processi... | [((520, 541), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (531, 541), True, 'import numpy as np\n'), ((1180, 1230), 'pioneer.common.platform.parse_datasource_name', 'platform_utils.parse_datasource_name', (['self.ds_name'], {}), '(self.ds_name)\n', (1216, 1230), True, 'from pioneer.common import platform as platform_utils\n'), ((7657, 7705), 'pioneer.common.trace_processing.TraceProcessingCollection', 'TraceProcessingCollection', (['list_trace_processing'], {}), '(list_trace_processing)\n', (7682, 7705), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((12541, 12562), 'numpy.flipud', 'np.flipud', (['coords_img'], {}), '(coords_img)\n', (12550, 12562), True, 'import numpy as np\n'), ((13233, 13331), 'pioneer.das.api.samples.Echo', 'Echo', (['self.trace_sample.index', 'self.trace_sample.datasource', 'raw', 'self.trace_sample.timestamp'], {}), '(self.trace_sample.index, self.trace_sample.datasource, raw, self.\n trace_sample.timestamp)\n', (13237, 13331), False, 'from pioneer.das.api.samples import FastTrace, Echo\n'), ((5297, 5357), 'pioneer.common.gui.qml.backend_qtquick5.MPLImageHelper', 'backend_qtquick5.MPLImageHelper', (['image', 'self.ax[0]'], {'offset': '(0)'}), '(image, self.ax[0], offset=0)\n', (5328, 5357), False, 'from pioneer.common.gui.qml import backend_qtquick5\n'), ((12626, 12660), 'numpy.max', 'np.max', (["traces_raw['data']"], {'axis': '(1)'}), "(traces_raw['data'], axis=1)\n", (12632, 12660), True, 'import numpy as np\n'), ((7087, 7144), 'pioneer.common.trace_processing.Desaturate', 'Desaturate', (['self.datasource.sensor.saturation_calibration'], {}), '(self.datasource.sensor.saturation_calibration)\n', (7097, 7144), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((7224, 
7278), 'pioneer.common.trace_processing.RemoveStaticNoise', 'RemoveStaticNoise', (['self.datasource.sensor.static_noise'], {}), '(self.datasource.sensor.static_noise)\n', (7241, 7278), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((7353, 7362), 'pioneer.common.trace_processing.Realign', 'Realign', ([], {}), '()\n', (7360, 7362), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((7442, 7456), 'pioneer.common.trace_processing.ZeroBaseline', 'ZeroBaseline', ([], {}), '()\n', (7454, 7456), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((7530, 7536), 'pioneer.common.trace_processing.Clip', 'Clip', ([], {}), '()\n', (7534, 7536), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((7615, 7623), 'pioneer.common.trace_processing.Smooth', 'Smooth', ([], {}), '()\n', (7621, 7623), False, 'from pioneer.common.trace_processing import TraceProcessingCollection, Smooth, Clip, ZeroBaseline, Realign, RemoveStaticNoise, Desaturate\n'), ((8534, 8577), 'numpy.where', 'np.where', (['(self.echo_sample.indices == index)'], {}), '(self.echo_sample.indices == index)\n', (8542, 8577), True, 'import numpy as np\n'), ((12690, 12727), 'numpy.argmax', 'np.argmax', (["traces_raw['data']"], {'axis': '(1)'}), "(traces_raw['data'], axis=1)\n", (12699, 12727), True, 'import numpy as np\n'), ((12979, 13002), 'numpy.array', 'np.array', (['indices', '"""u4"""'], {}), "(indices, 'u4')\n", (12987, 13002), True, 'import numpy as np\n'), ((13029, 13054), 'numpy.array', 'np.array', (['distances', '"""f4"""'], {}), "(distances, 'f4')\n", (13037, 13054), True, 'import numpy as np\n'), ((13082, 13108), 
'numpy.array', 'np.array', (['amplitudes', '"""f4"""'], {}), "(amplitudes, 'f4')\n", (13090, 13108), True, 'import numpy as np\n'), ((12468, 12487), 'numpy.arange', 'np.arange', (['(0)', '(v * h)'], {}), '(0, v * h)\n', (12477, 12487), True, 'import numpy as np\n')] |
import matplotlib
import numpy as np
matplotlib.use('Agg')
import shap
def test_random_single_image():
""" Just make sure the image_plot function doesn't crash.
"""
shap.image_plot(np.random.randn(3, 20,20), np.random.randn(3, 20,20), show=False)
def test_random_multi_image():
""" Just make sure the image_plot function doesn't crash.
"""
shap.image_plot([np.random.randn(3, 20,20) for i in range(3)], np.random.randn(3, 20,20), show=False)
| [
"matplotlib.use",
"numpy.random.randn"
] | [((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), False, 'import matplotlib\n'), ((195, 221), 'numpy.random.randn', 'np.random.randn', (['(3)', '(20)', '(20)'], {}), '(3, 20, 20)\n', (210, 221), True, 'import numpy as np\n'), ((222, 248), 'numpy.random.randn', 'np.random.randn', (['(3)', '(20)', '(20)'], {}), '(3, 20, 20)\n', (237, 248), True, 'import numpy as np\n'), ((431, 457), 'numpy.random.randn', 'np.random.randn', (['(3)', '(20)', '(20)'], {}), '(3, 20, 20)\n', (446, 457), True, 'import numpy as np\n'), ((385, 411), 'numpy.random.randn', 'np.random.randn', (['(3)', '(20)', '(20)'], {}), '(3, 20, 20)\n', (400, 411), True, 'import numpy as np\n')] |
import copy
import decimal
import functools
import logging
import os.path
import random
import re
import sys
import threading

import numpy as np

from .properties import Properties
from .singleton import Singleton
# This module should be usable on systems without wx.
verbose = True
p = Properties()
class DBException(Exception):
    '''Generic database error, rendered with an "ERROR:" prefix.'''
    def __str__(self):
        # Prefix and trailing newline make the message stand out when
        # written directly to a console or log.
        message = self.args[0]
        return 'ERROR: ' + message + '\n'
    # XXX: sys.traceback is only set when an exception is not handled
    # To test, enter an invalid image_channel_path column name in props file
    # filename, line_number, function_name, text = traceback.extract_tb(sys.last_traceback)[-1]
    # return "ERROR <%s>: "%(function_name) + self.args[0] + '\n'
def DBError():
    '''Return the Error exception class of the db library in use.'''
    # p.db_type comes from the loaded properties file; only mysql and
    # sqlite are supported back ends.
    db_type = p.db_type.lower()
    if db_type == 'mysql':
        import MySQLdb
        return MySQLdb.Error
    if db_type == 'sqlite':
        import sqlite3
        return sqlite3.Error
def DBOperationalError():
    '''Return the OperationalError exception class of the db library in use.'''
    # Mirrors DBError; dispatches on the configured back end.
    db_type = p.db_type.lower()
    if db_type == 'mysql':
        import MySQLdb
        return MySQLdb.OperationalError
    if db_type == 'sqlite':
        import sqlite3
        return sqlite3.OperationalError
class DBDisconnectedException(Exception):
    """
    Raised when a query or other database operation fails because the
    database is shutting down or the connection has been lost.

    Callers wrapped with `with_mysql_retry` catch this exception,
    reconnect, and retry the operation once.
    """
def with_mysql_retry(cls, f):
    """
    Decorator that tries calling its function a second time if a
    DBDisconnectedException occurs the first time.

    f -- a method taking the database object as its first argument; the
         database object must expose a connect() method used to
         re-establish the connection before the retry.
    Returns the wrapped function.
    """
    # functools.wraps preserves f's __name__/__doc__ on the wrapper,
    # which the original decorator lost.
    @functools.wraps(f)
    def fn(db, *args, **kwargs):
        try:
            return f(db, *args, **kwargs)
        except DBDisconnectedException:
            logging.info('Lost connection to the MySQL database; reconnecting.')
            db.connect()
            # Retry exactly once; a second failure propagates.
            return f(db, *args, **kwargs)
    return fn
with_mysql_retry = classmethod(with_mysql_retry)
def sqltype_to_pythontype(t):
    '''
    t -- a valid sql typestring (case-insensitive; may carry a size
         suffix such as "VARCHAR(255)")
    returns a python type that will hold the given sqltype, or None
    (implicitly) if the type string is not recognized.
    '''
    t = t.upper()
    # Integer-like types. NOTE(review): DECIMAL/NUMERIC/DATE/DATETIME
    # mapping to int is preserved from the original behavior.
    if (t.startswith('INT') or t.startswith('DECIMAL') or t.startswith('BIGINT') or
        t in ['TINYINT', 'SMALLINT', 'MEDIUMINT', 'UNSIGNED BIG INT',
              'INT2', 'INT8', 'NUMERIC', 'BOOLEAN', 'DATE', 'DATETIME']):
        return int
    elif t in ['REAL', 'DOUBLE', 'DOUBLE PRECISION', 'FLOAT']:
        return float
    # Text types. (The original tested startswith('NCHAR') twice; the
    # duplicate has been removed.)
    elif (t.startswith('CHARACTER') or t.startswith('VARCHAR') or t.startswith('CHAR') or
          t.startswith('VARYING CHARACTER') or t.startswith('NCHAR') or
          t.startswith('NATIVE CHARACTER') or t.startswith('NVARCHAR') or
          t in ['TEXT', 'CLOB']):
        return str
#TODO: this doesn't belong in this module
def get_data_table_from_csv_reader(reader):
    '''Read a csv table into a 2d list.

    reader -- any iterator yielding rows (e.g. a csv.reader).
    Returns [] if the reader yields nothing.  Iteration stops at the end
    of the input or at the first falsy (empty) row, preserving the
    original behavior of treating a blank line as end-of-table.
    '''
    dtable = []
    try:
        row = next(reader)
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to StopIteration to match the handler below.
    except StopIteration:
        return []
    while row:
        dtable.append(row)
        try:
            row = next(reader)
        except StopIteration:
            break
    return dtable
def clean_up_colnames(colnames):
    '''takes a list of column names and makes them so they
    don't have to be quoted in sql syntax'''
    cleaned = []
    for col in colnames:
        # Replace whitespace with underscores, then drop anything that is
        # not alphanumeric or underscore.
        col = col.replace(' ', '_').replace('\n', '_')
        cleaned.append(''.join(c for c in col if re.match('[A-Za-z0-9_]', c)))
    return cleaned
def well_key_columns(table_name=''):
    '''Return, as a tuple, the names of the columns that make up the
    well key. If table_name is not None, use it to qualify each
    column name.  Returns None when no well column is configured.'''
    prefix = '' if table_name in (None, '') else table_name + '.'
    if p.plate_id and p.well_id:
        return (prefix + p.plate_id, prefix + p.well_id)
    if p.well_id:
        return (prefix + p.well_id,)
    return None
def image_key_columns(table_name=''):
    '''Return, as a tuple, the names of the columns that make up the
    image key. If table_name is not None, use it to qualify each
    column name.'''
    prefix = '' if table_name in (None, '') else table_name + '.'
    if p.table_id:
        return (prefix + p.table_id, prefix + p.image_id)
    return (prefix + p.image_id,)
def object_key_columns(table_name=''):
    '''Return, as a tuple, the names of the columns that make up the
    object key.'''
    assert p.object_table is not None
    prefix = '' if table_name in (None, '') else table_name + '.'
    # Fall back to an empty object id when no object table/id is configured.
    object_id = p.object_id if (p.object_id and p.object_table) else ''
    if p.table_id:
        return (prefix + p.table_id, prefix + p.image_id, prefix + object_id)
    return (prefix + p.image_id, prefix + object_id)
def object_key_defs():
    '''Return SQL column definitions ("col INT, ...") for the object key columns.'''
    return ', '.join('%s INT' % col for col in object_key_columns())
def GetWhereClauseForObjects(obkeys, table_name=None):
    '''
    Return a SQL WHERE clause that matches any of the given object keys.
    Example: GetWhereClauseForObjects([(1, 3), (2, 4)]) => "ImageNumber=1
    AND ObjectNumber=3 OR ImageNumber=2 AND ObjectNumber=4"
    '''
    if table_name is None:
        table_name = ''
    # To limit the depth of this expression, we split it into a binary tree.
    # This helps avoid SQLITE_MAX_LIMIT_EXPR_DEPTH
    def split(keys,table_name):
        # Base case: 3 or fewer keys are OR'd together directly, each key
        # expanded as "col1=v1 AND col2=v2 ...".
        if len(keys) <= 3:
            return '(' + ' OR '.join([' AND '.join([col + '=' + str(value)
                            for col, value in zip(object_key_columns(table_name), obkey)])
                        for obkey in keys]) + ')'
        else:
            # Recursive case: halve the key list and OR the two subtrees.
            halflen = len(keys) // 2
            return '(' + split(keys[:halflen],table_name) + ' OR ' + split(keys[halflen:],table_name) + ')'
    return split(obkeys,table_name)
def GetWhereClauseForImages(imkeys):
    '''
    Return a SQL WHERE clause that matches any of the given image keys.
    Example: GetWhereClauseForImages([(3,), (4,)]) =>
    "(ImageNumber IN (3, 4))"
    '''
    # NOTE: sorts the caller's list in place.
    imkeys.sort()
    if not p.table_id:
        # Single-table keys: one IN clause over the image ids.
        return '%s IN (%s)'%(p.image_id, ','.join([str(k[0]) for k in imkeys]))
    else:
        # (table, image) keys: emit one "(table_id=t AND image_id IN (...))"
        # term per table number, OR'd together.
        imkeys = np.array(imkeys)
        count = 0
        tnum = 0
        wheres = []
        while count < len(imkeys):
            imnums = imkeys[(imkeys[:,0]==tnum), 1]
            count += len(imnums)
            if len(imnums)>0:
                wheres += ['(%s=%s AND %s IN (%s))'%(p.table_id, tnum,
                            p.image_id, ','.join([str(k) for k in imnums]))]
            tnum += 1
        return ' OR '.join(wheres)
def GetWhereClauseForWells(keys, table_name=None):
    '''
    Return a SQL WHERE clause that matches any of the given well keys.
    Example: GetWhereClauseForImages([('plate1', 'A01'), ('plate1', 'A02')]) =>
    "(plate="plate1" AND well="A01" OR plate="plate1" AND "A02"))"
    '''
    if table_name is None:
        table_name = ''
    else:
        table_name += '.'
    # NOTE: sorts the caller's list in place.
    keys.sort()
    if not p.plate_id:
        # No plate column configured: match on well ids alone.
        return '%s%s IN (%s)'%(table_name, p.well_id, ','.join(['"%s"'%(k[0]) for k in keys]))
    else:
        wheres = ['%s%s="%s" AND %s%s="%s"'%(table_name, p.plate_id, plate, table_name, p.well_id, well) for plate, well in keys]
        return ' OR '.join(wheres)
def UniqueObjectClause(table_name=None):
    '''
    Returns the comma-separated list of columns that uniquely identify an
    object, e.g. for "SELECT "+UniqueObjectClause()+" FROM <mydb>;".
    '''
    cols = object_key_columns(table_name)
    return ','.join(cols)
def UniqueImageClause(table_name=None):
    '''
    Returns the comma-separated list of columns that uniquely identify an
    image, e.g. for "SELECT <UniqueImageClause()> FROM <mydb>;".
    '''
    cols = image_key_columns(table_name)
    return ','.join(cols)
def UniqueWellClause(table_name=None):
    '''
    Returns the comma-separated list of columns that uniquely identify a
    well, e.g. for "SELECT <UniqueWellClause()> FROM <mydb>;".
    '''
    cols = well_key_columns(table_name)
    return ','.join(cols)
def get_csv_filenames_from_sql_file():
    '''
    Get the image and object CSVs specified in the .SQL file.

    Returns (imcsvs, obcsvs): filenames ending in image.csv / object.csv
    found quoted in p.db_sql_file.
    '''
    # Use a context manager so the handle is closed even if read() raises
    # (the original closed it only on the success path).
    with open(p.db_sql_file) as f:
        lines = f.read()
    files = re.findall(r" '\w+\.[Cc][Ss][Vv]' ",lines)
    # Strip the surrounding space+quote from each match.
    files = [fname[2:-2] for fname in files]
    imcsvs = []
    obcsvs = []
    for fname in files:
        if fname.lower().endswith('image.csv'):
            imcsvs += [fname]
        elif fname.lower().endswith('object.csv'):
            obcsvs += [fname]
    return imcsvs, obcsvs
class SqliteClassifier():
    '''Scores feature vectors against per-class weight matrices so that
    classification can be invoked from inside a SQLite query.'''
    def __init__(self):
        pass

    def setup_classifier(self, thresholds, a, b):
        # Store the weight matrices transposed so np.where can broadcast the
        # per-feature threshold test across classes in classify().
        self.thresholds = thresholds
        self.a = a.T
        self.b = b.T

    def classify(self, *features):
        # NULL measurements arrive as None; treat them as 0.
        feats = [f if f is not None else 0 for f in features]
        scores = np.where((feats > self.thresholds), self.a, self.b).sum(axis=1)
        class_num = 1 + scores.argmax()
        # CRUCIAL: must make sure class_num is an int or it won't compare
        # properly with the class being looked for and nothing will
        # be found. This only appears to be a problem on Windows 64bit
        return int(class_num)
def _check_colname_user(properties, table, colname):
if table in [properties.image_table, properties.object_table] and not colname.lower().startswith('user_'):
raise ValueError('User-defined columns in the image and object tables must have names beginning with "User_".')
class DBConnect(metaclass=Singleton):
'''
DBConnect abstracts calls to MySQLdb/SQLite. It's a singleton that maintains
unique connections for each thread that uses it. These connections are
automatically created on "execute", and results are automatically returned
as a list.
'''
    def __init__(self):
        # Column names the classifier was trained on (set elsewhere, lazily).
        self.classifierColNames = None
        # Per-thread connections/cursors/credentials, keyed by thread name
        # (see connect()).
        self.connections = {}
        self.cursors = {}
        self.connectionInfo = {}
        #self.link_cols = {} # link_cols['table'] = columns that link 'table' to the per-image table
        # Classifier registered as a SQLite function in connect().
        self.sqlite_classifier = SqliteClassifier()
        self.gui_parent = None
def __str__(self):
return ''.join([ (key + " = " + str(val) + "\n")
for (key, val) in list(self.__dict__.items())])
    def connect(self, empty_sqlite_db=False):
        '''
        Attempts to create a new connection to the specified database using
        the current thread name as a connection ID.
        If properties.db_type is 'sqlite', it will create a sqlite db in a
        temporary directory from the csv files specified by
        properties.image_csv_file and properties.object_csv_file

        Raises DBException on failure or for an unknown db_type.
        '''
        connID = threading.currentThread().getName()
        logging.info('[%s] Connecting to the database...'%(connID))
        # If this connection ID already exists print a warning
        if connID in self.connections:
            if self.connectionInfo[connID] == (p.db_host, p.db_user,
                                               (p.db_passwd or None), p.db_name):
                logging.warn('A connection already exists for this thread. %s as %s@%s (connID = "%s").'%(p.db_name, p.db_user, p.db_host, connID))
            else:
                raise DBException('A connection already exists for this thread (%s). Close this connection first.'%(connID,))
        # MySQL database: connect normally
        if p.db_type.lower() == 'mysql':
            import MySQLdb
            from MySQLdb.cursors import SSCursor
            try:
                conn = MySQLdb.connect(host=p.db_host, db=p.db_name,
                                       user=p.db_user, passwd=(p.db_passwd or None))
                self.connections[connID] = conn
                self.cursors[connID] = SSCursor(conn)
                self.connectionInfo[connID] = (p.db_host, p.db_user,
                                               (p.db_passwd or None), p.db_name)
                logging.debug('[%s] Connected to database: %s as %s@%s'%(connID, p.db_name, p.db_user, p.db_host))
                # Auxiliary tables are only created from the main thread.
                if connID == 'MainThread':
                    if p.classification_type == 'image':
                        self.CreateObjectImageTable()
                    if p.check_tables == 'yes':
                        self.CreateObjectCheckedTable()
            except DBError() as e:
                raise DBException('Failed to connect to database: %s as %s@%s (connID = "%s").\n %s'%(p.db_name, p.db_user, p.db_host, connID, e))
        # SQLite database: create database from CSVs
        elif p.db_type.lower() == 'sqlite':
            import sqlite3 as sqlite
            if not p.db_sqlite_file:
                # Compute a UNIQUE database name for these files
                import hashlib
                dbpath = os.getenv('USERPROFILE') or os.getenv('HOMEPATH') or \
                    os.path.expanduser('~')
                dbpath = os.path.join(dbpath,'CPA')
                try:
                    os.listdir(dbpath)
                except OSError:
                    os.mkdir(dbpath)
                if p.db_sql_file:
                    # Hash the CSV/SQL file mtimes so the cached DB is rebuilt
                    # whenever any input file changes.
                    csv_dir = os.path.split(p.db_sql_file)[0] or '.'
                    imcsvs, obcsvs = get_csv_filenames_from_sql_file()
                    files = imcsvs + obcsvs + [os.path.split(p.db_sql_file)[1]]
                    hash = hashlib.new('md5')
                    for fname in files:
                        t = os.stat(csv_dir + os.path.sep + fname).st_mtime
                        hash_me = f"{fname}{t}".encode()
                        hash.update(hash_me)
                    dbname = 'CPA_DB_%s.db'%(hash.hexdigest())
                else:
                    imtime = os.stat(p.image_csv_file).st_mtime
                    obtime = os.stat(p.object_csv_file).st_mtime
                    l = '%s%s%s%s'%(p.image_csv_file,p.object_csv_file,imtime,obtime)
                    dbname = 'CPA_DB_%s.db'%(hashlib.md5(l.encode()).hexdigest())
                p.db_sqlite_file = os.path.join(dbpath, dbname)
            logging.info('[%s] SQLite file: %s'%(connID, p.db_sqlite_file))
            self.connections[connID] = sqlite.connect(p.db_sqlite_file)
            self.connections[connID].text_factory = str
            self.cursors[connID] = self.connections[connID].cursor()
            self.connectionInfo[connID] = ('sqlite', 'cpa_user', '', 'CPA_DB')
            # SQLite lacks several SQL functions CPA relies on; register
            # Python implementations on this connection.
            self.connections[connID].create_function('greatest', -1, max)
            # Create MEDIAN function
            class median:
                def __init__(self):
                    self.reset()
                def reset(self):
                    self.values = []
                def step(self, val):
                    if val is not None:
                        if not np.isnan(float(val)):
                            self.values.append(float(val))
                def finalize(self):
                    n = len(self.values)
                    if n == 0:
                        return None
                    self.values.sort()
                    if n%2 == 1:
                        return self.values[n//2]
                    else:
                        return (self.values[n//2-1] + self.values[n//2]) / 2
            self.connections[connID].create_aggregate('median', 1, median)
            # Create STDDEV function
            class stddev:
                def __init__(self):
                    self.reset()
                def reset(self):
                    self.values = []
                def step(self, val):
                    if val is not None:
                        if not np.isnan(float(val)):
                            self.values.append(float(val))
                def finalize(self):
                    if len(self.values) == 0:
                        return None
                    avg = np.mean(self.values)
                    b = np.sum([(x-avg)**2 for x in self.values])
                    std = np.sqrt(b/len(self.values))
                    return std
            self.connections[connID].create_aggregate('stddev', 1, stddev)
            # Create REGEXP function
            def regexp(expr, item):
                reg = re.compile(expr)
                return reg.match(item) is not None
            self.connections[connID].create_function("REGEXP", 2, regexp)
            # Create classifier function
            self.connections[connID].create_function('classifier', -1, self.sqlite_classifier.classify)
            try:
                # Try the connection
                if empty_sqlite_db:
                    self.execute('select 1')
                else:
                    self.GetAllImageKeys()
            except Exception:
                # If this is the first connection, then we need to create the DB from the csv files
                if len(self.connections) == 1:
                    if p.db_sql_file:
                        # TODO: prompt user "create db, y/n"
                        logging.info('[%s] Creating SQLite database at: %s.'%(connID, p.db_sqlite_file))
                        try:
                            self.CreateSQLiteDBFromCSVs()
                        except Exception as e:
                            # Remove the half-built DB file so the next run
                            # starts from scratch, then re-raise.
                            try:
                                if os.path.isfile(p.db_sqlite_file):
                                    os.remove(p.db_sqlite_file)
                            except:
                                pass
                            raise e
                    elif p.image_csv_file and p.object_csv_file:
                        # TODO: prompt user "create db, y/n"
                        logging.info('[%s] Creating SQLite database at: %s.'%(connID, p.db_sqlite_file))
                        self.CreateSQLiteDB()
                    else:
                        raise DBException('Database at %s appears to be missing specified tables.'%(p.db_sqlite_file))
            # If we're not on the main thread these tables should already have been made.
            if p.classification_type == 'image' and connID == "MainThread":
                self.CreateObjectImageTable()
            if p.check_tables == 'yes' and connID == "MainThread":
                self.CreateObjectCheckedTable()
            logging.debug('[%s] Connected to database: %s'%(connID, p.db_sqlite_file))
        # Unknown database type (this should never happen)
        else:
            raise DBException("Unknown db_type in properties: '%s'\n"%(p.db_type))
    def setup_sqlite_classifier(self, thresh, a, b):
        '''Configure the classifier exposed to SQLite queries (see
        SqliteClassifier.setup_classifier for the parameter meanings).'''
        self.sqlite_classifier.setup_classifier(thresh, a, b)
def Disconnect(self):
for connID in list(self.connections.keys()):
self.CloseConnection(connID)
self.connections = {}
self.cursors = {}
self.connectionInfo = {}
self.classifierColNames = None
def CloseConnection(self, connID=None):
if not connID:
connID = threading.currentThread().getName()
if connID in self.connections:
try:
self.connections[connID].commit()
except: pass
self.cursors.pop(connID)
self.connections.pop(connID).close()
(db_host, db_user, db_passwd, db_name) = self.connectionInfo.pop(connID)
logging.info('Closed connection: %s as %s@%s (connID="%s").' % (db_name, db_user, db_host, connID))
else:
logging.warn('No database connection ID "%s" found!' %(connID))
@DBDisconnectedException.with_mysql_retry
def execute(self, query, args=None, silent=False, return_result=True):
'''
Executes the given query using the connection associated with
the current thread. Returns the results as a list of rows
unless return_result is false.
'''
if p.db_type.lower() == 'sqlite':
if args:
raise TypeError('Can\'t pass args to sqlite execute!')
# Grab a new connection if this is a new thread
connID = threading.currentThread().getName()
if not connID in self.connections:
self.connect()
try:
cursor = self.cursors[connID]
except KeyError as e:
raise DBException('No such connection: "%s".\n' %(connID))
# Finally make the query
try:
if verbose and not silent:
logging.debug('[%s] %s'%(connID, query))
if p.db_type.lower() == 'sqlite':
assert args is None
cursor.execute(query)
else:
cursor.execute(query, args=args)
if return_result:
return self._get_results_as_list()
except Exception as e:
try:
if isinstance(e, DBOperationalError()) and e.args[0] in [2006, 2013, 1053]:
raise DBDisconnectedException()
else:
raise DBException('Database query failed for connection "%s"'
'\nQuery was: "%s"'
'\nException was: %s'%(connID, query, e))
except Exception as e2:
raise DBException('Database query failed for connection "%s" and failed to reconnect'
'\nQuery was: "%s"'
'\nFirst exception was: %s'
'\nSecond exception was: %s'%(connID, query, e, e2))
def Commit(self):
connID = threading.currentThread().getName()
try:
logging.debug('[%s] Commit'%(connID))
self.connections[connID].commit()
except DBError() as e:
raise DBException('Commit failed for connection "%s"\n\t%s\n' %(connID, e))
except KeyError as e:
raise DBException('No such connection: "%s".\n' %(connID))
def GetNextResult(self):
connID = threading.currentThread().getName()
try:
return next(self.cursors[connID])
except DBError() as e:
raise DBException('Error retrieving next result from database: %s'%(e,))
return None
except StopIteration as e:
return None
except KeyError as e:
raise DBException('No such connection: "%s".\n' %(connID))
def _get_results_as_list(self):
'''
Returns a list of results retrieved from the last execute query.
NOTE: this function automatically called by execute.
'''
connID = threading.currentThread().getName()
return list(self.cursors[connID].fetchall())
    def result_dtype(self):
        """
        Return an appropriate descriptor for a numpy array in which the
        result can be stored.
        """
        #XXX: This doesn't work for SQLite... no cursor.description_flags
        cursor = self.cursors[threading.currentThread().getName()]
        descr = []
        # description and description_flags come from the MySQLdb cursor;
        # each column gets mapped through the connection's converter table
        # to a python type, then to a numpy dtype string.
        for (name, type_code, display_size, internal_size, precision,
             scale, null_ok), flags in zip(cursor.description,
                                           cursor.description_flags):
            conversion = cursor.connection.converter[type_code]
            if isinstance(conversion, list):
                fun2 = None
                # Pick the first converter whose mask matches this column's
                # flags (falls through to the last one otherwise).
                for mask, fun in conversion:
                    fun2 = fun
                    if mask & flags:
                        break
            else:
                fun2 = conversion
            if fun2 in [decimal.Decimal, float]:
                dtype = 'f8'
            elif fun2 == int:
                dtype = 'i4'
            elif fun2 == bytes:
                dtype = '|S%d'%(internal_size,)
            descr.append((name, dtype))
        return descr
def get_results_as_structured_array(self, n=None):
#XXX: this doesn't work for SQLite
col_names = self.GetResultColumnNames()
connID = threading.currentThread().getName()
records = []
while True:
r = self.cursors[connID].fetchmany(n)
if len(r) == 0:
break
records.extend(list(r))
return np.array(records, dtype=self.result_dtype())
    def GetObjectIDAtIndex(self, imKey, index):
        '''
        Returns the true object ID of the nth object in an image.
        Note: This must be used when object IDs in the DB aren't
        contiguous starting at 1.
        (eg: if some objects have been removed)
        index: a POSITIVE integer (1,2,3...)
        '''
        where_clause = " AND ".join(['%s=%s'%(col, val) for col, val in zip(image_key_columns(), imKey)])
        # LIMIT <offset>,1 fetches exactly the (index-1)th matching row.
        object_number = self.execute('SELECT %s FROM %s WHERE %s LIMIT %s,1'
                                     %(p.object_id, p.object_table, where_clause, index - 1))
        object_number = object_number[0][0]
        # The full object key is the image key plus the object number.
        return tuple(list(imKey)+[int(object_number)])
def GetRandomObjectsSQL(self, imKeys, N):
'''
Returns a random sampling of object keys from the database.
Sampling occurs without replacement.
imKeys: a list of image keys to sample objects from.
N: number of keys to sample
'''
rand = "RANDOM()" if p.db_type.lower() == 'sqlite' else "RAND()"
if not imKeys:
statement = f"SELECT {p.image_id}, {p.object_id} FROM {p.object_table} ORDER BY {rand} LIMIT {N}"
else:
where_clause = GetWhereClauseForImages(imKeys)
statement = f"SELECT {p.image_id}, {p.object_id} FROM {p.object_table} WHERE {where_clause} ORDER BY {rand} LIMIT {N}"
object_numbers = self.execute(statement)
return object_numbers
def GetAllObjectsSQL(self, imKeys, N=None):
'''
Returns objects from a list of keys in order.
imkeys: a list of image keys
N: integer representing the number of objects to fetch.
'''
if N is None:
limit_clause = ""
else:
limit_clause = f" LIMIT {N}"
if not imKeys:
statement = f"SELECT {p.image_id}, {p.object_id} FROM {p.object_table} ORDER BY {p.image_id}{limit_clause}"
else:
where_clause = GetWhereClauseForImages(imKeys)
statement = f"SELECT {p.image_id}, {p.object_id} FROM {p.object_table} WHERE {where_clause} ORDER BY {p.image_id}{limit_clause}"
object_numbers = self.execute(statement)
return object_numbers
def GetPerImageObjectCounts(self):
'''
Returns a list of (imKey, obCount) tuples.
The counts returned correspond to images that are present in BOTH the
per_image and per_object table.
'''
if p.object_table is None or p.object_id is None:
return []
select = 'SELECT '+UniqueImageClause(p.object_table)+', COUNT('+p.object_table+'.'+p.object_id + ') FROM '+p.object_table + ' GROUP BY '+UniqueImageClause(p.object_table)
result1 = self.execute(select)
select = 'SELECT '+UniqueImageClause(p.image_table)+' FROM '+p.image_table
result2 = self.execute(select)
counts = {}
for r in result1:
counts[r[:-1]] = r[-1]
return [r+(counts[r],) for r in result2 if r in counts]
def GetAllImageKeys(self):
''' Returns a list of all image keys in the image_table. '''
select = "SELECT "+UniqueImageClause()+" FROM "+p.image_table+" GROUP BY "+UniqueImageClause()
return self.execute(select)
    def GetObjectsFromImage(self, imKey):
        '''Return all object keys belonging to the given image.'''
        return self.execute('SELECT %s FROM %s WHERE %s'%(UniqueObjectClause(), p.object_table, GetWhereClauseForImages([imKey])))
    def GetObjectCoords(self, obKey, none_ok=False, silent=False):
        '''Returns the specified object's x, y coordinates in an image.
        '''
        # NOTE(review): none_ok is accepted but never used in this body —
        # missing coordinates always raise; confirm against callers.
        res = self.execute('SELECT %s, %s FROM %s WHERE %s'%(
            p.cell_x_loc, p.cell_y_loc, p.object_table,
            GetWhereClauseForObjects([obKey])), silent=silent)
        if len(res) == 0 or res[0][0] is None or res[0][1] is None:
            message = ('Failed to load coordinates for object key %s. This may '
                       'indicate a problem with your per-object table.\n'
                       'You can check your per-object table "%s" in TableViewer'
                       %(', '.join(['%s:%s'%(col, val) for col, val in
                                    zip(object_key_columns(), obKey)]),
                         p.object_table))
            raise Exception(message)
        else:
            return res[0]
    def GetObjectsCoords(self, obKeys, none_ok=False, silent=False):
        '''Returns the specified objects' x, y coordinates in an image.
        '''
        # NOTE(review): none_ok is accepted but never used here.
        res = self.execute('SELECT %s, %s, %s, %s FROM %s WHERE %s'%(
            p.image_id, p.object_id, p.cell_x_loc, p.cell_y_loc, p.object_table,
            GetWhereClauseForObjects(obKeys)), silent=silent)
        if len(res) == 0 or res[0][0] is None or res[0][1] is None:
            message = ('Failed to load coordinates for object key %s. This may '
                       'indicate a problem with your per-object table.\n'
                       'You can check your per-object table "%s" in TableViewer'
                       %(', '.join(['%s:%s'%(col, val) for col, val in
                                    zip(object_key_columns(), obKeys)]),
                         p.object_table))
            raise Exception(message)
        else:
            # Now we need to match the returned lines to the requested keys
            # (rows come back in DB order; rebuild in the caller's key order).
            res_dict = {}
            buffer = []
            for tup in res:
                res_dict[(tup[0], tup[1])] = (tup[2], tup[3])
            for key in obKeys:
                buffer.append(res_dict[key])
            return buffer
    def GetAllObjectCoordsFromImage(self, imKey):
        ''' Returns a list of lists x, y coordinates for all objects in the given image. '''
        # Ordered by object id so coordinates line up with object numbers.
        select = 'SELECT '+p.cell_x_loc+', '+p.cell_y_loc+' FROM '+p.object_table+' WHERE '+GetWhereClauseForImages([imKey])+' ORDER BY '+p.object_id
        return self.execute(select)
def GetObjectNear(self, imkey, x, y, silent=False):
''' Returns obKey of the closest object to x, y in an image. '''
delta_x = '(%s - %d)'%(p.cell_x_loc, x)
delta_y = '(%s - %d)'%(p.cell_y_loc, y)
dist_clause = '%s*%s + %s*%s'%(delta_x, delta_x, delta_y, delta_y)
select = 'SELECT '+UniqueObjectClause()+' FROM '+p.object_table+' WHERE '+GetWhereClauseForImages([imkey])+' ORDER BY ' +dist_clause+' LIMIT 1'
res = self.execute(select, silent=silent)
if len(res) == 0:
return None
else:
return res[0]
def GetFullChannelPathsForImage(self, imKey):
'''
Returns a list of image channel filenames for a particular image
including the absolute path.
'''
assert len(p.image_path_cols) == len(p.image_file_cols), "Number of image_path_cols and image_file_cols do not match!"
nChannels = len(p.image_path_cols)
select = 'SELECT '
for i in range(nChannels):
select += p.image_path_cols[i]+', '+p.image_file_cols[i]+', '
select = select[:-2] # chop off the last ', '
select += ' FROM '+p.image_table+' WHERE '+GetWhereClauseForImages([imKey])
imPaths = self.execute(select)[0]
# parse filenames out of results
filenames = []
for i in range(0,len(p.image_path_cols*2),2):
if p.image_url_prepend:
filenames.append( imPaths[i]+'/'+imPaths[i+1] )
else:
filenames.append( os.path.join(imPaths[i],imPaths[i+1]) )
return filenames
def GetGroupMaps(self, reverse=False):
'''Return a tuple of two dictionaries: one that maps group
names to group maps and one that maps group names to lists of
column names. If reverse is set to true, the group maps will
map group keys to image keys instead of vice-versa.'''
groupColNames = {}
groupMaps = {}
for group in p._groups:
groupMaps[group], groupColNames[group] = self.group_map(group, reverse=reverse)
return groupMaps, groupColNames
    def group_map(self, group, reverse=False, filter=None):
        """
        Return a tuple of (1) a dictionary mapping image keys to
        group keys and (2) a list of column names for the group
        keys.
        If reverse is set to true, the dictionary will map
        group keys to image keys instead.
        """
        # Image keys are 2 columns wide when a table_id is configured.
        key_size = p.table_id and 2 or 1
        query = p._groups[group]
        from_idx = re.search('\sFROM\s', query.upper()).start()
        try:
            where_idx = re.search('\sWHERE\s', query.upper()).start()
        except AttributeError:
            # No WHERE clause: splice point is the end of the query.
            where_idx = len(query)
        if filter:
            # Restrict the group query to images passing the named filter.
            join_clause = ' JOIN (%s) as f USING (%s)' % (self.filter_sql(filter),
                                                          ','.join(image_key_columns()))
            query = query[:where_idx] + join_clause + query[where_idx:]
        try:
            res = self.execute(query)
        except DBException as e:
            raise DBException('Group query failed for group "%s". Check the SQL'
                              ' syntax in your properties file.\n'
                              'Error was: "%s"'%(group, e))
        col_names = self.GetResultColumnNames()[key_size:]
        from_clause = query[from_idx+6 : where_idx].strip()
        if ',' not in from_clause and ' ' not in from_clause:
            # Single-table query: qualify each column with the table name.
            col_names = ['%s.%s'%(from_clause, col) for col in col_names]
        else:
            # Multi-table query: aliases in FROM can't be parsed; reject them.
            for table in from_clause.split(','):
                if re.search('\sAS\s', table.upper()) or ' ' in table.strip():
                    raise Exception('Unable to parse group query for group named "%s". '
                                    'This could be because you are using table aliases '
                                    'in your FROM clause. Please try rewriting your '
                                    'query without aliases and try again.'%(group))
            col_names = [col.strip() for col in query[7 : from_idx].split(',')][len(image_key_columns()):]
        d = {}
        # Reverse maps must accumulate lists, so pre-seed empty lists first.
        for row in res:
            if reverse:
                d[row[key_size:]] = []
        for row in res:
            if reverse:
                d[row[key_size:]] += [row[:key_size]]
            else:
                d[row[:key_size]] = row[key_size:]
        return d, col_names
    def filter_sql(self, filter_name):
        '''Return the SQL statement that selects the image keys passing the
        named filter from p._filters.'''
        f = p._filters[filter_name]
        from . import sqltools
        if isinstance(f, sqltools.Filter):
            unique_tables = np.unique(f.get_tables())
            if len(unique_tables) > 1:
                if p.image_table in unique_tables:
                    select_name = UniqueImageClause(p.image_table)
                else:
                    select_name = UniqueImageClause(unique_tables[0])
                    logging.warn("Mixing multiple object tables in a filter is experimental, use with caution")
            else:
                select_name = UniqueImageClause()
            return 'SELECT %s FROM %s WHERE %s' % (select_name,
                                                   ','.join(unique_tables),
                                                   str(f))
        elif isinstance(f, sqltools.OldFilter):
            # Legacy filters are already complete SQL statements.
            return str(f)
        else:
            raise Exception('Invalid filter type in p._filters')
    def GetFilteredImages(self, filter_name):
        ''' Returns a list of imKeys from the given filter. '''
        try:
            # Look the filter up first so an unknown name fails here
            # (KeyError) and is reported via the except branch below.
            f = p._filters[filter_name]
            # New filters can be based on object parameters. We need to remove duplicates.
            # Using dict instead of set preserves key order without additional time cost.
            imKeys = self.execute(self.filter_sql(filter_name))
            return list(dict.fromkeys(imKeys))
        except Exception as e:
            logging.error('Filter query failed for filter "%s". Check the SQL syntax in your properties file.'%(filter_name))
            logging.error(e)
            raise Exception('Filter query failed for filter "%s". Check the SQL syntax in your properties file.'%(filter_name))
def GetFilteredObjects(self, filter_obj, N=None, random=True):
# Get filtered object keys using a passed in filter sql object
from . import sqltools
q = sqltools.QueryBuilder()
q.select(sqltools.object_cols())
q.where([filter_obj])
q.group_by(sqltools.object_cols())
if random:
if p.db_type.lower() == 'sqlite':
query = f"{str(q)} ORDER BY RANDOM()"
else:
query = f"{str(q)} ORDER BY RAND()"
else:
query = f"{str(q)} ORDER BY {p.object_table}.{p.image_id}"
if N is not None:
query += f" LIMIT {N}"
keys = self.execute(query)
return keys
    def GetGatedImages(self, gate_name):
        ''' Returns a list of imKeys from the given filter. '''
        try:
            g = p.gates[gate_name]
            from . import sqltools
            if isinstance(g, sqltools.Gate):
                unique_tables = np.unique(g.get_tables())
                if len(unique_tables) > 1:
                    if p.image_table in unique_tables:
                        select_name = UniqueImageClause(p.image_table)
                    else:
                        select_name = UniqueImageClause(unique_tables[0])
                        logging.warn("Mixing multiple object tables in a filter is experimental, use with caution")
                else:
                    select_name = UniqueImageClause()
                imKeys = self.execute(f"SELECT {select_name} FROM {','.join(unique_tables)} WHERE {str(g)}")
                # dict.fromkeys removes duplicates while preserving order.
                return list(dict.fromkeys(imKeys))
            else:
                raise Exception('Invalid gate type in p.gate')
        except Exception as e:
            logging.error('Filter query failed for filter "%s". Check the MySQL syntax in your properties file.'%(gate_name))
            logging.error(e)
            raise Exception('Filter query failed for filter "%s". Check the MySQL syntax in your properties file.'%(gate_name))
def GetGatedObjects(self, gate_name, N=None, random=True):
from . import sqltools
q = sqltools.QueryBuilder()
q.select(sqltools.object_cols())
q.where([p.gates[gate_name]])
q.group_by(sqltools.object_cols())
if random:
if p.db_type.lower() == 'sqlite':
query = f"{str(q)} ORDER BY RANDOM()"
else:
query = f"{str(q)} ORDER BY RAND()"
else:
query = f"{str(q)} ORDER BY {p.object_table}.{p.image_id}"
if N is not None:
query += f" LIMIT {N}"
keys = self.execute(query)
return keys
def GetTableNames(self):
'''
returns all table names in the database
'''
if p.db_type.lower()=='mysql':
res = self.execute('SHOW TABLES')
return [t[0] for t in res]
elif p.db_type.lower()=='sqlite':
res = self.execute('SELECT name FROM sqlite_master WHERE type="table" ORDER BY name')
return [t[0] for t in res]
def get_other_table_names(self):
'''
returns a list of table names in the database that CPA hasn't accessed.
'''
tables = list(set(self.GetTableNames()) -
set([p.image_table, p.object_table]))
return sorted(tables)
    def GetColumnNames(self, table):
        '''Returns a list of the column names for the specified table. '''
        # NOTE: SQLite doesn't like DESCRIBE or SHOW statements so we do it this way.
        # A LIMIT 1 query cheaply populates the cursor's column metadata.
        self.execute('SELECT * FROM %s LIMIT 1'%(table))
        return self.GetResultColumnNames() # return the column names
#
# Methods used for linking database tables
#
#
# link_tables_table
# +-------------------------------------+
# | src | dest | link | ord |
# +-------------------------------------+
# | obj | treat | img | 0 |
# | obj | treat | well | 1 |
# | obj | treat | treat | 2 |
#
# link_columns_table
# +-----------------------------------+
# | table1 | table2 | col1 | col2 |
# +-----------------------------------+
# | per_im | per_well | plate | plate |
# | per_im | per_well | well | well |
#
    def _add_link_tables_row(self, src, dest, link, order):
        '''adds src, dest, link, order to link_tables_table.
        '''
        self.execute('INSERT INTO %s (src, dest, link, ord) '
                     'VALUES ("%s", "%s", "%s", "%d")'
                     %(p.link_tables_table, src, dest, link, order))
    def _add_link_columns_row(self, src, dest, col1, col2):
        '''adds src, dest, col1, col2 to link_columns_table
        '''
        self.execute('INSERT INTO %s (table1, table2, col1, '
                     'col2) VALUES ("%s", "%s", "%s", "%s")'
                     %(p.link_columns_table, src, dest, col1, col2))
    def connected_tables(self, table):
        '''return tables connected (directly or indirectly) to the given table
        '''
        return [r[0] for r in self.execute('SELECT DISTINCT dest FROM %s '
                                           'WHERE src="%s"'
                                           %(p.link_tables_table, table))]
    def adjacent_tables(self, table):
        '''return tables directly connected to the given table
        '''
        # ord=0 rows are the first hop of a link path, i.e. direct links.
        return [r[0] for r in self.execute('SELECT DISTINCT link FROM %s '
                                           'WHERE src="%s" AND ord=0'
                                           %(p.link_tables_table, table))]
def adjacent(self, table1, table2):
'''return whether the given tables are adjacent
'''
return table1 in self.adjacent_tables(table2)
    def do_link_tables(self, src, dest, src_cols, dest_cols):
        '''Inserts table linking information into the database so src can
        be linked to dest through the columns specified.
        src - table to be linked in
        dest - table to link src to
        src_cols - foreign key column names in src
        dest_cols - foreign key column names in dest
        '''
        assert len(src_cols) == len(dest_cols), 'Column lists were not the same length.'
        # create the tables if they don't exist
        if p.link_tables_table.lower() not in [x.lower() for x in self.GetTableNames()]:
            self.execute('CREATE TABLE %s (src VARCHAR(100), '
                         'dest VARCHAR(100), link VARCHAR(100), ord INTEGER)'
                         %(p.link_tables_table))
        if p.link_columns_table.lower() not in [x.lower() for x in self.GetTableNames()]:
            self.execute('CREATE TABLE %s (table1 VARCHAR(100), '
                         'table2 VARCHAR(100), col1 VARCHAR(200), col2 VARCHAR(200))'
                         %(p.link_columns_table))
        if self.get_linking_tables(src, dest) is not None:
            raise Exception('Tables are already linked. Call '
                            'DBConnect.get_linking_tables to check if tables are linked '
                            'before do_link_tables.')
        # Connect src directly to dest
        self._add_link_tables_row(src, dest, dest, 0)
        for col1, col2 in zip(src_cols, dest_cols):
            self._add_link_columns_row(src, dest, col1, col2)
        # Connect src to everything dest is connected to through dest
        for t in self.connected_tables(dest):
            self._add_link_tables_row(src, t, dest, 0)
            res = self.execute('SELECT * FROM %s WHERE src="%s" AND dest="%s"'
                               %(p.link_tables_table, dest, t))
            for row in res:
                link = row[2]
                # Each hop through dest is one step further along the path.
                order = int(row[3]) + 1
                self._add_link_tables_row(src, t, link, order)
        # Connect dest back to src
        self._add_link_tables_row(dest, src, src, 0)
        for col1, col2 in zip(src_cols, dest_cols):
            self._add_link_columns_row(dest, src, col2, col1)
        self.Commit()
#
# TODO: ensure this table wasn't linking others together.
#
def do_unlink_table(self, table):
'''remove all linkage entries pertaining to the given table
'''
self.execute('DELETE FROM %s WHERE src=%s OR dest=%s OR link=%s'
%(p.link_tables_table, table, table, table))
self.execute('DELETE FROM %s WHERE table1=%s OR table2=%s'
%(p.link_columns_table, table, table))
self.Commit()
def get_linking_expressions(self, tables):
'''returns: A list of Expressions linking the tables given. These
expressions may link through some intermediate table if a path exists.
Use when constructing a where clause for a multi-table query.
An exception is raised if a path linking the tables doesn't exist. Call
DBConnect.get_linking_tables first to check that all tables are linked.
usage:
get_linking_expressions(['per_well', 'per_image', 'per_object'])
[Expression(('per_well', 'Plate'), '=', ('per_image', 'Plate')),
Expression(('per_well', 'Well'), '=', ('per_image', 'Well')),
Expression(('per_image', 'ImageNumber'), '=', ('per_object', 'ImageNumber'))]
'''
from . import sqltools as sql
for t in tables[1:]:
if self.get_linking_table_pairs(tables[0], t) is None:
raise Exception('Tables "%s" and "%s" are not linked.'%(tables[0], t))
def get_linking_clauses(table1, table2):
#helper function returns expressions that link 2 tables
return [sql.Expression(sql.Column(ta, cola), '=', sql.Column(tb, colb))
for ta, tb in self.get_linking_table_pairs(table1, table2)
for cola, colb in self.get_linking_columns(ta, tb)]
expressions = set()
for table in tables[1:]:
expressions.update(get_linking_clauses(tables[0], table))
return expressions
def get_linking_tables(self, table_from, table_to):
'''returns: an ordered list of tables that must be used to join
table_from to table_to. If the tables aren't linked in link_tables_table
then None is returned.
usage:
get_linking_tables(per_well, per_object)
[per_image, per_object]
'''
if p.link_tables_table not in self.GetTableNames():
return None
res = self.execute('SELECT link FROM %s '
'WHERE src="%s" AND dest="%s" ORDER BY ord'
%(p.link_tables_table, table_from, table_to))
return [row[0] for row in res] or None
def get_linking_table_pairs(self, table_from, table_to):
'''returns: an ordered list of table pairs that must be used to join
table_from to table_to. If the tables aren't linked in link_tables_table
then None is returned.
usage:
get_linking_table_pairs(per_well, per_object)
[(per_well, per_image), (per_image, per_object)]
'''
ltables = self.get_linking_tables(table_from, table_to)
if ltables is None:
return None
from_tables = [table_from] + [t for t in ltables[:-1]]
to_tables = ltables
return [(tfrom, tto) for tfrom, tto in zip(from_tables, to_tables)]
def get_linking_columns(self, table_from, table_to):
'''returns: a list of column pairs that can be used to join table_from
to table_to. An exception is raised if table_from is not
DIRECTLY linked to table_to in link_tables_table or if the
link_columns_table is not found.
usage: >>> get_linking_columns(per_well, per_image)
[(plateid, plate), (wellid, well)]
'''
if p.link_columns_table not in self.GetTableNames():
raise Exception('Could not find link_columns table "%s".'%(p.link_columns_table))
col_pairs = self.execute('SELECT col1, col2 FROM %s WHERE table1="%s" '
'AND table2="%s"'%(p.link_columns_table, table_from, table_to))
if len(col_pairs[0]) == 0:
raise Exception('Tables "%s" and "%s" are not directly linked in '
'the database'%(table_from, table_to))
return col_pairs
def get_linkable_tables(self):
'''returns the list of tables that CPA can link together.
'''
tables = []
if p.link_tables_table in self.GetTableNames():
tables = [row[0] for row in
self.execute('SELECT DISTINCT src FROM %s'
%(p.link_tables_table))]
if len(tables) == 0:
if p.object_table:
self.do_link_tables(p.image_table, p.object_table,
image_key_columns(), image_key_columns())
return [p.image_table, p.object_table]
else:
return [p.image_table]
return tables
def GetUserColumnNames(self, table):
'''Returns a list of the column names that start with "User_" for the
specified table. '''
return [col for col in self.GetColumnNames(table) if col.lower().startswith('user')]
def GetColumnTypes(self, table):
'''Returns python types for each column of the given table. '''
sqltypes = self.GetColumnTypeStrings(table)
return [sqltype_to_pythontype(t) for t in sqltypes]
def GetColumnType(self, table, colname):
'''Returns the python type for a given table column. '''
for col, coltype in zip(self.GetColumnNames(table), self.GetColumnTypes(table)):
if col == colname:
return coltype
def GetColumnTypeStrings(self, table):
'''Returns the SQL type string for each column of the given table.'''
if p.db_type.lower() == 'sqlite':
res = self.execute('PRAGMA table_info(%s)'%(table))
return [r[2] for r in res]
elif p.db_type.lower() == 'mysql':
res = self.execute('SHOW COLUMNS FROM %s'%(table))
return [r[1] for r in res]
def GetColumnTypeString(self, table, colname):
'''Returns the SQL type string for a given table column. '''
for col, coltype in zip(self.GetColumnNames(table), self.GetColumnTypeStrings(table)):
if col == colname:
return coltype
    def GetColnamesForClassifier(self, exclude_features_with_no_variance=False,
                                 force=False):
        '''
        Returns a list of column names for the object_table excluding
        those specified in Properties.classifier_ignore_columns
        and excluding those with zero variance (unless
        exclude_features_with_no_variance is set to False)
        The result is cached in self.classifierColNames; pass force=True to
        recompute. Returns None (and shows a dialog) when no usable columns
        remain.
        '''
        if (self.classifierColNames is None) or force:
            col_names = self.GetColumnNames(p.object_table)
            col_types = self.GetColumnTypes(p.object_table)
            # automatically ignore all string-type columns
            # ColumnTypes will return None for unknown formats, rather than a proper type.
            self.classifierColNames = [col for col, coltype in zip(col_names, col_types) if coltype not in (str, None)]
            # automatically ignore ID columns
            # NOTE(review): image_id/object_id are removed unconditionally;
            # list.remove raises ValueError if either is absent — presumably
            # they are always numeric columns of the object table; verify.
            if p.table_id in self.classifierColNames:
                self.classifierColNames.remove(p.table_id)
            self.classifierColNames.remove(p.image_id)
            self.classifierColNames.remove(p.object_id)
            if len(self.classifierColNames) == 0:
                import wx
                wx.MessageBox('No columns were found to use for classification '
                              'Please check your per-object table, it may be '
                              'empty or not contain any numeric columns.', 'Error')
                self.classifierColNames = None
                return None
            # treat each classifier_ignore_substring as a regular expression
            # for column names to ignore
            if p.classifier_ignore_columns:
                self.classifierColNames = [col for col in self.classifierColNames
                                           if not any([re.match('^'+user_exp+'$',col)
                                                       for user_exp in p.classifier_ignore_columns])]
            logging.info('Ignoring columns: %s'%([x for x in col_names if x not in self.classifierColNames]))
            if exclude_features_with_no_variance:
                # ignore columns which have no variance
                # NOTE(review): cq is built from col_names (ALL columns, not the
                # filtered list) and remove() would raise if a zero-variance
                # column was already filtered out — confirm intended.
                cq = ', '.join(['MAX(%s)-MIN(%s)'%(col,col) for col in col_names])
                res = np.array(self.execute('SELECT %s FROM %s'%(cq, p.object_table))[0])
                ignore_cols = np.array(col_names)[np.where(res==0)[0]]
                for colname in ignore_cols:
                    self.classifierColNames.remove(colname)
                    logging.warning('Ignoring column "%s" because it has zero variance'%(colname))
            if len(self.classifierColNames) == 0 and p.classifier_ignore_columns:
                import wx
                wx.MessageBox('No columns were found to use for classification '
                              'after filtering columns that matched your '
                              'classifier_ignore_columns properties setting. '
                              'Please check your properties and your per-object'
                              ' table.', 'Error')
                self.classifierColNames = None
                return None
        return self.classifierColNames
def GetResultColumnNames(self):
''' Returns the column names of the last query on this connection. '''
connID = threading.currentThread().getName()
return [x[0] for x in self.cursors[connID].description]
def GetCellDataForRedux(self):
'''
Returns a list of measurements for the specified object excluding
those specified in Properties.classifier_ignore_columns
'''
if (self.classifierColNames == None):
self.GetColnamesForClassifier()
query = 'SELECT %s FROM %s' %(', '.join([p.image_id, p.object_id] + self.classifierColNames), p.object_table)
data = self.execute(query, silent=False)
if len(data) == 0:
logging.error('No data in table')
return None
# This should be the case
valid_types = (int, float, type(None))
if not all([type(x) in valid_types for x in data[0]]):
raise ValueError("Invalid column types were found in the data. "
"Only numerical columns can be present in the object table")
return np.array(data, dtype=np.float)
def GetCellData(self, obKey):
'''
Returns a list of measurements for the specified object.
'''
query = 'SELECT * FROM %s WHERE %s' %(p.object_table, GetWhereClauseForObjects([obKey]))
data = self.execute(query, silent=True)
if len(data) == 0:
logging.error('No data for obKey: %s'%str(obKey))
return None
# fetch out only numeric data
values = [x if type(x) in (int, float) else 0.0 for x in data[0]]
return np.array(values)
def GetCellsData(self, obKeys):
'''
Returns a list of measurements for multiple objects.
'''
if p.db_type.lower() == 'mysql':
query = f'SELECT {p.image_id}, {p.object_id}, {p.object_table}.* FROM {p.object_table} WHERE {GetWhereClauseForObjects(obKeys)}'
else:
query = f'SELECT {p.image_id}, {p.object_id}, * FROM {p.object_table} WHERE {GetWhereClauseForObjects(obKeys)}'
data = self.execute(query, silent=True)
if len(data) == 0:
logging.error('No data for obKeys: %s'%str(obKeys))
return None
# fetch out only numeric data
buffer = []
for line in data:
buffer.append(((line[0], line[1]), np.array([x if type(x) in (int, float) else 0.0 for x in line[2:]])))
return buffer
def GetPlateNames(self):
'''
Returns the names of each plate in the per-image table.
'''
res = self.execute('SELECT DISTINCT %s FROM %s ORDER BY %s'%(p.plate_id, p.image_table, p.plate_id))
return [str(l[0]) for l in res]
def GetPlatesAndWellsPerImage(self):
'''
Returns rows containing image key, plate, and well
'''
if p.plate_id and p.well_id:
return self.execute('SELECT %s, %s FROM %s'%(UniqueImageClause(), ','.join(well_key_columns()), p.image_table))
else:
logging.error('Both plate_id and well_id must be defined in properties!')
def get_platewell_for_object(self, key):
if p.plate_id and p.well_id:
return self.execute('SELECT %s FROM %s WHERE %s'%(','.join(well_key_columns()), p.image_table, GetWhereClauseForImages([key[:-1]])))[0]
else:
return key[:-1]
def InferColTypesFromData(self, tabledata, nCols):
'''
For converting csv data to DB data.
Returns a list of column types (INT, FLOAT, or VARCHAR(#)) that each column can safely be converted to
tabledata: 2d iterable of strings
nCols: # of columns
'''
colTypes = ['' for i in range(nCols)]
# Maximum string length for each column (if VARCHAR)
maxLen = [0 for i in range(nCols)]
try:
tabledata[0][0]
except:
raise Exception('Cannot infer column types from an empty table.')
for row in tabledata:
for i, e in enumerate(row):
if colTypes[i]!='FLOAT' and not colTypes[i].startswith('VARCHAR'):
try:
x = int(str(e))
colTypes[i] = 'INT'
continue
except ValueError: pass
if not colTypes[i].startswith('VARCHAR'):
try:
x = float(str(e))
colTypes[i] = 'FLOAT'
continue
except ValueError: pass
try:
x = str(e)
maxLen[i] = max(len(x), maxLen[i])
colTypes[i] = 'VARCHAR(%d)'%(maxLen[i])
except ValueError:
raise Exception('Value in table could not be converted to string!')
return colTypes
def AppendColumn(self, table, colname, coltype):
'''
Appends a new column to the specified table.
The column name must begin with "User_" and contain only A-Za-z0-9_
'''
_check_colname_user(p, table, colname)
if not re.match('^[A-Za-z]\w*$', colname):
raise ValueError('Column name may contain only alphanumeric characters and underscore, and must begin with a letter.')
self.execute('ALTER TABLE %s ADD %s %s'%(table, colname, coltype))
def UpdateWells(self, table, colname, value, wellkeys):
'''
Sets the value of the specified column in the database for each row
associated with wellkeys. Pass value=None to store NULL
'''
# TODO: handle other tables
assert table == p.image_table
_check_colname_user(p, table, colname)
if type(value) == str:
if re.search(r'["\'`]', value):
raise ValueError('No quotes are allowed in values written to the database.')
value = '"'+value+'"'
if value is None:
value = 'NULL'
self.execute('UPDATE %s SET %s=%s WHERE %s'%(table, colname, value,
GetWhereClauseForWells(wellkeys)))
# for some reason non string columns need to be committed or they will not be saved
self.Commit()
def CreateSQLiteDB(self):
'''
Creates an SQLite database from files specified in properties
image_csv_file and object_csv_file.
'''
import csv
# CREATE THE IMAGE TABLE
# All the ugly code is to establish the type of each column in the table
# so we can form a proper CREATE TABLE statement.
f = open(p.image_csv_file, 'U')
r = csv.reader(f)
columnLabels = next(r)
columnLabels = [lbl.strip() for lbl in columnLabels]
dtable = get_data_table_from_csv_reader(r)
colTypes = self.InferColTypesFromData(dtable, len(columnLabels))
# Build the CREATE TABLE statement
statement = 'CREATE TABLE '+p.image_table+' ('
statement += ',\n'.join([lbl+' '+colTypes[i] for i, lbl in enumerate(columnLabels)])
keys = ','.join([x for x in [p.table_id, p.image_id, p.object_id] if x in columnLabels])
statement += ',\nPRIMARY KEY (' + keys + ') )'
f.close()
logging.info('Creating table: %s'%(p.image_table))
self.execute('DROP TABLE IF EXISTS %s'%(p.image_table))
self.execute(statement)
if not p.classification_type == 'image':
# CREATE THE OBJECT TABLE
# For the object table we assume that all values are type FLOAT
# except for the primary keys
f = open(p.object_csv_file, 'U')
r = csv.reader(f)
columnLabels = next(r)
columnLabels = [lbl.strip() for lbl in columnLabels]
dtable = get_data_table_from_csv_reader(r)
colTypes = self.InferColTypesFromData(dtable, len(columnLabels))
statement = 'CREATE TABLE '+p.object_table+' ('
statement += ',\n'.join([lbl+' '+colTypes[i] for i, lbl in enumerate(columnLabels)])
keys = ','.join([x for x in [p.table_id, p.image_id, p.object_id] if x in columnLabels])
statement += ',\nPRIMARY KEY (' + keys + ') )'
f.close()
logging.info('Creating table: %s'%(p.object_table))
self.execute('DROP TABLE IF EXISTS '+p.object_table)
self.execute(statement)
# POPULATE THE IMAGE TABLE
f = open(p.image_csv_file, 'U')
r = csv.reader(f)
row = next(r) # skip the headers
row = next(r)
while row:
self.execute('INSERT INTO '+p.image_table+' VALUES ('+','.join(["'%s'"%(i) for i in row])+')',
silent=True)
try:
row = next(r)
except StopIteration:
break
f.close()
# POPULATE THE OBJECT TABLE
if not p.classification_type == 'image':
f = open(p.object_csv_file, 'U')
r = csv.reader(f)
row = next(r) # skip the headers
row = next(r)
while row:
self.execute('INSERT INTO '+p.object_table+' VALUES ('+','.join(["'%s'"%(i) for i in row])+')',
silent=True)
try:
row = next(r)
except StopIteration: break
f.close()
self.Commit()
def CreateSQLiteDBFromCSVs(self):
'''
Creates an SQLite database from files generated by CellProfiler's
ExportToDatabase module.
'''
import csv
imcsvs, obcsvs = get_csv_filenames_from_sql_file()
# Verify that the CSVs exist
csv_dir = os.path.split(p.db_sql_file)[0] or '.'
dir_files = os.listdir(csv_dir)
for file in imcsvs + obcsvs:
assert file in dir_files, ('File "%s" was specified in %s but was '
'not found in %s.'%(file, os.path.split(p.db_sql_file)[1], csv_dir))
assert len(imcsvs)>0, ('Failed to parse image csv filenames from %s. '
'Make sure db_sql_file in your properties file is'
' set to the .SQL file output by CellProfiler\'s '
'ExportToDatabase module.'%(os.path.split(p.db_sql_file)[1]))
# parse out create table statements and execute them
f = open(p.db_sql_file)
lines = f.readlines()
create_stmts = []
i=0
in_create_stmt = False
for l in lines:
if l.upper().startswith('CREATE TABLE') or in_create_stmt:
if in_create_stmt:
create_stmts[i] += l
else:
create_stmts.append(l)
if l.strip().endswith(';'):
in_create_stmt = False
i+=1
else:
in_create_stmt = True
f.close()
for q in create_stmts:
self.execute(q)
import wx
if self.gui_parent is not None and issubclass(self.gui_parent.__class__, wx.Window):
dlg = wx.ProgressDialog('Creating sqlite DB...', '0% Complete', 100, self.gui_parent, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
else:
dlg = None
# find the number of bytes we're going to read
total_bytes = 0
for file in imcsvs + obcsvs:
total_bytes += os.path.getsize(os.path.join(csv_dir, file))
total_bytes = float(total_bytes)
base_bytes = 0
connID = threading.currentThread().getName()
# populate tables with contents of csv files
for file in imcsvs:
logging.info('Populating image table with data from %s'%file)
f = open(os.path.join(csv_dir, file), 'U')
r = csv.reader(f)
row1 = next(r)
command = 'INSERT INTO '+p.image_table+' VALUES ('+','.join(['?' for i in row1])+')'
self.cursors[connID].execute(command, row1)
self.cursors[connID].executemany(command, [l for l in r if len(l)>0])
f.close()
base_bytes += os.path.getsize(os.path.join(csv_dir, file))
pct = min(int(100 * base_bytes / total_bytes), 100)
if dlg:
c, s = dlg.Update(pct, '%d%% Complete'%(pct))
if not c:
try:
os.remove(p.db_sqlite_file)
except OSError:
wx.MessageBox('Could not remove incomplete database'
' at "%s". This file must be removed '
'manually or CPAnalyst will load it '
'the next time use use the current '
'database settings.', 'Error')
raise Exception('cancelled load')
logging.info("... loaded %d%% of CSV data"%(pct))
line_count = 0
if not p.classification_type == 'image':
assert len(obcsvs)>0, ('Failed to parse object csv filenames from %s. '
'Make sure db_sql_file in your properties file is'
' set to the .SQL file output by CellProfiler\'s '
'ExportToDatabase module.'%(os.path.split(p.db_sql_file)[1]))
for file in obcsvs:
logging.info('Populating object table with data from %s'%file)
f = open(csv_dir+os.path.sep+file, 'U')
r = csv.reader(f)
row1 = next(r)
if p.check_tables:
object_table = p.object_table
object_table = object_table.split('_checked')[0]
command = 'INSERT INTO '+object_table+' VALUES ('+','.join(['?' for i in row1])+')'
else:
command = 'INSERT INTO '+p.object_table+' VALUES ('+','.join(['?' for i in row1])+')'
# guess at a good number of lines, about 250 megabytes, assuming floats)
nlines = (250*1024*1024) // (len(row1) * 64)
self.cursors[connID].execute(command, row1)
lnum = 1
while True:
lnum += 1
# fetch a certain number of lines efficiently
args = [l for idx, l in zip(list(range(nlines)), r) if len(l) > 0]
if args == []:
break
self.cursors[connID].executemany(command, args)
line_count += len(args)
prog = line_count
# pct = min(int(100 * (f.tell() + base_bytes) / total_bytes), 100)
if dlg:
c, s = dlg.Update(prog, '%d lines loaded'%(prog))
if not c:
try:
os.remove(p.db_sqlite_file)
except OSError:
wx.MessageBox('Could not remove incomplete database'
' at "%s". This file must be removed '
'manually or CPAnalyst will load it '
'the next time use use the current '
'database settings.', 'Error')
raise Exception('cancelled load')
logging.info("... loaded %d lines of CSV data"%(prog))
f.close()
logging.info("Finished loading CSV data")
base_bytes += os.path.getsize(os.path.join(csv_dir, file))
# Commit only at very end. No use in committing if the db is incomplete.
self.Commit()
if dlg:
dlg.Destroy()
def GetImageWidthHeight(self,list_of_cols):
# Get image width and height
try:
width_col = next(name for name in list_of_cols if 'width' in name.lower())
height_col = next(name for name in list_of_cols if 'height' in name.lower())
width_query = 'SELECT %s FROM %s LIMIT 1'%(width_col, p.image_table)
height_query = 'SELECT %s FROM %s LIMIT 1'%(height_col, p.image_table)
width = self.execute(width_query)
height = self.execute(height_query)
width = int(width[0][0])
height = int(height[0][0])
except:
if p.image_width and p.image_height:
width = int(p.image_width)
height = int(p.image_height)
else:
raise Exception('Input image_width and image_height fields in properties file')
return width, height
    def CreateObjectCheckedTable(self):
        '''Build p.object_table (named "..._checked") from the raw object
        table, keeping only rows with no missing values (and, where
        AreaShape_Area columns exist, area > 0). Entirely-empty columns are
        dropped first so they cannot wipe out every row. If more than half the
        rows would be removed, the user is asked whether to keep the checked
        table or fall back to the original.
        '''
        # Create object (checked) table where there are no rows with missing/null values
        DB_NAME = p.db_name
        DB_TYPE = p.db_type.lower()
        # The raw source table name is p.object_table minus the "_checked" suffix.
        object_table = p.object_table
        object_table = object_table.split('_checked')[0]
        # Try to get a quick count of how many table rows we started with.
        try:
            query = f"SELECT COUNT(*) FROM {object_table}"
            initial_count = self.execute(query)[0][0]
        except:
            logging.error("Unable to count table rows")
            initial_count = 0
        all_cols = [str(x) for x in self.GetColumnNames(object_table)]
        # We don't want to obliterate the table if there's an entirely empty column. Let's exclude those.
        # Checking entire columns is expensive, let's restrict our search.
        # First we sample 1 row and identify columns with missing values.
        query = f"SELECT * FROM {object_table} LIMIT 1"
        res = self.execute(query)[0]
        maybe_empty_cols = [col for col, val in zip(all_cols, res) if val is None]
        if len(maybe_empty_cols) > 0:
            # Now let's check whether those missing columns are entirely empty (count will be 0).
            # count(col) counts only non-NULL values.
            query = f"SELECT {', '.join([f'count({col})' for col in maybe_empty_cols])} from {object_table}"
            res = self.execute(query)[0]
            empty_cols = set([col for col, count in zip(maybe_empty_cols, res) if count == 0])
            # Now we rebuild our table column list without the empty columns
            all_cols = [col for col in all_cols if col not in empty_cols]
            logging.info(f"Table checking dropped {len(empty_cols)} blank columns")
        AreaShape_Area = [x for x in all_cols if 'AreaShape_Area' in x]
        if DB_TYPE == 'mysql':
            if len(AreaShape_Area) > 0:
                query = f"""CREATE OR REPLACE VIEW {p.object_table} AS SELECT {', '.join(all_cols)} FROM {object_table}
                WHERE {" IS NOT NULL AND ".join(all_cols)} IS NOT NULL AND {" > 0 AND ".join(AreaShape_Area)} > 0"""
            else:
                query = f"""CREATE OR REPLACE VIEW {p.object_table} AS SELECT {', '.join(all_cols)} FROM {object_table}
                WHERE {" IS NOT NULL AND ".join(all_cols)} IS NOT NULL"""
            self.execute(query)
        elif DB_TYPE == 'sqlite':
            # SQL can only handle 1000 comparisons in a query. If we have too many columns we'll need to break it up.
            col_buffer = [all_cols[i:i + 400] for i in range(0, len(all_cols), 400)]
            # Do the largest chunk first, it'll reduce work later on.
            to_test = col_buffer.pop(0)
            query = f"PRAGMA table_info({object_table})"
            self.execute(query)
            query = f'DROP TABLE IF EXISTS {p.object_table}'
            self.execute(query)
            if len(AreaShape_Area) > 0:
                query = f"""CREATE TABLE {p.object_table} AS SELECT {', '.join(all_cols)} FROM {object_table} WHERE {
                " IS NOT NULL AND ".join(to_test)} IS NOT NULL AND {" != '' AND ".join(to_test)} != '' AND {
                " > 0 AND ".join(AreaShape_Area)} > 0"""
            else:
                query = f"""CREATE TABLE {p.object_table} AS SELECT {', '.join(all_cols)} FROM {object_table} WHERE {
                " IS NOT NULL AND ".join(to_test)} IS NOT NULL AND {" != '' AND ".join(to_test)} != ''"""
            self.execute(query)
            # Remaining column chunks are applied as DELETEs on the new table.
            for chunk in col_buffer:
                query = f"""DELETE FROM {p.object_table} WHERE {
                " IS NOT NULL AND ".join(chunk)} IS NOT NULL AND {" != '' AND ".join(chunk)} != ''"""
                self.execute(query)
            self.Commit()
        # Inform user of what we did. Also check whether we nuked the table.
        try:
            query = f"SELECT COUNT(*) FROM {p.object_table}"
            res = self.execute(query)[0][0]
            if res == 0:
                logging.error("Table checking removed all rows, you may have an empty column in your database. "
                              "Disable check_tables in your properties file if this is expected.")
            else:
                logging.info(f"Table checking removed {initial_count - res} rows with missing values")
                if res < initial_count // 2:
                    import wx
                    dlg = wx.MessageDialog(None, 'The check_tables option was enabled in your propreties file, but this '
                                                 f'would exclude {initial_count - res} of {initial_count} rows from your '
                                                 'data set (which had values missing). If using a combined object '
                                                 'table you might want to disable table checking. Should the rows be '
                                                 'excluded for this session?',
                                           'Table checking', wx.YES_NO | wx.ICON_WARNING)
                    result = dlg.ShowModal()
                    if result == wx.ID_NO:
                        # Use the non-checked table instead
                        logging.info(f"Discarded table checking results, using original object table")
                        p.object_table = object_table
        except:
            logging.error("Unable to validate checked object table")
        try:
            if not self.get_linking_tables(p.image_table, p.object_table):
                # Link image table to the checked object table.
                self.do_link_tables(p.image_table, p.object_table, image_key_columns(), image_key_columns())
        except:
            logging.error("Unable to link checked tables upon creation. Some SQL filters may not work correctly.")
    def CreateObjectImageTable(self):
        '''Create a pseudo object table for image-based classification:
        one "object" per image (object id 1), located at the image center.
        MySQL builds a view over the image table; SQLite materializes a copy
        with the extra key/location columns prepended.
        '''
        # Create object table for image classification
        DB_NAME = p.db_name
        DB_TYPE = p.db_type.lower()
        if DB_TYPE == 'mysql':
            query = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s'"%(DB_NAME, p.image_table)
            self.execute(query)
            list_of_cols = []
            cols = [x for x in self.GetColumnNames(p.image_table)]
            list_of_cols.extend([str(x) for x in cols])
            width, height = self.GetImageWidthHeight(list_of_cols)
            # Each image becomes one object (id 1) positioned at the image center.
            query = "CREATE OR REPLACE VIEW %s AS SELECT 1 AS %s, %d AS %s, %d AS %s, %s.* FROM %s"%(p.object_table, p.object_id, width/2, p.cell_x_loc, height/2, p.cell_y_loc, p.image_table,p.image_table)
            self.execute(query)
        elif DB_TYPE == 'sqlite':
            # Copy image table and add more columns
            query = "PRAGMA table_info(%s)"%p.image_table
            self.execute(query)
            list_of_cols = [str(x) for x in self.GetColumnNames(p.image_table)]
            list_of_colTypes = [str(x) for x in self.GetColumnTypeStrings(p.image_table)]
            all_cols = list(list_of_cols)
            all_colTypes = list(list_of_colTypes)
            pid_index = all_cols.index(p.image_id)
            all_cols.remove(p.image_id)
            all_colTypes.remove(list_of_colTypes[pid_index])
            # Key and location columns go first; image/object ids reuse the
            # image_id column's original SQL type.
            all_cols = [p.image_id, p.object_id, p.cell_x_loc, p.cell_y_loc] + all_cols
            list_of_colTypes = [list_of_colTypes[pid_index], list_of_colTypes[pid_index], 'float', 'float'] + all_colTypes
            object_table = p.object_table
            if object_table.endswith('_checked'):
                object_table = object_table[:-8]
            query = 'DROP TABLE IF EXISTS %s'%(object_table)
            self.execute(query)
            query = 'CREATE TABLE %s (%s)'%(object_table, ",".join([all_cols[i]+' '+list_of_colTypes[i] for i in range(len(all_cols))]))
            self.execute(query)
            query = 'INSERT INTO %s (%s) SELECT %s FROM %s'%(object_table, ",".join(list_of_cols), ",".join(list_of_cols), p.image_table)
            self.execute(query)
            #Get info on image width and height (assuming they are fields in the image table) to get image center
            width, height = self.GetImageWidthHeight(list_of_cols)
            query = "UPDATE %s SET %s=1, %s=%s, %s=%s"%(object_table, p.object_id,
                                                        p.cell_x_loc, width/2,
                                                        p.cell_y_loc, height/2)
            self.execute(query)
            self.Commit()
        # NOTE(review): object_table is only assigned in the sqlite branch;
        # for mysql (or other db types) the lines below would raise NameError.
        # Confirm whether this path is ever reached with DB_TYPE == 'mysql'.
        if self.get_linking_tables(object_table, p.image_table) is None:
            # Link the temporary table if needed
            self.do_link_tables(object_table, p.image_table, image_key_columns(), image_key_columns())
        logging.info('%s table added to database'%object_table)
def table_exists(self, name):
res = []
if p.db_type.lower() == 'mysql':
res = self.execute("SELECT table_name FROM information_schema.tables WHERE table_name='%s' AND table_schema='%s'"%(name, p.db_name))
else:
res = self.execute("SELECT name FROM sqlite_master WHERE type='table' and name='%s'"%(name))
res += self.execute("SELECT name FROM sqlite_temp_master WHERE type='table' and name='%s'"%(name))
return len(res) > 0
def CreateTempTableFromCSV(self, filename, tablename):
'''
Reads a csv file into a temporary table in the database.
Column names are taken from the first row.
Column types are inferred from the data.
'''
import csv
if hasattr(filename, 'read'):
f = filename
else:
f = open(filename, 'U')
r = csv.reader(f)
self.execute('DROP TABLE IF EXISTS %s'%(tablename))
colnames = next(r)
dtable = np.array(get_data_table_from_csv_reader(r))
typed_table = []
for i in range(dtable.shape[1]):
try:
col = np.array(dtable[:,i], dtype=str)
col = np.array(dtable[:,i], dtype=float)
col = np.array(dtable[:,i], dtype=int)
except:
pass
typed_table += [col]
typed_table = np.array(typed_table, dtype=object).T
return self.CreateTempTableFromData(typed_table, colnames, tablename)
def create_empty_table(self, tablename, colnames, coltypes, temporary=False):
'''Creates an empty table with the given tablename and columns.
Note: column names will automatically be cleaned up.
'''
self.execute('DROP TABLE IF EXISTS %s'%(tablename))
# Clean up column names
colnames = clean_up_colnames(colnames)
coldefs = ', '.join(['`%s` %s'%(lbl, coltypes[i]) for i, lbl in enumerate(colnames)])
if not temporary:
self.execute('CREATE TABLE %s (%s)'%(tablename, coldefs))
else:
self.execute('CREATE TEMPORARY TABLE %s (%s)'%(tablename, coldefs))
def create_default_indexes_on_table(self, tablename):
'''automatically adds indexes to all the image, object, and well key
columns in the specified table
'''
for key in list(well_key_columns() or []) + list(object_key_columns()):
if key in self.GetColumnNames(tablename):
self.execute('CREATE INDEX %s ON %s (%s)'%('%s_%s'%(tablename,key), tablename, key))
def insert_rows_into_table(self, tablename, colnames, coltypes, rows):
'''Inserts the given rows into the table
'''
for row in rows:
vals = []
for i, val in enumerate(row):
if (coltypes[i]=='FLOAT' and (np.isinf(val) or np.isnan(val))
or val is None):
vals += ['NULL']
else:
vals += ['"%s"'%val]
vals = ', '.join(vals)
self.execute('INSERT INTO %s (%s) VALUES (%s)'%(
tablename, ', '.join(colnames), vals), silent=True)
    def CreateTempTableFromData(self, dtable, colnames, tablename, temporary=True):
        '''Creates and populates a temporary table in the database.
        Thin wrapper around CreateTableFromData with temporary=True by default;
        see that method for parameter details. Returns True on success.
        '''
        return self.CreateTableFromData(dtable, colnames, tablename, temporary=temporary)
    def CreateTableFromData(self, dtable, colnames, tablename, temporary=False, coltypes=None):
        '''Creates and populates a table in the database.
        dtable -- array of the data to populate the table with (SQL data types
                  are inferred from the array data)
        colnames -- the column names to use (note: these will be cleaned up if
                    invalid characters are used)
        tablename -- the name of the table
        temporary -- whether the table should be created as temporary
        coltypes -- optional list of SQL type strings; inferred from the data
                    when None
        Returns True on success.
        '''
        colnames = clean_up_colnames(colnames)
        if coltypes is None:
            coltypes = self.InferColTypesFromData(dtable, len(colnames))
        self.create_empty_table(tablename, colnames, coltypes, temporary)
        # Index key columns before inserting so lookups on the new table are fast.
        self.create_default_indexes_on_table(tablename)
        logging.info('Populating %stable %s...'%((temporary and 'temporary ' or ''), tablename))
        self.insert_rows_into_table(tablename, colnames, coltypes, dtable)
        self.Commit()
        return True
def is_view(self, table):
if p.db_type == 'sqlite':
return False
self.execute('SHOW CREATE TABLE %s'%(table))
res = self.GetResultColumnNames()
return res[0].lower() == 'view'
def CheckTables(self):
    '''
    Queries the DB to check that the per_image and per_object
    tables agree on image numbers.
    '''
    if p.db_type=='sqlite':
        # sqlite databases are generated by CPA itself, so skip the checks.
        logging.warn('Skipping table checking step for sqlite')
        return
    logging.info('Checking database tables...')
    if not self.is_view(p.image_table):
        # For now, don't check indices on views.
        # Check for index on image_table
        res = self.execute('SHOW INDEX FROM %s'%(p.image_table))
        # r[4] is the Column_name field of MySQL's SHOW INDEX output.
        idx_cols = [r[4] for r in res]
        for col in image_key_columns():
            if col not in idx_cols:
                import wx
                # NOTE(review): this dialog interpolates p.object_table even
                # though the check above ran on the image table — looks like a
                # copy/paste slip from the object-table branch below; confirm.
                wx.MessageDialog(self.gui_parent, 'Column "%s" is not indexed in table '
                    '"%s" Without column indices, dabase performance will be '
                    'severly slowed.\n'
                    'To avoid this warning, set check_tables = false in your '
                    'properties file.'%(col, p.object_table),
                    'Missing column index',
                    style=wx.OK|wx.ICON_EXCLAMATION).ShowModal()
    else:
        logging.warn('%s is a view. CheckTables will skip the index check on this table'%(p.image_table))
    # Explicitly check for TableNumber in case it was not specified in props file
    if not p.object_table and 'TableNumber' in self.GetColumnNames(p.image_table):
        raise ValueError('Indexed column "TableNumber" was found in the database but not in your properties file.')
    # STOP here if there is no object table
    if not p.object_table:
        return
    if not self.is_view(p.object_table):
        # Check for index on object_table
        res = self.execute('SHOW INDEX FROM %s'%(p.object_table))
        idx_cols = [r[4] for r in res]
        for col in object_key_columns():
            if col not in idx_cols:
                import wx
                wx.MessageDialog(self.gui_parent, 'Column "%s" is not indexed in table '
                    '"%s" Without column indices, dabase performance will be '
                    'severly slowed.\n'
                    'To avoid this warning, set check_tables = false in your '
                    'properties file.'%(col, p.object_table),
                    'Missing column index',
                    style=wx.OK|wx.ICON_EXCLAMATION).ShowModal()
    else:
        logging.warn('%s is a view. CheckTables will skip the index check on this table'%(p.object_table))
    # Explicitly check for TableNumber in case it was not specified in props file
    if ('TableNumber' not in object_key_columns()) and ('TableNumber' in self.GetColumnNames(p.object_table)):
        raise ValueError('Indexed column "TableNumber" was found in the database but not in your properties file.')
    elif ('TableNumber' in self.GetColumnNames(p.object_table)):
        # Reached only when TableNumber IS already in object_key_columns().
        logging.warn('TableNumber column was found indexed in your image table but not your object table.')
    elif ('TableNumber' not in object_key_columns()):
        logging.warn('TableNumber column was found indexed in your object table but not your image table.')
    # Removed because it doesn't work (ignores TableNumber), and is slow.
    #
    # # Check for orphaned objects
    # obims = [(c[0]) for c in self.execute('SELECT %s, COUNT(*) FROM %s GROUP BY %s'%(p.image_id, p.object_table, p.image_id))]
    # imims = self.execute('SELECT %s FROM %s'%(p.image_id, p.image_table))
    # orphans = set(obims) - set(imims)
    # assert not orphans, 'Objects were found in "%s" that had no corresponding image key in "%s"'%(p.object_table, p.image_table)
    # Check for unlabeled wells
    if p.well_id:
        res = self.execute('SELECT %s FROM %s WHERE %s IS NULL OR %s=""'%(UniqueImageClause(), p.image_table, p.well_id, p.well_id))
        if any(res):
            logging.warn('WARNING: Images were found in "%s" that had a NULL or empty "%s" column value'%(p.image_table, p.well_id))
    # Check for unlabeled plates
    if p.plate_id:
        res = self.execute('SELECT %s FROM %s WHERE %s IS NULL OR %s=""'%(UniqueImageClause(), p.image_table, p.plate_id, p.plate_id))
        if any(res):
            logging.warn('WARNING: Images were found in "%s" that had a NULL or empty "%s" column value'%(p.image_table, p.plate_id))
    logging.info('Done checking database tables.')
def histogram(self, column, table_or_query, nbins, range=None):
    """
    Compute a 1-D histogram entirely in the database.

    column          -- a single column name, as a string
    table_or_query  -- either a table name or a subquery, as a string
    nbins           -- the number of desired bins in the histogram
    range           -- the lower and upper range of the bins (pair), or
                       None to take the min/max of the column itself

    Returns (hist, bin_edges), where hist is a numpy array of size
    nbins and bin_edges is a numpy array of size nbins + 1.
    """
    # FIX: the original bound locals named min, max and bin, shadowing the
    # Python builtins; they are renamed here (the `range` parameter name is
    # part of the public interface and is kept).
    if ' ' in table_or_query:
        # A space means it is a subquery; wrap it so it can appear in FROM.
        table_clause = "(%s) as foo"%(table_or_query,)
    else:
        table_clause = table_or_query
    if range is None:
        # Derive the data range from the database itself.
        data = self.execute("select min(%s), max(%s) from %s" %
                            (column, column, table_clause))
        lo = data[0][0]
        hi = data[0][1]
    else:
        lo, hi = range
    # SQL expression mapping each value to a bin index in [0, nbins].
    # NOTE(review): divides by (hi - lo); a constant column (hi == lo)
    # would make the query divide by zero — confirm callers avoid this.
    clause = ("round(%d * (%s - (%f)) / (%f - (%f)))" %
              (nbins, column, lo, hi, lo))
    h = np.zeros(nbins)
    res = self.execute("select %s as bin, count(*) from %s "
                       "where %s <= %d "
                       "group by %s order by bin" % (clause, table_clause,
                                                     clause, nbins, clause))
    for bin_index, count in res:
        if bin_index == nbins:
            # The maximum value rounds to index nbins; fold it into the
            # last bin, matching numpy.histogram's closed upper edge.
            bin_index -= 1
        h[bin_index] = count
    return h, np.linspace(lo, hi, nbins + 1)
def get_objects_modify_date(self):
    """Return the last-modified timestamp of the per-object data."""
    if p.db_type.lower() != 'mysql':
        # sqlite: the whole database is one file, so use its mtime.
        return os.path.getmtime(p.db_sqlite_file)
    # MySQL: ask the information schema when the object table was updated.
    query = ("select UPDATE_TIME from INFORMATION_SCHEMA.TABLES "
             "WHERE TABLE_NAME='%s' and TABLE_SCHEMA='%s'"
             % (p.object_table, p.db_name))
    return self.execute(query)[0][0]
def verify_objects_modify_date_earlier(self, later):
    """Return True if the object data was last modified at or before *later*.

    FIX: the original fetched the modification date into a local `cur` and
    then discarded it, calling get_objects_modify_date() a second time for
    the comparison — a redundant (potentially expensive) DB round trip.
    """
    return self.get_objects_modify_date() <= later
def register_gui_parent(self, parent):
    # Remember the GUI parent window; CheckTables uses it as the parent of
    # its "Missing column index" wx.MessageDialog.
    self.gui_parent = parent
class Entity(object):
    """Abstract class containing code that is common to Images and
    Objects. Do not instantiate directly.

    Builder methods (offset, limit, filter, group_by, where, ordering,
    project) each return a deep copy with the change applied, leaving the
    original query object untouched, so queries can be chained fluently.
    """
    class dbiter(object):
        # Iterator over the rows produced by an Entity's all_query().
        def __init__(self, objects, db):
            # objects: the Entity being iterated; db: a DBConnect instance.
            self.length = objects.count()
            self.db = db
            # Execute the query but leave the rows on the server-side cursor.
            self.db.execute(objects.all_query(), return_result=False)
            self.columns = self.db.GetResultColumnNames()
        def __iter__(self):
            return self
        def __len__(self):
            return self.length
        def structured_array(self):
            # Fetch all remaining rows as a numpy structured array.
            return self.db.get_results_as_structured_array()
        def sample(self, n):
            """
            Arguments:
            n -- a non-negative integer or None
            If n is None or n >= length, return all results.
            """
            # NOTE(review): despite the docstring, min(None, int) raises
            # TypeError on Python 3, so n=None cannot work here — confirm
            # no caller passes None. Also `list` shadows the builtin.
            list = self.db._get_results_as_list()
            n = min(n, len(self))
            return random.sample(list, n)
        def __next__(self):
            try:
                r = self.db.GetNextResult()
                if r:
                    return r
                else:
                    raise StopIteration
            except GeneratorExit:
                # Drain the server-side cursor so the shared connection is
                # left in a usable state if iteration is abandoned.
                print("GeneratorExit")
                connID = threading.currentThread().getName()
                self.db.cursors[connID].fetchall()
    def __init__(self):
        self._where = []           # SQL predicates, ANDed together
        self.filters = []          # names of properties-file filters to join
        self._offset = None        # OFFSET value, or None
        self._limit = None         # LIMIT value, or None
        self._ordering = None      # list of ORDER BY columns, or None
        self._columns = None       # explicit SELECT columns, or None = default
        self.group_columns = []    # GROUP BY columns
    def offset(self, offset):
        # Offsets accumulate: q.offset(10).offset(5) skips 15 rows.
        new = copy.deepcopy(self)
        new._offset = (0 if new._offset is None else new._offset) + offset
        return new
    def limit(self, limit):
        new = copy.deepcopy(self)
        new._limit = limit
        return new
    def filter(self, name):
        """Add a filter (as defined in the properties file) by name."""
        new = copy.deepcopy(self)
        new.filters.append(name)
        return new
    def group_by(self, group_columns):
        # Accepts a single column name or a list/tuple of names.
        new = copy.deepcopy(self)
        if type(group_columns) == str:
            new.group_columns += [group_columns]
        elif type(group_columns) in [list, tuple]:
            new.group_columns += group_columns
        else:
            # Unsupported argument type; a bare raise with no active
            # exception surfaces as a RuntimeError at runtime.
            raise
        return new
    def where(self, predicate):
        new = copy.deepcopy(self)
        new._where.append(predicate)
        return new
    def _get_where_clause(self):
        # Empty string when there are no predicates, so all_query stays valid.
        return "" if self._where == [] else "WHERE " + \
            " AND ".join(self._where)
    where_clause = property(_get_where_clause)
    def _get_group_by_clause(self):
        if self.group_columns == []:
            return ''
        else:
            return "GROUP BY " + ",".join(self.group_columns)
    group_by_clause = property(_get_group_by_clause)
    def count(self):
        # COUNT(*) over the same FROM/WHERE, then adjust for offset/limit.
        c = DBConnect().execute(self.all_query(columns=["COUNT(*)"]))[0][0]
        c = max(0, c - (self._offset or 0))
        # NOTE(review): max() here means the count can never fall below the
        # limit; capping would be min(c, self._limit) — confirm the intent.
        c = max(c, self._limit or 0)
        return c
    def all(self):
        return self.dbiter(self, DBConnect())
    def all_query(self, columns=None):
        # Assemble the full SELECT from the individual clause properties.
        return "SELECT %s FROM %s %s %s %s %s" % (
            ",".join(columns or self.columns()),
            self.from_clause,
            self.where_clause,
            self.group_by_clause,
            self.ordering_clause,
            self.offset_limit_clause)
    def _get_ordering_clause(self):
        if self._ordering is None:
            return ""
        else:
            return "ORDER BY " + ", ".join(self._ordering)
    ordering_clause = property(_get_ordering_clause)
    def _get_offset_limit_clause(self):
        # Emits "LIMIT n", "OFFSET n", both, or neither, space-joined.
        return " ".join((self._limit and ["LIMIT %d" % self._limit] or []) +
                        (self._offset and ["OFFSET %d" % self._offset] or []))
    offset_limit_clause = property(_get_offset_limit_clause)
    def ordering(self, ordering):
        # ordering: list of column names for ORDER BY.
        new = copy.deepcopy(self)
        new._ordering = ordering
        return new
    def project(self, columns):
        # Restrict the SELECT list to the given columns.
        new = copy.deepcopy(self)
        new._columns = columns
        return new
class Union(Entity):
    """Entity representing the SQL UNION of several component queries."""
    def __init__(self, *args):
        super(Union, self).__init__()
        self.operands = args
    def all_query(self, *args, **kwargs):
        # Build each operand's query and glue them together with UNION.
        queries = [operand.all_query(*args, **kwargs)
                   for operand in self.operands]
        return " UNION ".join(queries)
class Images(Entity):
    '''
    Easy access to images and their objects.

    # Get all objects treated with 10 uM nocodazole
    > cpa.dbconnect.Images().filter(compound_name).where("cast(Image_LoadedText_Platemap as decimal) = 10").objects()
    '''
    def __init__(self):
        super(Images, self).__init__()
    def _get_from_clause(self):
        # Tables referenced via qualified "table.column" names in the SELECT
        # list must also appear in the FROM clause.
        t = set([col[:col.index('.')] for col in self.columns() if '.' in col])
        # BUG FIX: the original subtracted set(Properties().image_table),
        # i.e. the set of the table name's *characters*, so the image table
        # was never removed from t and could be listed in FROM twice.
        t = t - set([Properties().image_table])
        from_clause = [Properties().image_table] + list(t)
        # Join in the subquery of every named properties-file filter.
        for filter in self.filters:
            from_clause.append("JOIN (%s) AS %s USING (%s)" %
                               (Properties()._filters[filter],
                                'filter_SQL_' + filter,
                                ", ".join(image_key_columns())))
        return " ".join(from_clause)
    from_clause = property(_get_from_clause)
    def objects(self):
        """Return an Objects query restricted to this image query's rows."""
        if self._offset is not None or self._limit is not None:
            raise ValueError("Cannot join with objects after applying "
                             "offset/limit.")
        return Objects(images=self)
    def columns(self):
        # Explicitly projected columns, or all image-table columns by default.
        return self._columns or DBConnect().GetColumnNames(Properties().image_table)
class Objects(Entity):
    '''
    Easy access to objects.

    > feature = "Cells_NumberNeighbors_SecondClosestDistance"
    > y = [row[0] for row in Objects().ordering([feature]).project([feature]).all()]
    '''
    def __init__(self, images=None):
        # When built from an Images query (via Images.objects()), inherit
        # its WHERE predicates and filters so the same rows are selected.
        super(Objects, self).__init__()
        if images is None:
            self._images = None
        else:
            self._images = images
            # NOTE(review): _where and filters are shared by reference with
            # the parent Images query; mutating either afterwards would
            # affect both — confirm that is intended.
            self._where = images._where
            self.filters = images.filters
    def _get_from_clause(self):
        # Object table, joined to the image table (when derived from an
        # Images query) and to any named filter subqueries.
        from_clause = [Properties().object_table]
        if self._images is not None:
            from_clause.append("JOIN %s USING (%s)"%
                               (Properties().image_table,
                                ", ".join(image_key_columns())))
        for filter in self.filters:
            from_clause.append("JOIN (%s) AS %s USING (%s)" %
                               (Properties()._filters[filter],
                                'filter_SQL_' + filter,
                                ", ".join(image_key_columns())))
        return " ".join(from_clause)
    from_clause = property(_get_from_clause)
    def columns(self):
        # Default: the object key columns plus the classifier feature columns.
        return self._columns or list(object_key_columns()) + \
            DBConnect().GetColnamesForClassifier()
    def standard_deviations(self):
        """Returns a list of the standard deviations of the non-key columns.
        Offsets and limits are ignored here, not sure if they should be."""
        db = DBConnect()
        return db.execute("SELECT %s FROM %s %s"%(
            ",".join(["STD(%s)" % c
                      for c in db.GetColnamesForClassifier()]),
            self.from_clause, self.where_clause))[0]
    def __add__(self, other):
        # Combining two object queries yields their SQL UNION.
        return Union(self, other)
if __name__ == "__main__":
    ''' For debugging only... '''
    import wx
    # Minimal wx application so dialogs (property loading, warnings) work.
    app = wx.App()
    # Verbose logging to stdout while debugging interactively.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    # Ask the user to pick a properties file, then enter the GUI loop.
    p.show_load_dialog()
    app.MainLoop()
| [
"csv.reader",
"numpy.sum",
"random.sample",
"MySQLdb.cursors.SSCursor",
"numpy.isnan",
"numpy.mean",
"threading.currentThread",
"logging.error",
"logging.warning",
"re.findall",
"numpy.linspace",
"re.search",
"copy.deepcopy",
"re.match",
"numpy.isinf",
"wx.App",
"sqlite3.connect",
... | [((8500, 8544), 're.findall', 're.findall', (['""" \'\\\\w+\\\\.[Cc][Ss][Vv]\' """', 'lines'], {}), '(" \'\\\\w+\\\\.[Cc][Ss][Vv]\' ", lines)\n', (8510, 8544), False, 'import re\n'), ((99522, 99530), 'wx.App', 'wx.App', ([], {}), '()\n', (99528, 99530), False, 'import wx\n'), ((99535, 99594), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (99554, 99594), False, 'import logging\n'), ((6417, 6433), 'numpy.array', 'np.array', (['imkeys'], {}), '(imkeys)\n', (6425, 6433), True, 'import numpy as np\n'), ((11049, 11108), 'logging.info', 'logging.info', (["('[%s] Connecting to the database...' % connID)"], {}), "('[%s] Connecting to the database...' % connID)\n", (11061, 11108), False, 'import logging\n'), ((55833, 55863), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float'}), '(data, dtype=np.float)\n', (55841, 55863), True, 'import numpy as np\n'), ((56373, 56389), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (56381, 56389), True, 'import numpy as np\n'), ((61463, 61476), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (61473, 61476), False, 'import csv\n'), ((62081, 62131), 'logging.info', 'logging.info', (["('Creating table: %s' % p.image_table)"], {}), "('Creating table: %s' % p.image_table)\n", (62093, 62131), False, 'import logging\n'), ((63342, 63355), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (63352, 63355), False, 'import csv\n'), ((80437, 80494), 'logging.info', 'logging.info', (["('%s table added to database' % object_table)"], {}), "('%s table added to database' % object_table)\n", (80449, 80494), False, 'import logging\n'), ((81399, 81412), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (81409, 81412), False, 'import csv\n'), ((84808, 84900), 'logging.info', 'logging.info', (["('Populating %stable %s...' % (temporary and 'temporary ' or '', tablename))"], {}), "('Populating %stable %s...' 
% (temporary and 'temporary ' or '',\n tablename))\n", (84820, 84900), False, 'import logging\n'), ((85532, 85575), 'logging.info', 'logging.info', (['"""Checking database tables..."""'], {}), "('Checking database tables...')\n", (85544, 85575), False, 'import logging\n'), ((89851, 89897), 'logging.info', 'logging.info', (['"""Done checking database tables."""'], {}), "('Done checking database tables.')\n", (89863, 89897), False, 'import logging\n'), ((91001, 91016), 'numpy.zeros', 'np.zeros', (['nbins'], {}), '(nbins)\n', (91009, 91016), True, 'import numpy as np\n'), ((93591, 93610), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (93604, 93610), False, 'import copy\n'), ((93748, 93767), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (93761, 93767), False, 'import copy\n'), ((93929, 93948), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (93942, 93948), False, 'import copy\n'), ((94055, 94074), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (94068, 94074), False, 'import copy\n'), ((94359, 94378), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (94372, 94378), False, 'import copy\n'), ((95936, 95955), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (95949, 95955), False, 'import copy\n'), ((96055, 96074), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (96068, 96074), False, 'import copy\n'), ((19608, 19711), 'logging.info', 'logging.info', (['(\'Closed connection: %s as %s@%s (connID="%s").\' % (db_name, db_user,\n db_host, connID))'], {}), '(\'Closed connection: %s as %s@%s (connID="%s").\' % (db_name,\n db_user, db_host, connID))\n', (19620, 19711), False, 'import logging\n'), ((19734, 19796), 'logging.warn', 'logging.warn', (['(\'No database connection ID "%s" found!\' % connID)'], {}), '(\'No database connection ID "%s" found!\' % connID)\n', (19746, 19796), False, 'import logging\n'), ((21897, 21934), 'logging.debug', 'logging.debug', (["('[%s] Commit' % 
connID)"], {}), "('[%s] Commit' % connID)\n", (21910, 21934), False, 'import logging\n'), ((53436, 53537), 'logging.info', 'logging.info', (["('Ignoring columns: %s' % [x for x in col_names if x not in self.\n classifierColNames])"], {}), "('Ignoring columns: %s' % [x for x in col_names if x not in\n self.classifierColNames])\n", (53448, 53537), False, 'import logging\n'), ((55449, 55482), 'logging.error', 'logging.error', (['"""No data in table"""'], {}), "('No data in table')\n", (55462, 55482), False, 'import logging\n'), ((57797, 57870), 'logging.error', 'logging.error', (['"""Both plate_id and well_id must be defined in properties!"""'], {}), "('Both plate_id and well_id must be defined in properties!')\n", (57810, 57870), False, 'import logging\n'), ((59923, 59958), 're.match', 're.match', (['"""^[A-Za-z]\\\\w*$"""', 'colname'], {}), "('^[A-Za-z]\\\\w*$', colname)\n", (59931, 59958), False, 'import re\n'), ((60565, 60593), 're.search', 're.search', (['"""["\\\\\'`]"""', 'value'], {}), '(\'["\\\\\\\'`]\', value)\n', (60574, 60593), False, 'import re\n'), ((62495, 62508), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (62505, 62508), False, 'import csv\n'), ((63093, 63144), 'logging.info', 'logging.info', (["('Creating table: %s' % p.object_table)"], {}), "('Creating table: %s' % p.object_table)\n", (63105, 63144), False, 'import logging\n'), ((63859, 63872), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (63869, 63872), False, 'import csv\n'), ((66076, 66249), 'wx.ProgressDialog', 'wx.ProgressDialog', (['"""Creating sqlite DB..."""', '"""0% Complete"""', '(100)', 'self.gui_parent', '(wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.\n PD_CAN_ABORT)'], {}), "('Creating sqlite DB...', '0% Complete', 100, self.\n gui_parent, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.\n PD_REMAINING_TIME | wx.PD_CAN_ABORT)\n", (66093, 66249), False, 'import wx\n'), ((66677, 66740), 'logging.info', 'logging.info', (["('Populating image table with 
data from %s' % file)"], {}), "('Populating image table with data from %s' % file)\n", (66689, 66740), False, 'import logging\n'), ((66810, 66823), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (66820, 66823), False, 'import csv\n'), ((67904, 67953), 'logging.info', 'logging.info', (["('... loaded %d%% of CSV data' % pct)"], {}), "('... loaded %d%% of CSV data' % pct)\n", (67916, 67953), False, 'import logging\n'), ((81908, 81943), 'numpy.array', 'np.array', (['typed_table'], {'dtype': 'object'}), '(typed_table, dtype=object)\n', (81916, 81943), True, 'import numpy as np\n'), ((85448, 85503), 'logging.warn', 'logging.warn', (['"""Skipping table checking step for sqlite"""'], {}), "('Skipping table checking step for sqlite')\n", (85460, 85503), False, 'import logging\n'), ((86457, 86564), 'logging.warn', 'logging.warn', (["('%s is a view. CheckTables will skip the index check on this table' % p.\n image_table)"], {}), "(\n '%s is a view. CheckTables will skip the index check on this table' % p\n .image_table)\n", (86469, 86564), False, 'import logging\n'), ((87797, 87905), 'logging.warn', 'logging.warn', (["('%s is a view. CheckTables will skip the index check on this table' % p.\n object_table)"], {}), "(\n '%s is a view. CheckTables will skip the index check on this table' % p\n .object_table)\n", (87809, 87905), False, 'import logging\n'), ((91417, 91449), 'numpy.linspace', 'np.linspace', (['min', 'max', '(nbins + 1)'], {}), '(min, max, nbins + 1)\n', (91428, 91449), True, 'import numpy as np\n'), ((92913, 92935), 'random.sample', 'random.sample', (['list', 'n'], {}), '(list, n)\n', (92926, 92935), False, 'import random\n'), ((10996, 11021), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (11019, 11021), False, 'import threading\n'), ((11379, 11522), 'logging.warn', 'logging.warn', (['(\'A connection already exists for this thread. 
%s as %s@%s (connID = "%s").\' %\n (p.db_name, p.db_user, p.db_host, connID))'], {}), '(\n \'A connection already exists for this thread. %s as %s@%s (connID = "%s").\'\n % (p.db_name, p.db_user, p.db_host, connID))\n', (11391, 11522), False, 'import logging\n'), ((11856, 11950), 'MySQLdb.connect', 'MySQLdb.connect', ([], {'host': 'p.db_host', 'db': 'p.db_name', 'user': 'p.db_user', 'passwd': '(p.db_passwd or None)'}), '(host=p.db_host, db=p.db_name, user=p.db_user, passwd=p.\n db_passwd or None)\n', (11871, 11950), False, 'import MySQLdb\n'), ((12075, 12089), 'MySQLdb.cursors.SSCursor', 'SSCursor', (['conn'], {}), '(conn)\n', (12083, 12089), False, 'from MySQLdb.cursors import SSCursor\n'), ((12257, 12362), 'logging.debug', 'logging.debug', (["('[%s] Connected to database: %s as %s@%s' % (connID, p.db_name, p.db_user,\n p.db_host))"], {}), "('[%s] Connected to database: %s as %s@%s' % (connID, p.\n db_name, p.db_user, p.db_host))\n", (12270, 12362), False, 'import logging\n'), ((14392, 14457), 'logging.info', 'logging.info', (["('[%s] SQLite file: %s' % (connID, p.db_sqlite_file))"], {}), "('[%s] SQLite file: %s' % (connID, p.db_sqlite_file))\n", (14404, 14457), False, 'import logging\n'), ((14495, 14527), 'sqlite3.connect', 'sqlite.connect', (['p.db_sqlite_file'], {}), '(p.db_sqlite_file)\n', (14509, 14527), True, 'import sqlite3 as sqlite\n'), ((18568, 18644), 'logging.debug', 'logging.debug', (["('[%s] Connected to database: %s' % (connID, p.db_sqlite_file))"], {}), "('[%s] Connected to database: %s' % (connID, p.db_sqlite_file))\n", (18581, 18644), False, 'import logging\n'), ((20328, 20353), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (20351, 20353), False, 'import threading\n'), ((20702, 20744), 'logging.debug', 'logging.debug', (["('[%s] %s' % (connID, query))"], {}), "('[%s] %s' % (connID, query))\n", (20715, 20744), False, 'import logging\n'), ((21836, 21861), 'threading.currentThread', 'threading.currentThread', ([], {}), 
'()\n', (21859, 21861), False, 'import threading\n'), ((22248, 22273), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (22271, 22273), False, 'import threading\n'), ((22855, 22880), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (22878, 22880), False, 'import threading\n'), ((24238, 24263), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (24261, 24263), False, 'import threading\n'), ((36478, 36601), 'logging.error', 'logging.error', (['(\'Filter query failed for filter "%s". Check the SQL syntax in your properties file.\'\n % filter_name)'], {}), '(\n \'Filter query failed for filter "%s". Check the SQL syntax in your properties file.\'\n % filter_name)\n', (36491, 36601), False, 'import logging\n'), ((36604, 36620), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (36617, 36620), False, 'import logging\n'), ((38498, 38621), 'logging.error', 'logging.error', (['(\'Filter query failed for filter "%s". Check the MySQL syntax in your properties file.\'\n % gate_name)'], {}), '(\n \'Filter query failed for filter "%s". Check the MySQL syntax in your properties file.\'\n % gate_name)\n', (38511, 38621), False, 'import logging\n'), ((38624, 38640), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (38637, 38640), False, 'import logging\n'), ((52689, 52860), 'wx.MessageBox', 'wx.MessageBox', (['"""No columns were found to use for classification Please check your per-object table, it may be empty or not contain any numeric columns."""', '"""Error"""'], {}), "(\n 'No columns were found to use for classification Please check your per-object table, it may be empty or not contain any numeric columns.'\n , 'Error')\n", (52702, 52860), False, 'import wx\n'), ((54253, 54480), 'wx.MessageBox', 'wx.MessageBox', (['"""No columns were found to use for classification after filtering columns that matched your classifier_ignore_columns properties setting. 
Please check your properties and your per-object table."""', '"""Error"""'], {}), "(\n 'No columns were found to use for classification after filtering columns that matched your classifier_ignore_columns properties setting. Please check your properties and your per-object table.'\n , 'Error')\n", (54266, 54480), False, 'import wx\n'), ((54854, 54879), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (54877, 54879), False, 'import threading\n'), ((66548, 66573), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (66571, 66573), False, 'import threading\n'), ((68414, 68478), 'logging.info', 'logging.info', (["('Populating object table with data from %s' % file)"], {}), "('Populating object table with data from %s' % file)\n", (68426, 68478), False, 'import logging\n'), ((68553, 68566), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (68563, 68566), False, 'import csv\n'), ((70598, 70639), 'logging.info', 'logging.info', (['"""Finished loading CSV data"""'], {}), "('Finished loading CSV data')\n", (70610, 70639), False, 'import logging\n'), ((72286, 72329), 'logging.error', 'logging.error', (['"""Unable to count table rows"""'], {}), "('Unable to count table rows')\n", (72299, 72329), False, 'import logging\n'), ((75736, 75908), 'logging.error', 'logging.error', (['"""Table checking removed all rows, you may have an empty column in your database. Disable check_tables in your properties file if this is expected."""'], {}), "(\n 'Table checking removed all rows, you may have an empty column in your database. 
Disable check_tables in your properties file if this is expected.'\n )\n", (75749, 75908), False, 'import logging\n'), ((75966, 76057), 'logging.info', 'logging.info', (['f"""Table checking removed {initial_count - res} rows with missing values"""'], {}), "(\n f'Table checking removed {initial_count - res} rows with missing values')\n", (75978, 76057), False, 'import logging\n'), ((76142, 76522), 'wx.MessageDialog', 'wx.MessageDialog', (['None', 'f"""The check_tables option was enabled in your propreties file, but this would exclude {initial_count - res} of {initial_count} rows from your data set (which had values missing). If using a combined object table you might want to disable table checking. Should the rows be excluded for this session?"""', '"""Table checking"""', '(wx.YES_NO | wx.ICON_WARNING)'], {}), "(None,\n f'The check_tables option was enabled in your propreties file, but this would exclude {initial_count - res} of {initial_count} rows from your data set (which had values missing). If using a combined object table you might want to disable table checking. Should the rows be excluded for this session?'\n , 'Table checking', wx.YES_NO | wx.ICON_WARNING)\n", (76158, 76522), False, 'import wx\n'), ((77058, 77114), 'logging.error', 'logging.error', (['"""Unable to validate checked object table"""'], {}), "('Unable to validate checked object table')\n", (77071, 77114), False, 'import logging\n'), ((77405, 77517), 'logging.error', 'logging.error', (['"""Unable to link checked tables upon creation. Some SQL filters may not work correctly."""'], {}), "(\n 'Unable to link checked tables upon creation. 
Some SQL filters may not work correctly.'\n )\n", (77418, 77517), False, 'import logging\n'), ((81666, 81699), 'numpy.array', 'np.array', (['dtable[:, i]'], {'dtype': 'str'}), '(dtable[:, i], dtype=str)\n', (81674, 81699), True, 'import numpy as np\n'), ((81721, 81756), 'numpy.array', 'np.array', (['dtable[:, i]'], {'dtype': 'float'}), '(dtable[:, i], dtype=float)\n', (81729, 81756), True, 'import numpy as np\n'), ((81778, 81811), 'numpy.array', 'np.array', (['dtable[:, i]'], {'dtype': 'int'}), '(dtable[:, i], dtype=int)\n', (81786, 81811), True, 'import numpy as np\n'), ((88307, 88416), 'logging.warn', 'logging.warn', (['"""TableNumber column was found indexed in your image table but not your object table."""'], {}), "(\n 'TableNumber column was found indexed in your image table but not your object table.'\n )\n", (88319, 88416), False, 'import logging\n'), ((89351, 89483), 'logging.warn', 'logging.warn', (['(\'WARNING: Images were found in "%s" that had a NULL or empty "%s" column value\'\n % (p.image_table, p.well_id))'], {}), '(\n \'WARNING: Images were found in "%s" that had a NULL or empty "%s" column value\'\n % (p.image_table, p.well_id))\n', (89363, 89483), False, 'import logging\n'), ((89721, 89854), 'logging.warn', 'logging.warn', (['(\'WARNING: Images were found in "%s" that had a NULL or empty "%s" column value\'\n % (p.image_table, p.plate_id))'], {}), '(\n \'WARNING: Images were found in "%s" that had a NULL or empty "%s" column value\'\n % (p.image_table, p.plate_id))\n', (89733, 89854), False, 'import logging\n'), ((1807, 1875), 'logging.info', 'logging.info', (['"""Lost connection to the MySQL database; reconnecting."""'], {}), "('Lost connection to the MySQL database; reconnecting.')\n", (1819, 1875), False, 'import logging\n'), ((3477, 3504), 're.match', 're.match', (['"""[A-Za-z0-9_]"""', 'c'], {}), "('[A-Za-z0-9_]', c)\n", (3485, 3504), False, 'import re\n'), ((16517, 16533), 're.compile', 're.compile', (['expr'], {}), '(expr)\n', (16527, 
16533), False, 'import re\n'), ((19258, 19283), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (19281, 19283), False, 'import threading\n'), ((23203, 23228), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (23226, 23228), False, 'import threading\n'), ((35466, 35567), 'logging.warn', 'logging.warn', (['"""Mixing multiple object tables in a filter is experimental, use with caution"""'], {}), "(\n 'Mixing multiple object tables in a filter is experimental, use with caution'\n )\n", (35478, 35567), False, 'import logging\n'), ((53844, 53863), 'numpy.array', 'np.array', (['col_names'], {}), '(col_names)\n', (53852, 53863), True, 'import numpy as np\n'), ((54037, 54115), 'logging.warning', 'logging.warning', (['(\'Ignoring column "%s" because it has zero variance\' % colname)'], {}), '(\'Ignoring column "%s" because it has zero variance\' % colname)\n', (54052, 54115), False, 'import logging\n'), ((70501, 70555), 'logging.info', 'logging.info', (["('... loaded %d lines of CSV data' % prog)"], {}), "('... 
loaded %d lines of CSV data' % prog)\n", (70513, 70555), False, 'import logging\n'), ((76901, 76979), 'logging.info', 'logging.info', (['f"""Discarded table checking results, using original object table"""'], {}), "(f'Discarded table checking results, using original object table')\n", (76913, 76979), False, 'import logging\n'), ((88477, 88586), 'logging.warn', 'logging.warn', (['"""TableNumber column was found indexed in your object table but not your image table."""'], {}), "(\n 'TableNumber column was found indexed in your object table but not your image table.'\n )\n", (88489, 88586), False, 'import logging\n'), ((13676, 13694), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (13687, 13694), False, 'import hashlib\n'), ((16175, 16195), 'numpy.mean', 'np.mean', (['self.values'], {}), '(self.values)\n', (16182, 16195), True, 'import numpy as np\n'), ((16220, 16267), 'numpy.sum', 'np.sum', (['[((x - avg) ** 2) for x in self.values]'], {}), '([((x - avg) ** 2) for x in self.values])\n', (16226, 16267), True, 'import numpy as np\n'), ((38045, 38146), 'logging.warn', 'logging.warn', (['"""Mixing multiple object tables in a filter is experimental, use with caution"""'], {}), "(\n 'Mixing multiple object tables in a filter is experimental, use with caution'\n )\n", (38057, 38146), False, 'import logging\n'), ((53864, 53882), 'numpy.where', 'np.where', (['(res == 0)'], {}), '(res == 0)\n', (53872, 53882), True, 'import numpy as np\n'), ((9133, 9185), 'numpy.where', 'np.where', (['(features > self.thresholds)', 'self.a', 'self.b'], {}), '(features > self.thresholds, self.a, self.b)\n', (9141, 9185), True, 'import numpy as np\n'), ((67488, 67683), 'wx.MessageBox', 'wx.MessageBox', (['"""Could not remove incomplete database at "%s". This file must be removed manually or CPAnalyst will load it the next time use use the current database settings."""', '"""Error"""'], {}), '(\n \'Could not remove incomplete database at "%s". 
This file must be removed manually or CPAnalyst will load it the next time use use the current database settings.\'\n , \'Error\')\n', (67501, 67683), False, 'import wx\n'), ((83374, 83387), 'numpy.isinf', 'np.isinf', (['val'], {}), '(val)\n', (83382, 83387), True, 'import numpy as np\n'), ((83391, 83404), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (83399, 83404), True, 'import numpy as np\n'), ((85964, 86283), 'wx.MessageDialog', 'wx.MessageDialog', (['self.gui_parent', '("""Column "%s" is not indexed in table "%s" Without column indices, dabase performance will be severly slowed.\nTo avoid this warning, set check_tables = false in your properties file."""\n % (col, p.object_table))', '"""Missing column index"""'], {'style': '(wx.OK | wx.ICON_EXCLAMATION)'}), '(self.gui_parent, \n """Column "%s" is not indexed in table "%s" Without column indices, dabase performance will be severly slowed.\nTo avoid this warning, set check_tables = false in your properties file."""\n % (col, p.object_table), \'Missing column index\', style=wx.OK | wx.\n ICON_EXCLAMATION)\n', (85980, 86283), False, 'import wx\n'), ((87304, 87623), 'wx.MessageDialog', 'wx.MessageDialog', (['self.gui_parent', '("""Column "%s" is not indexed in table "%s" Without column indices, dabase performance will be severly slowed.\nTo avoid this warning, set check_tables = false in your properties file."""\n % (col, p.object_table))', '"""Missing column index"""'], {'style': '(wx.OK | wx.ICON_EXCLAMATION)'}), '(self.gui_parent, \n """Column "%s" is not indexed in table "%s" Without column indices, dabase performance will be severly slowed.\nTo avoid this warning, set check_tables = false in your properties file."""\n % (col, p.object_table), \'Missing column index\', style=wx.OK | wx.\n ICON_EXCLAMATION)\n', (87320, 87623), False, 'import wx\n'), ((93237, 93262), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (93260, 93262), False, 'import threading\n'), ((17317, 17404), 
'logging.info', 'logging.info', (["('[%s] Creating SQLite database at: %s.' % (connID, p.db_sqlite_file))"], {}), "('[%s] Creating SQLite database at: %s.' % (connID, p.\n db_sqlite_file))\n", (17329, 17404), False, 'import logging\n'), ((17957, 18044), 'logging.info', 'logging.info', (["('[%s] Creating SQLite database at: %s.' % (connID, p.db_sqlite_file))"], {}), "('[%s] Creating SQLite database at: %s.' % (connID, p.\n db_sqlite_file))\n", (17969, 18044), False, 'import logging\n'), ((53291, 53326), 're.match', 're.match', (["('^' + user_exp + '$')", 'col'], {}), "('^' + user_exp + '$', col)\n", (53299, 53326), False, 'import re\n'), ((70037, 70232), 'wx.MessageBox', 'wx.MessageBox', (['"""Could not remove incomplete database at "%s". This file must be removed manually or CPAnalyst will load it the next time use use the current database settings."""', '"""Error"""'], {}), '(\n \'Could not remove incomplete database at "%s". This file must be removed manually or CPAnalyst will load it the next time use use the current database settings.\'\n , \'Error\')\n', (70050, 70232), False, 'import wx\n')] |
"""Split paired image/mask .npy arrays into train and test sets.

Reads the two arrays named on the command line, performs a single
reproducible shuffle-and-split, and writes train_images.npy,
test_images.npy, train_masks.npy and test_masks.npy to the current
directory.
"""
from sklearn.model_selection import train_test_split
import numpy as np
import argparse

# Hold-out fraction and RNG seed for a reproducible split.
test_size = 0.2
random_state = 42

# Command-line arguments: paths to the two input .npy files.
parser = argparse.ArgumentParser()
parser.add_argument("-images", "--images", required=True, help="images npy location")
parser.add_argument("-masks", "--masks", required=True, help="masks npy location")
cli = vars(parser.parse_args())

# Load the paired arrays from disk.
images = np.load(cli["images"])
masks = np.load(cli["masks"])

# Split both arrays with the same shuffle, then persist each part.
train_images, test_images, train_masks, test_masks = train_test_split(
    images, masks, test_size=test_size, random_state=random_state
)
np.save("train_images.npy", train_images)
np.save("test_images.npy", test_images)
np.save("train_masks.npy", train_masks)
np.save("test_masks.npy", test_masks)
| [
"sklearn.model_selection.train_test_split",
"numpy.load",
"numpy.save",
"argparse.ArgumentParser"
] | [((207, 232), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (230, 232), False, 'import argparse\n'), ((526, 546), 'numpy.load', 'np.load', (['images_path'], {}), '(images_path)\n', (533, 546), True, 'import numpy as np\n'), ((551, 570), 'numpy.load', 'np.load', (['masks_path'], {}), '(masks_path)\n', (558, 570), True, 'import numpy as np\n'), ((655, 725), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, Y, test_size=test_size, random_state=random_state)\n', (671, 725), False, 'from sklearn.model_selection import train_test_split\n'), ((733, 769), 'numpy.save', 'np.save', (['"""train_images.npy"""', 'X_train'], {}), "('train_images.npy', X_train)\n", (740, 769), True, 'import numpy as np\n'), ((770, 804), 'numpy.save', 'np.save', (['"""test_images.npy"""', 'X_test'], {}), "('test_images.npy', X_test)\n", (777, 804), True, 'import numpy as np\n'), ((806, 841), 'numpy.save', 'np.save', (['"""train_masks.npy"""', 'Y_train'], {}), "('train_masks.npy', Y_train)\n", (813, 841), True, 'import numpy as np\n'), ((842, 875), 'numpy.save', 'np.save', (['"""test_masks.npy"""', 'Y_test'], {}), "('test_masks.npy', Y_test)\n", (849, 875), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import time
import warnings
from combat import combat
import numpy as np
import pandas as pd
import patsy
import pytest
from rpy2.rinterface import RRuntimeWarning
from rpy2.robjects import r
from types import FunctionType
@pytest.fixture
def r_result():
warnings.filterwarnings("ignore", category=RRuntimeWarning)
r['source'](os.path.join("tests", "R-combat.R"))
return pd.read_csv("r-batch.csv", index_col=0)
class TestCombat():
def test_import_is_function(self):
assert isinstance(combat, FunctionType)
def test_combat(self, r_result):
pheno = pd.read_csv('bladder-pheno.csv', index_col=0)
data = pd.read_csv('bladder-expr.csv', index_col=0)
model = patsy.dmatrix("~ age + cancer", pheno, return_type="dataframe")
t = time.time()
p_result = combat(data, pheno['batch'], model, "age")
print("{:.2f} seconds\n".format(time.time() - t))
print(str(p_result.iloc[:5, :5]))
p_result.to_csv("py-batch.csv")
print((p_result - r_result).max().max())
assert np.allclose(r_result, p_result)
| [
"warnings.filterwarnings",
"pandas.read_csv",
"numpy.allclose",
"patsy.dmatrix",
"time.time",
"combat.combat",
"os.path.join"
] | [((295, 354), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RRuntimeWarning'}), "('ignore', category=RRuntimeWarning)\n", (318, 354), False, 'import warnings\n'), ((419, 458), 'pandas.read_csv', 'pd.read_csv', (['"""r-batch.csv"""'], {'index_col': '(0)'}), "('r-batch.csv', index_col=0)\n", (430, 458), True, 'import pandas as pd\n'), ((371, 406), 'os.path.join', 'os.path.join', (['"""tests"""', '"""R-combat.R"""'], {}), "('tests', 'R-combat.R')\n", (383, 406), False, 'import os\n'), ((622, 667), 'pandas.read_csv', 'pd.read_csv', (['"""bladder-pheno.csv"""'], {'index_col': '(0)'}), "('bladder-pheno.csv', index_col=0)\n", (633, 667), True, 'import pandas as pd\n'), ((683, 727), 'pandas.read_csv', 'pd.read_csv', (['"""bladder-expr.csv"""'], {'index_col': '(0)'}), "('bladder-expr.csv', index_col=0)\n", (694, 727), True, 'import pandas as pd\n'), ((744, 807), 'patsy.dmatrix', 'patsy.dmatrix', (['"""~ age + cancer"""', 'pheno'], {'return_type': '"""dataframe"""'}), "('~ age + cancer', pheno, return_type='dataframe')\n", (757, 807), False, 'import patsy\n'), ((821, 832), 'time.time', 'time.time', ([], {}), '()\n', (830, 832), False, 'import time\n'), ((852, 894), 'combat.combat', 'combat', (['data', "pheno['batch']", 'model', '"""age"""'], {}), "(data, pheno['batch'], model, 'age')\n", (858, 894), False, 'from combat import combat\n'), ((1103, 1134), 'numpy.allclose', 'np.allclose', (['r_result', 'p_result'], {}), '(r_result, p_result)\n', (1114, 1134), True, 'import numpy as np\n'), ((936, 947), 'time.time', 'time.time', ([], {}), '()\n', (945, 947), False, 'import time\n')] |
import numpy as np
import h5py
from asciitree import LeftAligned
from collections import OrderedDict
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
# from ipdb import set_trace as stop
__all__ = ['i0_allen', '_extract_parameter_cycles', 'isint', 'fvoigt', 'lower_dict_keys', 'show_tree']
def i0_allen(wavelength, muAngle):
"""
Return the solar intensity at a specific wavelength and heliocentric angle
wavelength: wavelength in angstrom
muAngle: cosine of the heliocentric angle
"""
C = 2.99792458e10
H = 6.62606876e-27
if (muAngle == 0):
return 0.0
lambdaIC = 1e4 * np.asarray([0.20,0.22,0.245,0.265,0.28,0.30,0.32,0.35,0.37,0.38,0.40,0.45,0.50,0.55,0.60,0.80,1.0,1.5,2.0,3.0,5.0,10.0])
uData = np.asarray([0.12,-1.3,-0.1,-0.1,0.38,0.74,0.88,0.98,1.03,0.92,0.91,0.99,0.97,0.93,0.88,0.73,0.64,0.57,0.48,0.35,0.22,0.15])
vData = np.asarray([0.33,1.6,0.85,0.90,0.57, 0.20, 0.03,-0.1,-0.16,-0.05,-0.05,-0.17,-0.22,-0.23,-0.23,-0.22,-0.20,-0.21,-0.18,-0.12,-0.07,-0.07])
lambdaI0 = 1e4 * np.asarray([0.20,0.22,0.24,0.26,0.28,0.30,0.32,0.34,0.36,0.37,0.38,0.39,0.40,0.41,0.42,0.43,0.44,0.45,0.46,0.48,0.50,0.55,0.60,0.65,0.70,0.75,\
0.80,0.90,1.00,1.10,1.20,1.40,1.60,1.80,2.00,2.50,3.00,4.00,5.00,6.00,8.00,10.0,12.0])
I0 = np.asarray([0.06,0.21,0.29,0.60,1.30,2.45,3.25,3.77,4.13,4.23,4.63,4.95,5.15,5.26,5.28,5.24,5.19,5.10,5.00,4.79,4.55,4.02,3.52,3.06,2.69,2.28,2.03,\
1.57,1.26,1.01,0.81,0.53,0.36,0.238,0.160,0.078,0.041,0.0142,0.0062,0.0032,0.00095,0.00035,0.00018])
I0 *= 1e14 * (lambdaI0 * 1e-8)**2 / C
u = np.interp(wavelength, lambdaIC, uData)
v = np.interp(wavelength, lambdaIC, vData)
i0 = np.interp(wavelength, lambdaI0, I0)
return (1.0 - u - v + u * muAngle + v * muAngle**2)* i0
def _extract_parameter_cycles(s):
tmp = s[0].split('->')
value = float(tmp[0])
cycle1 = tmp[1].strip()
cycles = [cycle1] + s[1:]
return value, cycles
def isint(str):
try:
int(str)
return True
except ValueError:
return False
def isfloat(str):
if (str is None):
return False
try:
float(str)
return True
except ValueError:
return False
def toint(l):
return [int(x) if isint(x) else x for x in l]
def tofloat(l):
return [float(x) if isfloat(x) else None for x in l]
def onlyint(l):
return [i for i in l if isinstance(i, int)]
def fvoigt(damp,v):
"""
Fast implementation of the Voigt-Faraday function
Parameters
----------
damp : float
damping parameter
v : float
normalized wavelength (lambda-lambda0) / sigma
Returns
-------
voigt, faraday : float
Value of the Voigt and Faraday functions
Notes
-----
A rational approximation to the complex error function is used
after Hui, Armstrong, and Wray(1978, JQSRT 19, 509). H and F are
the real and imaginary parts of such function, respectively.
The procedure is inspired on that in SIR (Ruiz Cobo & del Toro
Iniesta 1992, ApJ 398, 385). On its turn, that routine was taken
from modifications by <NAME> (1986) to modifications by S.K.
Solanki (1985) to an original FORTRAN routine written by <NAME>
and <NAME>.
"""
A = [122.607931777104326, 214.382388694706425, 181.928533092181549,\
93.155580458138441, 30.180142196210589, 5.912626209773153,\
0.564189583562615]
B = [122.60793177387535, 352.730625110963558, 457.334478783897737,\
348.703917719495792, 170.354001821091472, 53.992906912940207,\
10.479857114260399,1.]
z = np.array(damp*np.ones(len(v)) + -abs(v)*1j)
Z = ((((((A[6]*z+A[5])*z+A[4])*z+A[3])*z+A[2])*z+A[1])*z+A[0])/\
(((((((z+B[6])*z+B[5])*z+B[4])*z+B[3])*z+B[2])*z+B[1])*z+B[0])
h = np.real(Z)
f = np.sign(v)*np.imag(Z)*0.5
return h, f
def lower_dict_keys(d):
out = {}
for k, v in d.items():
out[k.lower()] = v
return out
def show_tree(hdf5_file):
tree = {hdf5_file: OrderedDict()}
f = h5py.File(hdf5_file, 'r')
for k, v in f.items():
tree[hdf5_file][k] = OrderedDict()
for k2, v2 in v.items():
tree[hdf5_file][k][f'{k2} -> {v2.shape} {v2.dtype}'] = OrderedDict()
chrs = dict(
UP_AND_RIGHT=u"\u2514",
HORIZONTAL=u"\u2500",
VERTICAL=u"\u2502",
VERTICAL_AND_RIGHT=u"\u251C"
)
tr = LeftAligned(draw=BoxStyle(gfx = chrs, horiz_len=1))
print(tr(tree)) | [
"h5py.File",
"numpy.asarray",
"asciitree.drawing.BoxStyle",
"numpy.imag",
"numpy.real",
"numpy.sign",
"numpy.interp",
"collections.OrderedDict"
] | [((763, 912), 'numpy.asarray', 'np.asarray', (['[0.12, -1.3, -0.1, -0.1, 0.38, 0.74, 0.88, 0.98, 1.03, 0.92, 0.91, 0.99, \n 0.97, 0.93, 0.88, 0.73, 0.64, 0.57, 0.48, 0.35, 0.22, 0.15]'], {}), '([0.12, -1.3, -0.1, -0.1, 0.38, 0.74, 0.88, 0.98, 1.03, 0.92, \n 0.91, 0.99, 0.97, 0.93, 0.88, 0.73, 0.64, 0.57, 0.48, 0.35, 0.22, 0.15])\n', (773, 912), True, 'import numpy as np\n'), ((899, 1063), 'numpy.asarray', 'np.asarray', (['[0.33, 1.6, 0.85, 0.9, 0.57, 0.2, 0.03, -0.1, -0.16, -0.05, -0.05, -0.17, -\n 0.22, -0.23, -0.23, -0.22, -0.2, -0.21, -0.18, -0.12, -0.07, -0.07]'], {}), '([0.33, 1.6, 0.85, 0.9, 0.57, 0.2, 0.03, -0.1, -0.16, -0.05, -\n 0.05, -0.17, -0.22, -0.23, -0.23, -0.22, -0.2, -0.21, -0.18, -0.12, -\n 0.07, -0.07])\n', (909, 1063), True, 'import numpy as np\n'), ((1308, 1605), 'numpy.asarray', 'np.asarray', (['[0.06, 0.21, 0.29, 0.6, 1.3, 2.45, 3.25, 3.77, 4.13, 4.23, 4.63, 4.95, 5.15,\n 5.26, 5.28, 5.24, 5.19, 5.1, 5.0, 4.79, 4.55, 4.02, 3.52, 3.06, 2.69, \n 2.28, 2.03, 1.57, 1.26, 1.01, 0.81, 0.53, 0.36, 0.238, 0.16, 0.078, \n 0.041, 0.0142, 0.0062, 0.0032, 0.00095, 0.00035, 0.00018]'], {}), '([0.06, 0.21, 0.29, 0.6, 1.3, 2.45, 3.25, 3.77, 4.13, 4.23, 4.63,\n 4.95, 5.15, 5.26, 5.28, 5.24, 5.19, 5.1, 5.0, 4.79, 4.55, 4.02, 3.52, \n 3.06, 2.69, 2.28, 2.03, 1.57, 1.26, 1.01, 0.81, 0.53, 0.36, 0.238, 0.16,\n 0.078, 0.041, 0.0142, 0.0062, 0.0032, 0.00095, 0.00035, 0.00018])\n', (1318, 1605), True, 'import numpy as np\n'), ((1617, 1655), 'numpy.interp', 'np.interp', (['wavelength', 'lambdaIC', 'uData'], {}), '(wavelength, lambdaIC, uData)\n', (1626, 1655), True, 'import numpy as np\n'), ((1664, 1702), 'numpy.interp', 'np.interp', (['wavelength', 'lambdaIC', 'vData'], {}), '(wavelength, lambdaIC, vData)\n', (1673, 1702), True, 'import numpy as np\n'), ((1712, 1747), 'numpy.interp', 'np.interp', (['wavelength', 'lambdaI0', 'I0'], {}), '(wavelength, lambdaI0, I0)\n', (1721, 1747), True, 'import numpy as np\n'), ((3914, 3924), 'numpy.real', 'np.real', (['Z'], 
{}), '(Z)\n', (3921, 3924), True, 'import numpy as np\n'), ((4158, 4183), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {}), "(hdf5_file, 'r')\n", (4167, 4183), False, 'import h5py\n'), ((630, 769), 'numpy.asarray', 'np.asarray', (['[0.2, 0.22, 0.245, 0.265, 0.28, 0.3, 0.32, 0.35, 0.37, 0.38, 0.4, 0.45, 0.5,\n 0.55, 0.6, 0.8, 1.0, 1.5, 2.0, 3.0, 5.0, 10.0]'], {}), '([0.2, 0.22, 0.245, 0.265, 0.28, 0.3, 0.32, 0.35, 0.37, 0.38, 0.4,\n 0.45, 0.5, 0.55, 0.6, 0.8, 1.0, 1.5, 2.0, 3.0, 5.0, 10.0])\n', (640, 769), True, 'import numpy as np\n'), ((1060, 1322), 'numpy.asarray', 'np.asarray', (['[0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.37, 0.38, 0.39, 0.4,\n 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.48, 0.5, 0.55, 0.6, 0.65, 0.7, \n 0.75, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 4.0, 5.0, \n 6.0, 8.0, 10.0, 12.0]'], {}), '([0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.37, 0.38,\n 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.48, 0.5, 0.55, 0.6, \n 0.65, 0.7, 0.75, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0,\n 4.0, 5.0, 6.0, 8.0, 10.0, 12.0])\n', (1070, 1322), True, 'import numpy as np\n'), ((4134, 4147), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4145, 4147), False, 'from collections import OrderedDict\n'), ((4240, 4253), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4251, 4253), False, 'from collections import OrderedDict\n'), ((3933, 3943), 'numpy.sign', 'np.sign', (['v'], {}), '(v)\n', (3940, 3943), True, 'import numpy as np\n'), ((3944, 3954), 'numpy.imag', 'np.imag', (['Z'], {}), '(Z)\n', (3951, 3954), True, 'import numpy as np\n'), ((4355, 4368), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4366, 4368), False, 'from collections import OrderedDict\n'), ((4579, 4610), 'asciitree.drawing.BoxStyle', 'BoxStyle', ([], {'gfx': 'chrs', 'horiz_len': '(1)'}), '(gfx=chrs, horiz_len=1)\n', (4587, 4610), False, 'from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK\n')] |
# Adopted from https://github.com/KiroSummer/A_Syntax-aware_MTL_Framework_for_Chinese_SRL
# Inference functions for the SRL model.
import numpy as np
def decode_spans(span_starts, span_ends, span_scores, labels_inv):
"""
Args:
span_starts: [num_candidates,]
span_scores: [num_candidates, num_labels]
span_ends:
labels_inv:
Returns:
"""
pred_spans = []
span_labels = np.argmax(span_scores, axis=1) # [num_candidates]
spans_list = list(zip(span_starts, span_ends, span_labels, span_scores))
spans_list = sorted(spans_list, key=lambda x: x[3][x[2]], reverse=True)
predicted_spans = {}
for start, end, label, _ in spans_list:
# Skip invalid span.
if label == 0 or (start, end) in predicted_spans:
continue
pred_spans.append((start, end, labels_inv[label]))
predicted_spans[(start, end)] = label
return pred_spans
def greedy_decode(predict_dict, srl_labels_inv):
"""Greedy decoding for SRL predicate-argument structures.
Args:
predict_dict: Dictionary of name to numpy arrays.
srl_labels_inv: SRL label id to string name.
suppress_overlap: Whether to greedily suppress overlapping arguments for the same predicate.
Returns:
"""
arg_starts = predict_dict["arg_starts"]
arg_ends = predict_dict["arg_ends"]
predicates = predict_dict["predicates"]
arg_labels = predict_dict["arg_labels"]
scores = predict_dict["srl_scores"]
num_suppressed_args = 0
# Map from predicates to a list of labeled spans.
pred_to_args = {}
if len(arg_ends) > 0 and len(predicates) > 0:
max_len = max(np.max(arg_ends), np.max(predicates)) + 1
else:
max_len = 1
for j, pred_id in enumerate(predicates):
args_list = []
for i, (arg_start, arg_end) in enumerate(zip(arg_starts, arg_ends)):
# If label is not null.
if arg_labels[i][j] == 0:
continue
label = srl_labels_inv[arg_labels[i][j]]
# if label not in ["V", "C-V"]:
args_list.append((arg_start, arg_end, label, scores[i][j][arg_labels[i][j]]))
# Sort arguments by highest score first.
args_list = sorted(args_list, key=lambda x: x[3], reverse=True)
new_args_list = []
flags = [False for _ in range(max_len)]
# Predicate will not overlap with arguments either.
flags[pred_id] = True
for (arg_start, arg_end, label, score) in args_list:
# If none of the tokens has been covered:
if not max(flags[arg_start:arg_end + 1]):
new_args_list.append((arg_start, arg_end, label))
for k in range(arg_start, arg_end + 1):
flags[k] = True
# Only add predicate if it has any argument.
if new_args_list:
pred_to_args[pred_id] = new_args_list
num_suppressed_args += len(args_list) - len(new_args_list)
return pred_to_args, num_suppressed_args
_CORE_ARGS = {"ARG0": 1, "ARG1": 2, "ARG2": 4, "ARG3": 8, "ARG4": 16, "ARG5": 32, "ARGA": 64,
"A0": 1, "A1": 2, "A2": 4, "A3": 8, "A4": 16, "A5": 32, "AA": 64}
def get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in list(mention_to_predicted.items())}
return predicted_clusters, mention_to_predicted
def _decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id):
labels = np.argmax(scores, axis=1)
spans = []
for i, (start, end, label) in enumerate(zip(starts, ends, labels)):
if label <= 0:
continue
label_str = labels_inv[label]
if pred_id is not None and label_str == "V":
continue
spans.append((start, end, label_str, scores[i][label]))
spans = sorted(spans, key=lambda x: x[3], reverse=True)
flags = np.zeros([max_len], dtype=bool)
if pred_id is not None:
flags[pred_id] = True
new_spans = []
for start, end, label_str, score in spans:
if not max(flags[start:end + 1]):
new_spans.append((start, end, label_str)) # , score))
for k in range(start, end + 1):
flags[k] = True
return new_spans
def _dp_decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id, u_constraint=False):
num_roles = scores.shape[1] # [num_arg, num_roles]
labels = np.argmax(scores, axis=1).astype(np.int64)
spans = list(zip(starts, ends, list(range(len(starts)))))
spans = sorted(spans, key=lambda x: (x[0], x[1])) # sort according to the span start index
if u_constraint:
f = np.zeros([max_len + 1, 128], dtype=float) - 0.1
else: # This one
f = np.zeros([max_len + 1, 1], dtype=float) - 0.1
f[0, 0] = 0
states = {0: set([0])} # A dictionary from id to list of binary core-arg states.
pointers = {} # A dictionary from states to (arg_id, role, prev_t, prev_rs)
best_state = [(0, 0)]
def _update_state(t0, rs0, t1, rs1, delta, arg_id, role):
if f[t0][rs0] + delta > f[t1][rs1]:
f[t1][rs1] = f[t0][rs0] + delta
if t1 not in states:
states[t1] = set()
states[t1].update([rs1])
pointers[(t1, rs1)] = (arg_id, role, t0, rs0) # the pointers store
if f[t1][rs1] > f[best_state[0][0]][best_state[0][1]]:
best_state[0] = (t1, rs1)
for start, end, i in spans: # [arg_start, arg_end, arg_span_id]
assert scores[i][0] == 0 # dummy score
# The extra dummy score should be same for all states, so we can safely skip arguments overlap
# with the predicate.
if pred_id is not None and start <= pred_id and pred_id <= end: # skip the span contains the predicate
continue
r0 = labels[i] # Locally best role assignment.
# Strictly better to incorporate a dummy span if it has the highest local score.
if r0 == 0: # labels_inv[r0] == "O"
continue
r0_str = labels_inv[r0]
# Enumerate explored states.
t_states = [t for t in list(states.keys()) if t <= start] # collect the state which is before the current span
for t in t_states: # for each state
role_states = states[t]
# Update states if best role is not a core arg.
if not u_constraint or r0_str not in _CORE_ARGS: # True; this one
for rs in role_states: # the set type in the value in the state dict
_update_state(t, rs, end + 1, rs, scores[i][r0], i, r0) # update the state
else:
for rs in role_states:
for r in range(1, num_roles):
if scores[i][r] > 0:
r_str = labels_inv[r]
core_state = _CORE_ARGS.get(r_str, 0)
# print start, end, i, r_str, core_state, rs
if core_state & rs == 0:
_update_state(t, rs, end + 1, rs | core_state, scores[i][r], i, r)
# Backtrack to decode.
new_spans = []
t, rs = best_state[0]
while (t, rs) in pointers:
i, r, t0, rs0 = pointers[(t, rs)]
new_spans.append((int(starts[i]), int(ends[i]), labels_inv[r]))
t = t0
rs = rs0
return new_spans[::-1]
def srl_decode(sentence_lengths, predict_dict, srl_labels_inv, config): # decode the predictions.
# Decode sentence-level tasks.
num_sentences = len(sentence_lengths)
predictions = [{} for _ in range(num_sentences)]
# Sentence-level predictions.
for i in range(num_sentences): # for each sentences
# if predict_dict["No_arg"] is True:
# predictions["srl"][i][predict_dict["predicates"][i]] = []
# continue
predict_dict_num_args_ = predict_dict["num_args"].cpu().numpy()
predict_dict_num_preds_ = predict_dict["num_preds"].cpu().numpy()
predict_dict_predicates_ = predict_dict["predicates"].cpu().numpy()
predict_dict_arg_starts_ = predict_dict["arg_starts"].cpu().numpy()
predict_dict_arg_ends_ = predict_dict["arg_ends"].cpu().numpy()
predict_dict_srl_scores_ = predict_dict["srl_scores"].detach().cpu().numpy()
num_args = predict_dict_num_args_[i] # the number of the candidate argument spans
num_preds = predict_dict_num_preds_[i] # the number of the candidate predicates
# for each predicate id, exec the decode process
for j, pred_id in enumerate(predict_dict_predicates_[i][:num_preds]):
# sorted arg_starts and arg_ends and srl_scores ? should be??? enforce_srl_constraint = False
arg_spans = _dp_decode_non_overlapping_spans(
predict_dict_arg_starts_[i][:num_args],
predict_dict_arg_ends_[i][:num_args],
predict_dict_srl_scores_[i, :num_args, j, :],
sentence_lengths[i], srl_labels_inv, pred_id, config.enforce_srl_constraint)
# To avoid warnings in the eval script.
if config.use_gold_predicates: # false
arg_spans.append((pred_id, pred_id, "V"))
if arg_spans:
predictions[i][int(pred_id)] = sorted(arg_spans, key=lambda x: (x[0], x[1]))
return predictions
| [
"numpy.max",
"numpy.zeros",
"numpy.argmax"
] | [((427, 457), 'numpy.argmax', 'np.argmax', (['span_scores'], {'axis': '(1)'}), '(span_scores, axis=1)\n', (436, 457), True, 'import numpy as np\n'), ((4476, 4501), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4485, 4501), True, 'import numpy as np\n'), ((4881, 4912), 'numpy.zeros', 'np.zeros', (['[max_len]'], {'dtype': 'bool'}), '([max_len], dtype=bool)\n', (4889, 4912), True, 'import numpy as np\n'), ((5424, 5449), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (5433, 5449), True, 'import numpy as np\n'), ((5659, 5700), 'numpy.zeros', 'np.zeros', (['[max_len + 1, 128]'], {'dtype': 'float'}), '([max_len + 1, 128], dtype=float)\n', (5667, 5700), True, 'import numpy as np\n'), ((5741, 5780), 'numpy.zeros', 'np.zeros', (['[max_len + 1, 1]'], {'dtype': 'float'}), '([max_len + 1, 1], dtype=float)\n', (5749, 5780), True, 'import numpy as np\n'), ((1683, 1699), 'numpy.max', 'np.max', (['arg_ends'], {}), '(arg_ends)\n', (1689, 1699), True, 'import numpy as np\n'), ((1701, 1719), 'numpy.max', 'np.max', (['predicates'], {}), '(predicates)\n', (1707, 1719), True, 'import numpy as np\n')] |
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataloader import TestDataset
from collections import defaultdict
from ogb.linkproppred import Evaluator
class KGEModel(nn.Module):
def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, evaluator,
double_entity_embedding=False, double_relation_embedding=False):
super(KGEModel, self).__init__()
self.model_name = model_name
self.nentity = nentity
self.nrelation = nrelation
self.hidden_dim = hidden_dim
self.epsilon = 2.0
self.gamma = nn.Parameter(
torch.Tensor([gamma]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
self.entity_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
self.relation_dim = hidden_dim * 2 if double_relation_embedding else hidden_dim
if model_name == 'TransM':
# self.entity_dim = self.entity_dim * self.entity_dim
self.relation_dim = self.entity_dim * self.entity_dim
if model_name == 'ConvFM':
self.entity_dim = self.entity_dim * self.entity_dim
self.relation_dim = self.entity_dim
if model_name == '5Star':
self.relation_dim = self.entity_dim * 4
if model_name == 'RelConv':
self.entity_dim = self.entity_dim ** 2
self.nfilters = self.entity_dim // 9
self.relation_dim = 9 * self.nfilters
self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))
nn.init.xavier_uniform_(self.entity_embedding)
# nn.init.uniform_(
# tensor=self.entity_embedding,
# a=-self.embedding_range.item(),
# b=self.embedding_range.item()
# )
self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
nn.init.xavier_uniform_(self.relation_embedding)
# nn.init.uniform_(
# tensor=self.relation_embedding,
# a=-self.embedding_range.item(),
# b=self.embedding_range.item()
# )
# else:
# self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.entity_dim, self.entity_dim))
# nn.init.uniform_(
# tensor=self.relation_embedding,
# a=-self.embedding_range.item(),
# b=self.embedding_range.item()
# )
# Do not forget to modify this line when you add a new model in the "forward" function
if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'ConvE',
'ConvKB', 'NormConvKB', 'SymNormConvKB', 'TransM', 'HolE', 'RelConv', 'QuatE',
'FullConvKB',
'ConvFM', 'ConEx', 'NTN', 'ConvQuatE', 'ComplExQuatE', 'OctonionE']:
raise ValueError('model %s not supported' % model_name)
if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):
raise ValueError('RotatE should use --double_entity_embedding')
if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):
raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')
if model_name == 'ConvE':
assert self.entity_dim == self.relation_dim
self.emb_dim1 = 10
self.emb_dim2 = self.entity_dim // self.emb_dim1
self.inp_drop = nn.Dropout(0.0)
self.fm_drop = nn.Dropout2d(0.0)
self.hid_drop = nn.Dropout(0.0)
self.conv1 = nn.Conv2d(1, 16, (3, 3))
self.bn0 = nn.BatchNorm2d(1)
self.bn1 = nn.BatchNorm2d(16)
self.bn2 = nn.BatchNorm1d(self.entity_dim)
self.fc = torch.nn.Linear(16 * (self.emb_dim1 - 1) * (self.emb_dim2 - 2) * 2, self.entity_dim)
if model_name == 'ConvKB':
self.nfmap = 3
self.conv1_bn = nn.BatchNorm2d(1)
self.conv_layer = nn.Conv2d(1, self.nfmap, (1, 3)) # kernel size x 3
self.conv2_bn = nn.BatchNorm2d(self.nfmap)
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU() # you should also tune with torch.tanh() or torch.nn.Tanh()
self.fc_layer = nn.Linear(self.nfmap * self.entity_dim, 1, bias=False)
if model_name == 'NormConvKB':
self.nfmap = 12
# self.conv1_bn = nn.BatchNorm2d(1)
self.conv_layer = nn.Conv2d(1, self.nfmap, (1, 3)) # kernel size x 3
# self.conv2_bn = nn.BatchNorm2d(self.nfmap)
# self.conv2_lrn = nn.LocalResponseNorm(2)
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU() # you should also tune with torch.tanh() or torch.nn.Tanh()
self.fc_layer = nn.Linear(self.nfmap * self.entity_dim, 1, bias=False)
if model_name == 'ConvFM':
self.nfmap1 = 12
self.nfmap2 = 3
self.conv_layer_1 = nn.Conv2d(3, self.nfmap1, (3, 3), padding=1) # kernel size x 3
self.conv_layer_2 = nn.Conv2d(self.nfmap1, self.nfmap2, (3, 3), padding=1) # kernel size x 3
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU() # you should also tune with torch.tanh() or torch.nn.Tanh()
self.fc_layer = nn.Linear(self.nfmap2 * self.entity_dim, 1, bias=False)
if model_name == 'FullConvKB':
self.nfmaps = [8, 16, 8]
self.conv_layer_1 = nn.Conv2d(1, self.nfmaps[0], (3, 3), padding=1)
self.conv_layer_2 = nn.Conv2d(self.nfmaps[0], self.nfmaps[1], (3, 3), padding=1)
self.conv_layer_3 = nn.Conv2d(self.nfmaps[1], self.nfmaps[2], (2, 2))
self.conv_layer_4 = nn.Conv2d(self.nfmaps[2], 1, (1, 1))
self.conv_layers = [
self.conv_layer_1,
self.conv_layer_2,
self.conv_layer_3,
self.conv_layer_4
]
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU()
if model_name == 'ConEx':
self.nfmap = 1
self.conv_layer = nn.Conv2d(1, self.nfmap, (3, 3), padding=1)
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU()
self.fc_layer = nn.Linear(self.nfmap * self.entity_dim * 2, self.entity_dim)
if model_name == 'SymNormConvKB':
self.nfmap = 8
self.conv1_bn = nn.BatchNorm2d(1)
self.conv_layer1 = nn.Conv2d(1, self.nfmap, (1, 2))
self.conv_layer2 = nn.Conv2d(self.nfmap, self.nfmap, (1, 2))
self.conv2_bn = nn.BatchNorm2d(self.nfmap)
self.dropout = nn.Dropout(0.0)
self.non_linearity = nn.ReLU() # you should also tune with torch.tanh() or torch.nn.Tanh()
self.fc_layer = nn.Linear(self.nfmap * self.entity_dim, 1, bias=False)
if model_name == 'NTN':
num_slices = 4
self.w = nn.Parameter(data=torch.empty(
nrelation,
num_slices,
self.entity_dim,
self.entity_dim), requires_grad=True)
self.vh = nn.Parameter(data=torch.empty(
nrelation,
num_slices,
self.entity_dim), requires_grad=True)
self.vt = nn.Parameter(data=torch.empty(
nrelation,
num_slices,
self.entity_dim), requires_grad=True)
self.b = nn.Parameter(data=torch.empty(
nrelation,
num_slices), requires_grad=True)
self.u = nn.Parameter(data=torch.empty(
nrelation,
num_slices), requires_grad=True)
self.non_linearity = nn.Tanh()
if model_name == 'ConvQuatE':
self.conv_layer_h = nn.Conv2d(1, 4, (3, 3), padding=1)
self.conv_layer_r = nn.Conv2d(1, 4, (3, 3), padding=1)
self.conv_layer_t = nn.Conv2d(1, 4, (3, 3), padding=1)
self.evaluator = evaluator
def forward(self, sample, mode='single'):
'''
Forward function that calculate the score of a batch of triples.
In the 'single' mode, sample is a batch of triple.
In the 'head-batch' or 'tail-batch' mode, sample consists two part.
The first part is usually the positive sample.
And the second part is the entities in the negative samples.
Because negative samples and positive samples usually share two elements
in their triple ((head, relation) or (relation, tail)).
'''
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
self.head_idx = sample[:, 0]
head = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
self.relation_idx = sample[:, 1]
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
self.tail_idx = sample[:, 2]
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
self.head_idx = head_part.view(-1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
self.relation_idx = tail_part[:, 1]
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
self.tail_idx = tail_part[:, 2]
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
self.head_idx = head_part[:, 0]
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
self.relation_idx = head_part[:, 1]
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
self.tail_idx = tail_part.view(-1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
else:
raise ValueError('mode %s not supported' % mode)
model_func = {
'TransE': self.TransE,
'DistMult': self.DistMult,
'ComplEx': self.ComplEx,
'RotatE': self.RotatE,
'ConvE': self.ConvE,
'ConvKB': self.ConvKB,
'NormConvKB': self.NormConvKB,
'SymNormConvKB': self.SymNormConvKB,
'TransM': self.TransM,
'HolE': self.HolE,
'RelConv': self.RelConv,
'QuatE': self.QuatE,
'FullConvKB': self.FullConvKB,
'ConvFM': self.ConvFM,
'ConEx': self.ConEx,
'NTN': self.NTN,
'ConvQuatE': self.ConvQuatE,
'ComplExQuatE': self.ComplExQuatE,
'OctonionE': self.OctonionE
}
if self.model_name in model_func:
score = model_func[self.model_name](head, relation, tail, mode)
else:
raise ValueError('model %s not supported' % self.model_name)
return score
def TransE(self, head, relation, tail, mode):
    """Translational score: gamma - ||h + r - t||_1 along the embedding axis (dim 2)."""
    if mode == 'head-batch':
        residual = head + (relation - tail)
    else:
        residual = (head + relation) - tail
    return self.gamma.item() - torch.norm(residual, p=1, dim=2)
def DistMult(self, head, relation, tail, mode):
    """Bilinear-diagonal score: sum_k h_k * r_k * t_k over the embedding axis."""
    if mode == 'head-batch':
        product = head * (relation * tail)
    else:
        product = (head * relation) * tail
    return product.sum(dim=2)
def ComplEx(self, head, relation, tail, mode):
    """ComplEx score Re(<h, r, conj(t)>); embeddings are split half/half into
    real and imaginary parts along dim 2."""
    h_re, h_im = torch.chunk(head, 2, dim=2)
    r_re, r_im = torch.chunk(relation, 2, dim=2)
    t_re, t_im = torch.chunk(tail, 2, dim=2)
    if mode == 'head-batch':
        # fold relation into tail first, then take the dot product with head
        s_re = r_re * t_re + r_im * t_im
        s_im = r_re * t_im - r_im * t_re
        out = h_re * s_re + h_im * s_im
    else:
        # rotate head by relation, then take the dot product with tail
        s_re = h_re * r_re - h_im * r_im
        s_im = h_re * r_im + h_im * r_re
        out = s_re * t_re + s_im * t_im
    return out.sum(dim=2)
def RotatE(self, head, relation, tail, mode):
    """RotatE score: gamma - || h o r - t || where r is a rotation in the
    complex plane; relation values are rescaled to phases in [-pi, pi]."""
    pi = 3.14159265358979323846
    h_re, h_im = torch.chunk(head, 2, dim=2)
    t_re, t_im = torch.chunk(tail, 2, dim=2)
    # Make phases of relations uniformly distributed in [-pi, pi]
    phase = relation / (self.embedding_range.item() / pi)
    r_re = torch.cos(phase)
    r_im = torch.sin(phase)
    if mode == 'head-batch':
        # rotate tail backwards and compare against head
        d_re = r_re * t_re + r_im * t_im - h_re
        d_im = r_re * t_im - r_im * t_re - h_im
    else:
        # rotate head forwards and compare against tail
        d_re = h_re * r_re - h_im * r_im - t_re
        d_im = h_re * r_im + h_im * r_re - t_im
    modulus = torch.stack([d_re, d_im], dim=0).norm(dim=0)
    return self.gamma.item() - modulus.sum(dim=2)
def ConvE(self, head, relation, tail, mode):
    """ConvE-style score: head and relation are reshaped into a 2-D grid,
    stacked, convolved, projected with a fully-connected layer, and matched
    against the tail embedding by a dot product passed through a sigmoid.

    Relies on modules configured elsewhere on self: inp_drop, conv1, fm_drop,
    fc, hid_drop, and the grid dims emb_dim1/emb_dim2 (presumably
    emb_dim1 * emb_dim2 == entity_dim — TODO confirm against __init__).
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape and stack the triplet embeddings; tail stays flat (b, e, d)
    head = head.view(-1, 1, self.emb_dim1, self.emb_dim2)
    # tail = tail.view(-1, 1, self.emb_dim1, self.emb_dim2)
    relation = relation.view(-1, 1, self.emb_dim1, self.emb_dim2)
    # stack head over relation along the height axis -> (N, 1, 2*emb_dim1, emb_dim2)
    x = torch.cat([head, relation], 2)
    # x = self.bn0(x)
    x = self.inp_drop(x)
    x = self.conv1(x)
    # x = self.bn1(x)
    x = F.relu(x)
    x = self.fm_drop(x)
    # x = x.mean(1)
    x = x.view(x.shape[0], -1)
    x = self.fc(x)
    x = self.hid_drop(x)
    # x = self.bn2(x)
    x = F.relu(x)
    # back to (batch, num_neg, d), then dot with the (repeated) tail embeddings
    x = x.view(head_shape[0], num_neg, -1)
    x = (x * tail).sum(2)
    score = torch.sigmoid(x)
    # score = x - tail
    # score = self.gamma.item() - torch.norm(score, p=1, dim=2)
    # x = torch.mean(x, 1)
    # x = x.view(head_shape[0], num_neg, -1)
    # score = torch.mean(x, 2)
    # x = self.fc(x)
    # score = x.view(-1, num_neg)
    return score
def ConvKB(self, head, relation, tail, mode):
    """ConvKB score: stack h/r/t as a (N, 1, entity_dim, 3) grid, convolve,
    and project to a scalar per triple. As a side effect stores an L2
    regularization term (embedding magnitudes + conv/fc weight norms) on
    self.l2_reg for the caller.
    """
    batch = head.size(0)
    n_head, n_tail = head.size(1), tail.size(1)
    # align h/r/t one-to-one by repeating the positive side across negatives
    num_neg = 1
    if mode == 'head-batch':
        num_neg = n_head
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = n_tail
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # stack the triplet embeddings column-wise
    head = head.view(-1, 1, self.entity_dim, 1)
    tail = tail.view(-1, 1, self.entity_dim, 1)
    relation = relation.view(-1, 1, self.entity_dim, 1)
    grid = torch.cat([head, relation, tail], 3)
    feats = self.non_linearity(self.conv_layer(grid))
    feats = feats.view(-1, self.nfmap * self.entity_dim)
    score = self.fc_layer(self.dropout(feats)).view(batch, num_neg)
    # regularization: embedding magnitudes plus conv/fc parameter norms
    reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for param in list(self.conv_layer.parameters()) + list(self.fc_layer.parameters()):
        reg = reg + param.norm(2)
    self.l2_reg = reg
    return score
def NormConvKB(self, head, relation, tail, mode):
    """ConvKB variant that L2-normalizes the h/r/t embeddings before stacking
    and convolving them. Also stores an L2 regularization term on self.l2_reg
    as a side effect for the training loop.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    # unit-normalize each embedding along the feature axis
    head = F.normalize(head, 2, -1)
    tail = F.normalize(tail, 2, -1)
    relation = F.normalize(relation, 2, -1)
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape and stack the triplet embeddings into a (N, 1, entity_dim, 3) grid
    head = head.view(-1, 1, self.entity_dim, 1)
    tail = tail.view(-1, 1, self.entity_dim, 1)
    relation = relation.view(-1, 1, self.entity_dim, 1)
    x = torch.cat([head, relation, tail], 3)
    # conv_input = self.conv1_bn(x)
    out_conv = self.conv_layer(x)
    # out_conv = self.conv2_lrn(out_conv)
    out_conv = self.non_linearity(out_conv)
    out_conv = out_conv.view(-1, self.nfmap * self.entity_dim)
    input_fc = self.dropout(out_conv)
    score = self.fc_layer(input_fc)
    score = score.view(head_shape[0], num_neg)
    # regularization: embedding magnitudes plus conv/fc parameter norms
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for W in self.conv_layer.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.fc_layer.parameters():
        l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def ConEx(self, head, relation, tail, mode):
head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
# if required, repeat embeddings acc to mode
num_neg = 1
if mode == 'head-batch':
num_neg = head_shape[1]
tail = tail.repeat(1, num_neg, 1)
relation = relation.repeat(1, num_neg, 1)
elif mode == 'tail-batch':
num_neg = tail_shape[1]
head = head.repeat(1, num_neg, 1)
relation = relation.repeat(1, num_neg, 1)
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(relation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
re_head = F.normalize(re_head, 2, -1)
im_head = F.normalize(im_head, 2, -1)
re_relation = F.normalize(im_head, 2, -1)
im_relation = F.normalize(im_relation, 2, -1)
re_tail = F.normalize(re_tail, 2, -1)
im_tail = F.normalize(im_tail, 2, -1)
# reshape and stack the head and relation embeddings
re_head = re_head.view(-1, 1, self.entity_dim // 2, 1)
im_head = im_head.view(-1, 1, self.entity_dim // 2, 1)
re_tail = re_tail.view(-1, self.entity_dim // 2)
im_tail = im_tail.view(-1, self.entity_dim // 2)
re_relation = re_relation.view(-1, 1, self.entity_dim // 2, 1)
im_relation = im_relation.view(-1, 1, self.entity_dim // 2, 1)
x = torch.cat([re_head, im_head, re_relation, im_relation], 3)
out_conv = self.conv_layer(x)
out_conv = self.non_linearity(out_conv)
out_conv = out_conv.view(-1, self.nfmap * self.entity_dim * 2)
input_fc = self.dropout(out_conv)
head_relation = self.fc_layer(input_fc)
re_head_relation, im_head_relation = torch.chunk(head_relation, 2, dim=1)
# re_head_relation = out_conv[:, :, :, 0].view(-1, self.entity_dim // 2)
# im_head_relation = out_conv[:, :, :, 1].view(-1, self.entity_dim // 2)
re_head = re_head.view(-1, self.entity_dim // 2)
im_head = im_head.view(-1, self.entity_dim // 2)
re_relation = re_relation.view(-1, self.entity_dim // 2)
im_relation = im_relation.view(-1, self.entity_dim // 2)
real_real_real = (re_head_relation * re_head * re_relation * re_tail).sum(dim=1)
real_imag_imag = (re_head_relation * re_head * im_relation * im_tail).sum(dim=1)
imag_real_imag = (im_head_relation * im_head * re_relation * im_tail).sum(dim=1)
imag_imag_real = (im_head_relation * im_head * im_relation * re_tail).sum(dim=1)
score = real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
score = score.view(head_shape[0], num_neg)
# regularization
l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
for W in self.conv_layer.parameters():
l2_reg = l2_reg + W.norm(2)
for W in self.fc_layer.parameters():
l2_reg = l2_reg + W.norm(2)
self.l2_reg = l2_reg
return score
def ConvFM(self, head, relation, tail, mode):
    """Two-layer convolutional score: h/r/t are normalized, reshaped into
    square sqrt(d) x sqrt(d) images stacked as 3 input channels, passed
    through two conv layers and a fully-connected projection. Stores an L2
    regularization term on self.l2_reg as a side effect.

    NOTE(review): assumes entity_dim is a perfect square — verify in __init__.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    matrix_dim = int(np.sqrt(self.entity_dim))
    head = F.normalize(head, 2, -1)
    tail = F.normalize(tail, 2, -1)
    relation = F.normalize(relation, 2, -1)
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape each embedding into a square image; stack h/r/t as channels
    head = head.view(-1, 1, matrix_dim, matrix_dim)
    tail = tail.view(-1, 1, matrix_dim, matrix_dim)
    relation = relation.view(-1, 1, matrix_dim, matrix_dim)
    x = torch.cat([head, relation, tail], 1)
    out_conv = self.conv_layer_1(x)
    out_conv = self.non_linearity(out_conv)
    out_conv = self.conv_layer_2(out_conv)
    out_conv = self.non_linearity(out_conv)
    out_conv = out_conv.view(-1, self.nfmap2 * self.entity_dim)
    input_fc = self.dropout(out_conv)
    score = self.fc_layer(input_fc)
    score = score.view(head_shape[0], num_neg)
    # regularization: embedding magnitudes plus parameter norms of all layers
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for W in self.conv_layer_1.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.conv_layer_2.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.fc_layer.parameters():
        l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def SymNormConvKB(self, head, relation, tail, mode):
    """ConvKB variant with two stacked conv layers (despite the name, no
    normalization is applied here). Stores an L2 regularization term on
    self.l2_reg as a side effect.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape and stack the triplet embeddings into a (N, 1, entity_dim, 3) grid
    head = head.view(-1, 1, self.entity_dim, 1)
    tail = tail.view(-1, 1, self.entity_dim, 1)
    relation = relation.view(-1, 1, self.entity_dim, 1)
    x = torch.cat([head, relation, tail], 3)
    # conv_input = self.conv1_bn(x)
    out_conv = self.conv_layer1(x)
    # out_conv = self.conv2_bn(out_conv)
    out_conv = self.non_linearity(out_conv)
    out_conv = self.conv_layer2(out_conv)
    out_conv = self.non_linearity(out_conv)
    out_conv = out_conv.view(-1, self.nfmap * self.entity_dim)
    input_fc = self.dropout(out_conv)
    score = self.fc_layer(input_fc)
    score = score.view(head_shape[0], num_neg)
    # regularization: embedding magnitudes plus parameter norms of all layers
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for W in self.conv_layer1.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.conv_layer2.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.fc_layer.parameters():
        l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def TransM(self, head, relation, tail, mode):
    """Score via a relation-specific linear map: (h @ W_r) @ t, where the
    relation embedding is interpreted as a d x d matrix."""
    d = self.entity_dim
    batch, n_head = head.size(0), head.size(1)
    n_tail = tail.size(1)
    w_r = relation.view(-1, d, d)
    projected = torch.matmul(head, w_r).view(-1, n_head, d)
    return torch.matmul(projected, tail.view(-1, d, n_tail)).view(batch, -1)
def HolE(self, head, relation, tail, mode):
    """Holographic embeddings: score = <r, h * t> where * is circular
    correlation, computed in the frequency domain via real FFTs."""
    d = head.size(-1)
    # correlation in frequency domain: conj(F(h)) ⊙ F(t)
    spectrum = torch.conj(torch.fft.rfft(head)) * torch.fft.rfft(tail)
    # back to the signal domain: (batch, num_entities, d)
    correlation = torch.fft.irfft(spectrum, dim=-1, n=d)
    # inner product with the relation embedding
    return torch.sum(relation * correlation, dim=-1, keepdim=False)
def RelConv(self, head, relation, tail, mode):
    """Relation-as-filter score: the relation embedding is reshaped into 3x3
    conv kernels applied to the head (viewed as a square image); the score is
    the summed difference between the averaged feature map and the tail."""
    side = int(np.sqrt(self.entity_dim))
    batch, n_head = head.size(0), head.size(1)
    n_tail = tail.size(1)
    kernels = relation.view(-1, self.nfilters, 1, 3, 3)
    h_img = head.view(-1, n_head, 1, side, side)
    t_img = tail.view(-1, n_tail, side, side)
    score = torch.zeros(batch, max(n_head, n_tail))
    # convolve each sample's head image with its own relation kernels
    for b in range(batch):
        fmap = F.conv2d(h_img[b], kernels[b], padding=1).mean(1)
        score[b] = (fmap - t_img[b]).sum(-1).sum(-1)
    return score
def QuatE(self, head, relation, tail, mode):
    """Quaternion rotation score: rotate the head by the unit-normalized
    relation quaternion (Hamilton product) and dot with the tail; the
    negative sum is returned so that lower energy means a better triple."""
    n_head, n_tail = head.size(1), tail.size(1)
    # align h/r/t one-to-one by repeating the positive side across negatives
    if mode == 'head-batch':
        tail = tail.repeat(1, n_head, 1)
        relation = relation.repeat(1, n_head, 1)
    elif mode == 'tail-batch':
        head = head.repeat(1, n_tail, 1)
        relation = relation.repeat(1, n_tail, 1)
    h = torch.chunk(head, 4, dim=2)
    r = torch.chunk(relation, 4, dim=2)
    t = torch.chunk(tail, 4, dim=2)
    # project the relation quaternion onto the unit sphere
    modulus = torch.sqrt(r[0] ** 2 + r[1] ** 2 + r[2] ** 2 + r[3] ** 2)
    r = [component / modulus for component in r]
    # Hamilton product h ⊗ r
    a = h[0] * r[0] - h[1] * r[1] - h[2] * r[2] - h[3] * r[3]
    b = h[0] * r[1] + r[0] * h[1] + h[2] * r[3] - r[2] * h[3]
    c = h[0] * r[2] + r[0] * h[2] + h[3] * r[1] - r[3] * h[1]
    d = h[0] * r[3] + r[0] * h[3] + h[1] * r[2] - r[1] * h[2]
    inner = a * t[0] + b * t[1] + c * t[2] + d * t[3]
    return -inner.sum(dim=2)
def FullConvKB(self, head, relation, tail, mode):
    """Fully-convolutional ConvKB variant: normalized h/r/t are stacked into a
    grid and passed through a chain of conv layers (self.conv_layers); the
    score is the mean of the max-pooled feature map. Stores an L2
    regularization term on self.l2_reg as a side effect.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    # unit-normalize each embedding along the feature axis
    head = F.normalize(head, 2, -1)
    tail = F.normalize(tail, 2, -1)
    relation = F.normalize(relation, 2, -1)
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape and stack the triplet embeddings into a (N, 1, entity_dim, 3) grid
    head = head.view(-1, 1, self.entity_dim, 1)
    tail = tail.view(-1, 1, self.entity_dim, 1)
    relation = relation.view(-1, 1, self.entity_dim, 1)
    out_conv = torch.cat([head, relation, tail], 3)
    # run the full conv stack; each stage is conv + nonlinearity
    for conv_layer in self.conv_layers:
        out_conv = conv_layer(out_conv)
        out_conv = self.non_linearity(out_conv)
    out_conv = F.max_pool2d(out_conv, (3, 1))
    # collapse the remaining feature map to one scalar per triple
    score = torch.mean(out_conv.view(out_conv.shape[0], -1), 1)
    score = score.view(head_shape[0], num_neg)
    # regularization: embedding magnitudes plus all conv parameter norms
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for conv_layer in self.conv_layers:
        for W in conv_layer.parameters():
            l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def NTN(self, head, relation, tail, mode):
    """Neural Tensor Network score: u_r^T f(h^T W_r t + V_h h + V_t t + b_r).

    Relies on self.relation_idx having been set by forward() earlier in the
    same pass, and on per-relation parameters self.w, self.vh, self.vt,
    self.b, self.u configured elsewhere. Shape comments use b=batch,
    e=entities per row, d=embedding dim, k=tensor slices, R=#relations.
    Stores an L2 regularization term on self.l2_reg as a side effect.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    # if required, repeat embeddings acc to mode so h and t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
    #: Prepare h: (b, e, d) -> (b, e, 1, 1, d)
    h_for_w = head.unsqueeze(dim=-2).unsqueeze(dim=-2)
    #: Prepare t: (b, e, d) -> (b, e, 1, d, 1)
    t_for_w = tail.unsqueeze(dim=-2).unsqueeze(dim=-1)
    #: Prepare w: (R, k, d, d) -> (b, k, d, d) -> (b, 1, k, d, d)
    w_r = self.w.index_select(dim=0, index=self.relation_idx).unsqueeze(dim=1)
    # h.T @ W @ t, shape: (b, e, k, 1, 1)
    hwt = (h_for_w @ w_r @ t_for_w)
    #: reduce (b, e, k, 1, 1) -> (b, e, k)
    hwt = hwt.squeeze(dim=-1).squeeze(dim=-1)
    #: Prepare vh: (R, k, d) -> (b, k, d) -> (b, 1, k, d)
    vh_r = self.vh.index_select(dim=0, index=self.relation_idx).unsqueeze(dim=1)
    #: Prepare h: (b, e, d) -> (b, e, d, 1)
    h_for_v = head.unsqueeze(dim=-1)
    # V_h @ h, shape: (b, e, k, 1)
    vhh = vh_r @ h_for_v
    #: reduce (b, e, k, 1) -> (b, e, k)
    vhh = vhh.squeeze(dim=-1)
    #: Prepare vt: (R, k, d) -> (b, k, d) -> (b, 1, k, d)
    vt_r = self.vt.index_select(dim=0, index=self.relation_idx).unsqueeze(dim=1)
    #: Prepare t: (b, e, d) -> (b, e, d, 1)
    t_for_v = tail.unsqueeze(dim=-1)
    # V_t @ t, shape: (b, e, k, 1)
    vtt = vt_r @ t_for_v
    #: reduce (b, e, k, 1) -> (b, e, k)
    vtt = vtt.squeeze(dim=-1)
    #: Prepare b: (R, k) -> (b, k) -> (b, 1, k)
    b = self.b.index_select(dim=0, index=self.relation_idx).unsqueeze(dim=1)
    # a = f(h.T @ W @ t + Vh @ h + Vt @ t + b), shape: (b, e, k)
    pre_act = hwt + vhh + vtt + b
    act = self.non_linearity(pre_act)
    # prepare u: (R, k) -> (b, k) -> (b, 1, k, 1)
    u = self.u.index_select(dim=0, index=self.relation_idx).unsqueeze(dim=1).unsqueeze(dim=-1)
    # prepare act: (b, e, k) -> (b, e, 1, k)
    act = act.unsqueeze(dim=-2)
    # compute score, shape: (b, e, 1, 1)
    score = act @ u
    # reduce to (b, e)
    score = score.squeeze(dim=-1).squeeze(dim=-1)
    # regularization: embedding magnitudes only (parameter norms disabled)
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    # for W in self.w.parameters():
    #     l2_reg = l2_reg + W.norm(2)
    # for W in self.vh.parameters():
    #     l2_reg = l2_reg + W.norm(2)
    # for W in self.vt.parameters():
    #     l2_reg = l2_reg + W.norm(2)
    # for W in self.u.parameters():
    #     l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def ConvQuatE(self, head, relation, tail, mode):
    """Convolutional QuatE: each of h/r/t is convolved by its own layer into
    4 quaternion component maps, then combined with the standard QuatE
    Hamilton-product interaction. Stores an L2 regularization term on
    self.l2_reg as a side effect.

    NOTE(review): assumes entity_dim is a perfect square and that each conv
    layer outputs 4 channels preserving the spatial size — verify in __init__.
    """
    head_shape, tail_shape, relation_shape = head.size(), tail.size(), relation.size()
    matrix_dim = int(np.sqrt(self.entity_dim))
    head = F.normalize(head, 2, -1)
    tail = F.normalize(tail, 2, -1)
    relation = F.normalize(relation, 2, -1)
    # if required, repeat embeddings acc to mode so h/r/t align one-to-one
    num_neg = 1
    if mode == 'head-batch':
        num_neg = head_shape[1]
        tail = tail.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    elif mode == 'tail-batch':
        num_neg = tail_shape[1]
        head = head.repeat(1, num_neg, 1)
        relation = relation.repeat(1, num_neg, 1)
    # reshape each embedding into a square image for its conv layer
    head = head.view(-1, 1, matrix_dim, matrix_dim)
    tail = tail.view(-1, 1, matrix_dim, matrix_dim)
    relation = relation.view(-1, 1, matrix_dim, matrix_dim)
    # conv outputs are viewed as 4 quaternion components per embedding
    out_conv_h = self.conv_layer_h(head).view(head_shape[0], num_neg, 4, matrix_dim * matrix_dim)
    out_conv_r = self.conv_layer_r(relation).view(head_shape[0], num_neg, 4, matrix_dim * matrix_dim)
    out_conv_t = self.conv_layer_t(tail).view(head_shape[0], num_neg, 4, matrix_dim * matrix_dim)
    h_0, h_1, h_2, h_3 = out_conv_h[:, :, 0], out_conv_h[:, :, 1], out_conv_h[:, :, 2], out_conv_h[:, :, 3]
    re_0, re_1, re_2, re_3 = out_conv_r[:, :, 0], out_conv_r[:, :, 1], out_conv_r[:, :, 2], out_conv_r[:, :, 3]
    t_0, t_1, t_2, t_3 = out_conv_t[:, :, 0], out_conv_t[:, :, 1], out_conv_t[:, :, 2], out_conv_t[:, :, 3]
    # normalize the relation quaternion to unit length
    den = torch.sqrt(re_0 ** 2 + re_1 ** 2 + re_2 ** 2 + re_3 ** 2)
    re_0, re_1, re_2, re_3 = re_0 / den, re_1 / den, re_2 / den, re_3 / den
    # Hamilton product h ⊗ r
    A = h_0 * re_0 - h_1 * re_1 - h_2 * re_2 - h_3 * re_3
    B = h_0 * re_1 + re_0 * h_1 + h_2 * re_3 - re_2 * h_3
    C = h_0 * re_2 + re_0 * h_2 + h_3 * re_1 - re_3 * h_1
    D = h_0 * re_3 + re_0 * h_3 + h_1 * re_2 - re_1 * h_2
    score = A * t_0 + B * t_1 + C * t_2 + D * t_3
    score = -score.sum(dim=2)
    # regularization: embedding magnitudes plus conv parameter norms
    l2_reg = torch.mean(head ** 2) + torch.mean(tail ** 2) + torch.mean(relation ** 2)
    for W in self.conv_layer_h.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.conv_layer_t.parameters():
        l2_reg = l2_reg + W.norm(2)
    for W in self.conv_layer_r.parameters():
        l2_reg = l2_reg + W.norm(2)
    self.l2_reg = l2_reg
    return score
def ComplExQuatE(self, head, relation, tail, mode):
    """Ensemble score: the arithmetic mean of the ComplEx and QuatE scores
    for the same triple batch (equivalent to stacking and averaging over a
    new leading dimension)."""
    complex_part = self.ComplEx(head, relation, tail, mode)
    quat_part = self.QuatE(head, relation, tail, mode)
    return (complex_part + quat_part) / 2
def OctonionE(self, head, relation, tail, mode):
    """Octonion rotation score: multiply the head octonion by the
    unit-normalized relation octonion and dot with the tail; returns the
    negative sum over the embedding axis.

    Reference implementation:
    https://github.com/Sujit-O/pykg2vec/blob/492807b627574f95b0db9e7cb9f090c3c45a030a/pykg2vec/models/pointwise.py#L772
    """
    n_head, n_tail = head.size(1), tail.size(1)
    # align h/r/t one-to-one by repeating the positive side across negatives
    if mode == 'head-batch':
        tail = tail.repeat(1, n_head, 1)
        relation = relation.repeat(1, n_head, 1)
    elif mode == 'tail-batch':
        head = head.repeat(1, n_tail, 1)
        relation = relation.repeat(1, n_tail, 1)
    h_parts = torch.chunk(head, 8, dim=2)
    r_parts = self._onorm(*torch.chunk(relation, 8, dim=2))
    t_parts = torch.chunk(tail, 8, dim=2)
    o_parts = self._omult(*h_parts, *r_parts)
    inner = sum(o_i * t_i for o_i, t_i in zip(o_parts, t_parts))
    return -torch.sum(inner, -1)
def _qmult(self, s_a, x_a, y_a, z_a, s_b, x_b, y_b, z_b):
a = s_a * s_b - x_a * x_b - y_a * y_b - z_a * z_b
b = s_a * x_b + s_b * x_a + y_a * z_b - y_b * z_a
c = s_a * y_b + s_b * y_a + z_a * x_b - z_b * x_a
d = s_a * z_b + s_b * z_a + x_a * y_b - x_b * y_a
return a, b, c, d
def _qstar(self, a, b, c, d):
return a, -b, -c, -d
def _omult(self, a_1, a_2, a_3, a_4, b_1, b_2, b_3, b_4, c_1, c_2, c_3, c_4, d_1, d_2, d_3, d_4):
d_1_star, d_2_star, d_3_star, d_4_star = self._qstar(d_1, d_2, d_3, d_4)
c_1_star, c_2_star, c_3_star, c_4_star = self._qstar(c_1, c_2, c_3, c_4)
o_1, o_2, o_3, o_4 = self._qmult(a_1, a_2, a_3, a_4, c_1, c_2, c_3, c_4)
o_1s, o_2s, o_3s, o_4s = self._qmult(d_1_star, d_2_star, d_3_star, d_4_star, b_1, b_2, b_3, b_4)
o_5, o_6, o_7, o_8 = self._qmult(d_1, d_2, d_3, d_4, a_1, a_2, a_3, a_4)
o_5s, o_6s, o_7s, o_8s = self._qmult(b_1, b_2, b_3, b_4, c_1_star, c_2_star, c_3_star, c_4_star)
return o_1 - o_1s, o_2 - o_2s, o_3 - o_3s, o_4 - o_4s, \
o_5 + o_5s, o_6 + o_6s, o_7 + o_7s, o_8 + o_8s
def _onorm(self, r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8):
denominator = torch.sqrt(r_1 ** 2 + r_2 ** 2 + r_3 ** 2 + r_4 ** 2
+ r_5 ** 2 + r_6 ** 2 + r_7 ** 2 + r_8 ** 2)
r_1 = r_1 / denominator
r_2 = r_2 / denominator
r_3 = r_3 / denominator
r_4 = r_4 / denominator
r_5 = r_5 / denominator
r_6 = r_6 / denominator
r_7 = r_7 / denominator
r_8 = r_8 / denominator
return r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8
@staticmethod
def train_step(model, optimizer, train_iterator, args, accumulate=False):
    '''
    A single train step: draws one (positive, negatives) batch from
    train_iterator, scores both sides, applies self-adversarial negative
    weighting and optional L2/L3 regularization, back-propagates, and
    returns a dict of loss statistics.

    When accumulate=True the gradients are left in place (no optimizer.step /
    zero_grad) so the caller can accumulate over several batches.
    '''
    model.train()
    positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)
    if args.cuda:
        positive_sample = positive_sample.cuda()
        negative_sample = negative_sample.cuda()
        subsampling_weight = subsampling_weight.cuda()
    # models that expose an L2 term via model.l2_reg after each forward pass
    l2_reg_models = ['ConvKB', 'NormConvKB', 'SymNormConvKB', 'NTN', 'ConvQuatE']
    negative_score = model((positive_sample, negative_sample), mode=mode)
    if args.model in l2_reg_models:
        # capture the side-effect regularizer before the next forward overwrites it
        neg_regularization = model.l2_reg
    if args.negative_adversarial_sampling:
        # In self-adversarial sampling, we do not apply back-propagation on the sampling weight
        negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim=1).detach()
                          * F.logsigmoid(-negative_score)).sum(dim=1)
    else:
        negative_score = F.logsigmoid(-negative_score).mean(dim=1)
    positive_score = model(positive_sample)
    if args.model in l2_reg_models:
        pos_regularization = model.l2_reg
    positive_score = F.logsigmoid(positive_score).squeeze(dim=1)
    if args.uni_weight:
        positive_sample_loss = - positive_score.mean()
        negative_sample_loss = - negative_score.mean()
    else:
        # frequency-based subsampling weights, normalized over the batch
        positive_sample_loss = - (subsampling_weight * positive_score).sum() / subsampling_weight.sum()
        negative_sample_loss = - (subsampling_weight * negative_score).sum() / subsampling_weight.sum()
    loss = (positive_sample_loss + negative_sample_loss) / 2
    if args.model in l2_reg_models:
        regularization = (pos_regularization + neg_regularization) / 2
    if args.regularization != 0.0:
        if args.model in l2_reg_models:
            # Use L2 regularization for ConvKB-style models
            regularization = args.regularization * regularization
        else:
            # Use L3 regularization for ComplEx and DistMult
            regularization = args.regularization * (
                model.entity_embedding.norm(p=3) ** 3 +
                model.relation_embedding.norm(p=3).norm(p=3) ** 3
            )
        loss = loss + regularization
        regularization_log = {'regularization': regularization.item()}
    else:
        regularization_log = {}
    loss.backward()
    if not accumulate:
        optimizer.step()
        optimizer.zero_grad()
    log = {
        **regularization_log,
        'positive_sample_loss': positive_sample_loss.item(),
        'negative_sample_loss': negative_sample_loss.item(),
        'loss': loss.item()
    }
    return log
@staticmethod
def test_step(model, test_triples, args, entity_dict, random_sampling=False):
    '''
    Evaluate the model on test or valid datasets: ranks each positive
    triple against its negatives in both head-batch and tail-batch modes
    and returns the metrics (averaged over all batches) produced by
    model.evaluator.
    '''
    model.eval()
    # Prepare dataloader for evaluation, one per corruption side
    test_dataloader_head = DataLoader(
        TestDataset(
            test_triples,
            args,
            'head-batch',
            random_sampling,
            entity_dict
        ),
        batch_size=args.test_batch_size,
        num_workers=max(1, args.cpu_num // 2),
        collate_fn=TestDataset.collate_fn
    )
    test_dataloader_tail = DataLoader(
        TestDataset(
            test_triples,
            args,
            'tail-batch',
            random_sampling,
            entity_dict
        ),
        batch_size=args.test_batch_size,
        num_workers=max(1, args.cpu_num // 2),
        collate_fn=TestDataset.collate_fn
    )
    test_dataset_list = [test_dataloader_head, test_dataloader_tail]
    test_logs = defaultdict(list)
    step = 0
    total_steps = sum([len(dataset) for dataset in test_dataset_list])
    with torch.no_grad():
        for test_dataset in test_dataset_list:
            for positive_sample, negative_sample, mode in test_dataset:
                if args.cuda:
                    positive_sample = positive_sample.cuda()
                    negative_sample = negative_sample.cuda()
                batch_size = positive_sample.size(0)
                score = model((positive_sample, negative_sample), mode)
                # column 0 holds the positive triple's score; the rest are negatives
                batch_results = model.evaluator.eval({'y_pred_pos': score[:, 0],
                                                      'y_pred_neg': score[:, 1:]})
                for metric in batch_results:
                    test_logs[metric].append(batch_results[metric])
                if step % args.test_log_steps == 0:
                    logging.info('Evaluating the model... (%d/%d)' % (step, total_steps))
                step += 1
    # average each metric over all evaluation batches
    metrics = {}
    for metric in test_logs:
        metrics[metric] = torch.cat(test_logs[metric]).mean().item()
    return metrics
| [
"torch.nn.Dropout",
"torch.sqrt",
"torch.empty",
"torch.cat",
"collections.defaultdict",
"torch.cos",
"torch.fft.irfft",
"torch.nn.functional.normalize",
"torch.no_grad",
"torch.Tensor",
"torch.nn.functional.relu",
"torch.zeros",
"torch.nn.Linear",
"torch.nn.functional.max_pool2d",
"torc... | [((1923, 1969), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.entity_embedding'], {}), '(self.entity_embedding)\n', (1946, 1969), True, 'import torch.nn as nn\n'), ((2243, 2291), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.relation_embedding'], {}), '(self.relation_embedding)\n', (2266, 2291), True, 'import torch.nn as nn\n'), ((13169, 13196), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (13180, 13196), False, 'import torch\n'), ((13232, 13263), 'torch.chunk', 'torch.chunk', (['relation', '(2)'], {'dim': '(2)'}), '(relation, 2, dim=2)\n', (13243, 13263), False, 'import torch\n'), ((13291, 13318), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (13302, 13318), False, 'import torch\n'), ((13933, 13960), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (13944, 13960), False, 'import torch\n'), ((13988, 14015), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (13999, 14015), False, 'import torch\n'), ((14182, 14207), 'torch.cos', 'torch.cos', (['phase_relation'], {}), '(phase_relation)\n', (14191, 14207), False, 'import torch\n'), ((14230, 14255), 'torch.sin', 'torch.sin', (['phase_relation'], {}), '(phase_relation)\n', (14239, 14255), False, 'import torch\n'), ((14765, 14805), 'torch.stack', 'torch.stack', (['[re_score, im_score]'], {'dim': '(0)'}), '([re_score, im_score], dim=0)\n', (14776, 14805), False, 'import torch\n'), ((15731, 15761), 'torch.cat', 'torch.cat', (['[head, relation]', '(2)'], {}), '([head, relation], 2)\n', (15740, 15761), False, 'import torch\n'), ((15881, 15890), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (15887, 15890), True, 'import torch.nn.functional as F\n'), ((16068, 16077), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (16074, 16077), True, 'import torch.nn.functional as F\n'), ((16171, 16187), 
'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (16184, 16187), False, 'import torch\n'), ((17267, 17303), 'torch.cat', 'torch.cat', (['[head, relation, tail]', '(3)'], {}), '([head, relation, tail], 3)\n', (17276, 17303), False, 'import torch\n'), ((18450, 18474), 'torch.nn.functional.normalize', 'F.normalize', (['head', '(2)', '(-1)'], {}), '(head, 2, -1)\n', (18461, 18474), True, 'import torch.nn.functional as F\n'), ((18490, 18514), 'torch.nn.functional.normalize', 'F.normalize', (['tail', '(2)', '(-1)'], {}), '(tail, 2, -1)\n', (18501, 18514), True, 'import torch.nn.functional as F\n'), ((18534, 18562), 'torch.nn.functional.normalize', 'F.normalize', (['relation', '(2)', '(-1)'], {}), '(relation, 2, -1)\n', (18545, 18562), True, 'import torch.nn.functional as F\n'), ((19206, 19242), 'torch.cat', 'torch.cat', (['[head, relation, tail]', '(3)'], {}), '([head, relation, tail], 3)\n', (19215, 19242), False, 'import torch\n'), ((20541, 20568), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (20552, 20568), False, 'import torch\n'), ((20604, 20635), 'torch.chunk', 'torch.chunk', (['relation', '(2)'], {'dim': '(2)'}), '(relation, 2, dim=2)\n', (20615, 20635), False, 'import torch\n'), ((20663, 20690), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (20674, 20690), False, 'import torch\n'), ((20710, 20737), 'torch.nn.functional.normalize', 'F.normalize', (['re_head', '(2)', '(-1)'], {}), '(re_head, 2, -1)\n', (20721, 20737), True, 'import torch.nn.functional as F\n'), ((20756, 20783), 'torch.nn.functional.normalize', 'F.normalize', (['im_head', '(2)', '(-1)'], {}), '(im_head, 2, -1)\n', (20767, 20783), True, 'import torch.nn.functional as F\n'), ((20806, 20833), 'torch.nn.functional.normalize', 'F.normalize', (['im_head', '(2)', '(-1)'], {}), '(im_head, 2, -1)\n', (20817, 20833), True, 'import torch.nn.functional as F\n'), ((20856, 20887), 'torch.nn.functional.normalize', 
'F.normalize', (['im_relation', '(2)', '(-1)'], {}), '(im_relation, 2, -1)\n', (20867, 20887), True, 'import torch.nn.functional as F\n'), ((20906, 20933), 'torch.nn.functional.normalize', 'F.normalize', (['re_tail', '(2)', '(-1)'], {}), '(re_tail, 2, -1)\n', (20917, 20933), True, 'import torch.nn.functional as F\n'), ((20952, 20979), 'torch.nn.functional.normalize', 'F.normalize', (['im_tail', '(2)', '(-1)'], {}), '(im_tail, 2, -1)\n', (20963, 20979), True, 'import torch.nn.functional as F\n'), ((21437, 21495), 'torch.cat', 'torch.cat', (['[re_head, im_head, re_relation, im_relation]', '(3)'], {}), '([re_head, im_head, re_relation, im_relation], 3)\n', (21446, 21495), False, 'import torch\n'), ((21789, 21825), 'torch.chunk', 'torch.chunk', (['head_relation', '(2)'], {'dim': '(1)'}), '(head_relation, 2, dim=1)\n', (21800, 21825), False, 'import torch\n'), ((23274, 23298), 'torch.nn.functional.normalize', 'F.normalize', (['head', '(2)', '(-1)'], {}), '(head, 2, -1)\n', (23285, 23298), True, 'import torch.nn.functional as F\n'), ((23314, 23338), 'torch.nn.functional.normalize', 'F.normalize', (['tail', '(2)', '(-1)'], {}), '(tail, 2, -1)\n', (23325, 23338), True, 'import torch.nn.functional as F\n'), ((23358, 23386), 'torch.nn.functional.normalize', 'F.normalize', (['relation', '(2)', '(-1)'], {}), '(relation, 2, -1)\n', (23369, 23386), True, 'import torch.nn.functional as F\n'), ((24042, 24078), 'torch.cat', 'torch.cat', (['[head, relation, tail]', '(1)'], {}), '([head, relation, tail], 1)\n', (24051, 24078), False, 'import torch\n'), ((25689, 25725), 'torch.cat', 'torch.cat', (['[head, relation, tail]', '(3)'], {}), '([head, relation, tail], 3)\n', (25698, 25725), False, 'import torch\n'), ((27271, 27291), 'torch.fft.rfft', 'torch.fft.rfft', (['head'], {}), '(head)\n', (27285, 27291), False, 'import torch\n'), ((27311, 27331), 'torch.fft.rfft', 'torch.fft.rfft', (['tail'], {}), '(tail)\n', (27325, 27331), False, 'import torch\n'), ((27352, 27372), 'torch.conj', 
'torch.conj', (['head_fft'], {}), '(head_fft)\n', (27362, 27372), False, 'import torch\n'), ((27543, 27591), 'torch.fft.irfft', 'torch.fft.irfft', (['p_fft'], {'dim': '(-1)', 'n': 'head_shape[-1]'}), '(p_fft, dim=-1, n=head_shape[-1])\n', (27558, 27591), False, 'import torch\n'), ((27657, 27711), 'torch.sum', 'torch.sum', (['(relation * composite)'], {'dim': '(-1)', 'keepdim': '(False)'}), '(relation * composite, dim=-1, keepdim=False)\n', (27666, 27711), False, 'import torch\n'), ((29172, 29199), 'torch.chunk', 'torch.chunk', (['head', '(4)'], {'dim': '(2)'}), '(head, 4, dim=2)\n', (29183, 29199), False, 'import torch\n'), ((29233, 29264), 'torch.chunk', 'torch.chunk', (['relation', '(4)'], {'dim': '(2)'}), '(relation, 4, dim=2)\n', (29244, 29264), False, 'import torch\n'), ((29294, 29321), 'torch.chunk', 'torch.chunk', (['tail', '(4)'], {'dim': '(2)'}), '(tail, 4, dim=2)\n', (29305, 29321), False, 'import torch\n'), ((29367, 29424), 'torch.sqrt', 'torch.sqrt', (['(re_0 ** 2 + re_1 ** 2 + re_2 ** 2 + re_3 ** 2)'], {}), '(re_0 ** 2 + re_1 ** 2 + re_2 ** 2 + re_3 ** 2)\n', (29377, 29424), False, 'import torch\n'), ((30057, 30081), 'torch.nn.functional.normalize', 'F.normalize', (['head', '(2)', '(-1)'], {}), '(head, 2, -1)\n', (30068, 30081), True, 'import torch.nn.functional as F\n'), ((30097, 30121), 'torch.nn.functional.normalize', 'F.normalize', (['tail', '(2)', '(-1)'], {}), '(tail, 2, -1)\n', (30108, 30121), True, 'import torch.nn.functional as F\n'), ((30141, 30169), 'torch.nn.functional.normalize', 'F.normalize', (['relation', '(2)', '(-1)'], {}), '(relation, 2, -1)\n', (30152, 30169), True, 'import torch.nn.functional as F\n'), ((30820, 30856), 'torch.cat', 'torch.cat', (['[head, relation, tail]', '(3)'], {}), '([head, relation, tail], 3)\n', (30829, 30856), False, 'import torch\n'), ((34630, 34654), 'torch.nn.functional.normalize', 'F.normalize', (['head', '(2)', '(-1)'], {}), '(head, 2, -1)\n', (34641, 34654), True, 'import torch.nn.functional as F\n'), 
((34670, 34694), 'torch.nn.functional.normalize', 'F.normalize', (['tail', '(2)', '(-1)'], {}), '(tail, 2, -1)\n', (34681, 34694), True, 'import torch.nn.functional as F\n'), ((34714, 34742), 'torch.nn.functional.normalize', 'F.normalize', (['relation', '(2)', '(-1)'], {}), '(relation, 2, -1)\n', (34725, 34742), True, 'import torch.nn.functional as F\n'), ((36082, 36139), 'torch.sqrt', 'torch.sqrt', (['(re_0 ** 2 + re_1 ** 2 + re_2 ** 2 + re_3 ** 2)'], {}), '(re_0 ** 2 + re_1 ** 2 + re_2 ** 2 + re_3 ** 2)\n', (36092, 36139), False, 'import torch\n'), ((38041, 38068), 'torch.chunk', 'torch.chunk', (['head', '(8)'], {'dim': '(2)'}), '(head, 8, dim=2)\n', (38052, 38068), False, 'import torch\n'), ((38118, 38149), 'torch.chunk', 'torch.chunk', (['relation', '(8)'], {'dim': '(2)'}), '(relation, 8, dim=2)\n', (38129, 38149), False, 'import torch\n'), ((38215, 38242), 'torch.chunk', 'torch.chunk', (['tail', '(8)'], {'dim': '(2)'}), '(tail, 8, dim=2)\n', (38226, 38242), False, 'import torch\n'), ((39992, 40093), 'torch.sqrt', 'torch.sqrt', (['(r_1 ** 2 + r_2 ** 2 + r_3 ** 2 + r_4 ** 2 + r_5 ** 2 + r_6 ** 2 + r_7 ** 2 +\n r_8 ** 2)'], {}), '(r_1 ** 2 + r_2 ** 2 + r_3 ** 2 + r_4 ** 2 + r_5 ** 2 + r_6 ** 2 +\n r_7 ** 2 + r_8 ** 2)\n', (40002, 40093), False, 'import torch\n'), ((44473, 44490), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (44484, 44490), False, 'from collections import defaultdict\n'), ((837, 858), 'torch.Tensor', 'torch.Tensor', (['[gamma]'], {}), '([gamma])\n', (849, 858), False, 'import torch\n'), ((1876, 1913), 'torch.zeros', 'torch.zeros', (['nentity', 'self.entity_dim'], {}), '(nentity, self.entity_dim)\n', (1887, 1913), False, 'import torch\n'), ((2192, 2233), 'torch.zeros', 'torch.zeros', (['nrelation', 'self.relation_dim'], {}), '(nrelation, self.relation_dim)\n', (2203, 2233), False, 'import torch\n'), ((3890, 3905), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (3900, 3905), True, 'import torch.nn as nn\n'), 
((3933, 3950), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.0)'], {}), '(0.0)\n', (3945, 3950), True, 'import torch.nn as nn\n'), ((3979, 3994), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (3989, 3994), True, 'import torch.nn as nn\n'), ((4020, 4044), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(16)', '(3, 3)'], {}), '(1, 16, (3, 3))\n', (4029, 4044), True, 'import torch.nn as nn\n'), ((4068, 4085), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {}), '(1)\n', (4082, 4085), True, 'import torch.nn as nn\n'), ((4109, 4127), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (4123, 4127), True, 'import torch.nn as nn\n'), ((4151, 4182), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.entity_dim'], {}), '(self.entity_dim)\n', (4165, 4182), True, 'import torch.nn as nn\n'), ((4205, 4294), 'torch.nn.Linear', 'torch.nn.Linear', (['(16 * (self.emb_dim1 - 1) * (self.emb_dim2 - 2) * 2)', 'self.entity_dim'], {}), '(16 * (self.emb_dim1 - 1) * (self.emb_dim2 - 2) * 2, self.\n entity_dim)\n', (4220, 4294), False, 'import torch\n'), ((4381, 4398), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {}), '(1)\n', (4395, 4398), True, 'import torch.nn as nn\n'), ((4429, 4461), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.nfmap', '(1, 3)'], {}), '(1, self.nfmap, (1, 3))\n', (4438, 4461), True, 'import torch.nn as nn\n'), ((4509, 4535), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.nfmap'], {}), '(self.nfmap)\n', (4523, 4535), True, 'import torch.nn as nn\n'), ((4563, 4578), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (4573, 4578), True, 'import torch.nn as nn\n'), ((4612, 4621), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4619, 4621), True, 'import torch.nn as nn\n'), ((4711, 4765), 'torch.nn.Linear', 'nn.Linear', (['(self.nfmap * self.entity_dim)', '(1)'], {'bias': '(False)'}), '(self.nfmap * self.entity_dim, 1, bias=False)\n', (4720, 4765), True, 'import torch.nn as nn\n'), ((4912, 4944), 'torch.nn.Conv2d', 
'nn.Conv2d', (['(1)', 'self.nfmap', '(1, 3)'], {}), '(1, self.nfmap, (1, 3))\n', (4921, 4944), True, 'import torch.nn as nn\n'), ((5103, 5118), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (5113, 5118), True, 'import torch.nn as nn\n'), ((5152, 5161), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5159, 5161), True, 'import torch.nn as nn\n'), ((5251, 5305), 'torch.nn.Linear', 'nn.Linear', (['(self.nfmap * self.entity_dim)', '(1)'], {'bias': '(False)'}), '(self.nfmap * self.entity_dim, 1, bias=False)\n', (5260, 5305), True, 'import torch.nn as nn\n'), ((5431, 5475), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'self.nfmap1', '(3, 3)'], {'padding': '(1)'}), '(3, self.nfmap1, (3, 3), padding=1)\n', (5440, 5475), True, 'import torch.nn as nn\n'), ((5527, 5581), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nfmap1', 'self.nfmap2', '(3, 3)'], {'padding': '(1)'}), '(self.nfmap1, self.nfmap2, (3, 3), padding=1)\n', (5536, 5581), True, 'import torch.nn as nn\n'), ((5628, 5643), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (5638, 5643), True, 'import torch.nn as nn\n'), ((5677, 5686), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5684, 5686), True, 'import torch.nn as nn\n'), ((5776, 5831), 'torch.nn.Linear', 'nn.Linear', (['(self.nfmap2 * self.entity_dim)', '(1)'], {'bias': '(False)'}), '(self.nfmap2 * self.entity_dim, 1, bias=False)\n', (5785, 5831), True, 'import torch.nn as nn\n'), ((5941, 5988), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.nfmaps[0]', '(3, 3)'], {'padding': '(1)'}), '(1, self.nfmaps[0], (3, 3), padding=1)\n', (5950, 5988), True, 'import torch.nn as nn\n'), ((6021, 6081), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nfmaps[0]', 'self.nfmaps[1]', '(3, 3)'], {'padding': '(1)'}), '(self.nfmaps[0], self.nfmaps[1], (3, 3), padding=1)\n', (6030, 6081), True, 'import torch.nn as nn\n'), ((6114, 6163), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nfmaps[1]', 'self.nfmaps[2]', '(2, 2)'], {}), '(self.nfmaps[1], self.nfmaps[2], (2, 2))\n', (6123, 
6163), True, 'import torch.nn as nn\n'), ((6196, 6232), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nfmaps[2]', '(1)', '(1, 1)'], {}), '(self.nfmaps[2], 1, (1, 1))\n', (6205, 6232), True, 'import torch.nn as nn\n'), ((6448, 6463), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (6458, 6463), True, 'import torch.nn as nn\n'), ((6497, 6506), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6504, 6506), True, 'import torch.nn as nn\n'), ((6599, 6642), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.nfmap', '(3, 3)'], {'padding': '(1)'}), '(1, self.nfmap, (3, 3), padding=1)\n', (6608, 6642), True, 'import torch.nn as nn\n'), ((6670, 6685), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (6680, 6685), True, 'import torch.nn as nn\n'), ((6719, 6728), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6726, 6728), True, 'import torch.nn as nn\n'), ((6757, 6817), 'torch.nn.Linear', 'nn.Linear', (['(self.nfmap * self.entity_dim * 2)', 'self.entity_dim'], {}), '(self.nfmap * self.entity_dim * 2, self.entity_dim)\n', (6766, 6817), True, 'import torch.nn as nn\n'), ((6916, 6933), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {}), '(1)\n', (6930, 6933), True, 'import torch.nn as nn\n'), ((6965, 6997), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.nfmap', '(1, 2)'], {}), '(1, self.nfmap, (1, 2))\n', (6974, 6997), True, 'import torch.nn as nn\n'), ((7029, 7070), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nfmap', 'self.nfmap', '(1, 2)'], {}), '(self.nfmap, self.nfmap, (1, 2))\n', (7038, 7070), True, 'import torch.nn as nn\n'), ((7099, 7125), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.nfmap'], {}), '(self.nfmap)\n', (7113, 7125), True, 'import torch.nn as nn\n'), ((7153, 7168), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (7163, 7168), True, 'import torch.nn as nn\n'), ((7202, 7211), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7209, 7211), True, 'import torch.nn as nn\n'), ((7301, 7355), 'torch.nn.Linear', 'nn.Linear', (['(self.nfmap 
* self.entity_dim)', '(1)'], {'bias': '(False)'}), '(self.nfmap * self.entity_dim, 1, bias=False)\n', (7310, 7355), True, 'import torch.nn as nn\n'), ((8223, 8232), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8230, 8232), True, 'import torch.nn as nn\n'), ((8304, 8338), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(3, 3)'], {'padding': '(1)'}), '(1, 4, (3, 3), padding=1)\n', (8313, 8338), True, 'import torch.nn as nn\n'), ((8371, 8405), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(3, 3)'], {'padding': '(1)'}), '(1, 4, (3, 3), padding=1)\n', (8380, 8405), True, 'import torch.nn as nn\n'), ((8438, 8472), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(3, 3)'], {'padding': '(1)'}), '(1, 4, (3, 3), padding=1)\n', (8447, 8472), True, 'import torch.nn as nn\n'), ((12794, 12823), 'torch.norm', 'torch.norm', (['score'], {'p': '(1)', 'dim': '(2)'}), '(score, p=1, dim=2)\n', (12804, 12823), False, 'import torch\n'), ((18038, 18063), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (18048, 18063), False, 'import torch\n'), ((19708, 19733), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (19718, 19733), False, 'import torch\n'), ((22815, 22840), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (22825, 22840), False, 'import torch\n'), ((23232, 23256), 'numpy.sqrt', 'np.sqrt', (['self.entity_dim'], {}), '(self.entity_dim)\n', (23239, 23256), True, 'import numpy as np\n'), ((24556, 24581), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (24566, 24581), False, 'import torch\n'), ((26286, 26311), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (26296, 26311), False, 'import torch\n'), ((27811, 27835), 'numpy.sqrt', 'np.sqrt', (['self.entity_dim'], {}), '(self.entity_dim)\n', (27818, 27835), True, 'import numpy as np\n'), ((28289, 28330), 'torch.nn.functional.conv2d', 'F.conv2d', (['head[s]', 'relation[s]'], {'padding': '(1)'}), 
'(head[s], relation[s], padding=1)\n', (28297, 28330), True, 'import torch.nn.functional as F\n'), ((31021, 31051), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['out_conv', '(3, 1)'], {}), '(out_conv, (3, 1))\n', (31033, 31051), True, 'import torch.nn.functional as F\n'), ((31264, 31289), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (31274, 31289), False, 'import torch\n'), ((34010, 34035), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (34020, 34035), False, 'import torch\n'), ((34588, 34612), 'numpy.sqrt', 'np.sqrt', (['self.entity_dim'], {}), '(self.entity_dim)\n', (34595, 34612), True, 'import numpy as np\n'), ((36680, 36705), 'torch.mean', 'torch.mean', (['(relation ** 2)'], {}), '(relation ** 2)\n', (36690, 36705), False, 'import torch\n'), ((37235, 37276), 'torch.stack', 'torch.stack', (['[complex_score, quate_score]'], {}), '([complex_score, quate_score])\n', (37246, 37276), False, 'import torch\n'), ((38731, 38753), 'torch.sum', 'torch.sum', (['score_r', '(-1)'], {}), '(score_r, -1)\n', (38740, 38753), False, 'import torch\n'), ((43676, 43751), 'dataloader.TestDataset', 'TestDataset', (['test_triples', 'args', '"""head-batch"""', 'random_sampling', 'entity_dict'], {}), "(test_triples, args, 'head-batch', random_sampling, entity_dict)\n", (43687, 43751), False, 'from dataloader import TestDataset\n'), ((44055, 44130), 'dataloader.TestDataset', 'TestDataset', (['test_triples', 'args', '"""tail-batch"""', 'random_sampling', 'entity_dict'], {}), "(test_triples, args, 'tail-batch', random_sampling, entity_dict)\n", (44066, 44130), False, 'from dataloader import TestDataset\n'), ((44598, 44613), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (44611, 44613), False, 'import torch\n'), ((17990, 18011), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (18000, 18011), False, 'import torch\n'), ((18014, 18035), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', 
(18024, 18035), False, 'import torch\n'), ((19660, 19681), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (19670, 19681), False, 'import torch\n'), ((19684, 19705), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (19694, 19705), False, 'import torch\n'), ((22767, 22788), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (22777, 22788), False, 'import torch\n'), ((22791, 22812), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (22801, 22812), False, 'import torch\n'), ((24508, 24529), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (24518, 24529), False, 'import torch\n'), ((24532, 24553), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (24542, 24553), False, 'import torch\n'), ((26238, 26259), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (26248, 26259), False, 'import torch\n'), ((26262, 26283), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (26272, 26283), False, 'import torch\n'), ((26887, 26915), 'torch.matmul', 'torch.matmul', (['head', 'relation'], {}), '(head, relation)\n', (26899, 26915), False, 'import torch\n'), ((31216, 31237), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (31226, 31237), False, 'import torch\n'), ((31240, 31261), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (31250, 31261), False, 'import torch\n'), ((33962, 33983), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (33972, 33983), False, 'import torch\n'), ((33986, 34007), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (33996, 34007), False, 'import torch\n'), ((36632, 36653), 'torch.mean', 'torch.mean', (['(head ** 2)'], {}), '(head ** 2)\n', (36642, 36653), False, 'import torch\n'), ((36656, 36677), 'torch.mean', 'torch.mean', (['(tail ** 2)'], {}), '(tail ** 2)\n', (36666, 36677), False, 'import torch\n'), ((41748, 41776), 
'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['positive_score'], {}), '(positive_score)\n', (41760, 41776), True, 'import torch.nn.functional as F\n'), ((7455, 7523), 'torch.empty', 'torch.empty', (['nrelation', 'num_slices', 'self.entity_dim', 'self.entity_dim'], {}), '(nrelation, num_slices, self.entity_dim, self.entity_dim)\n', (7466, 7523), False, 'import torch\n'), ((7650, 7701), 'torch.empty', 'torch.empty', (['nrelation', 'num_slices', 'self.entity_dim'], {}), '(nrelation, num_slices, self.entity_dim)\n', (7661, 7701), False, 'import torch\n'), ((7812, 7863), 'torch.empty', 'torch.empty', (['nrelation', 'num_slices', 'self.entity_dim'], {}), '(nrelation, num_slices, self.entity_dim)\n', (7823, 7863), False, 'import torch\n'), ((7973, 8007), 'torch.empty', 'torch.empty', (['nrelation', 'num_slices'], {}), '(nrelation, num_slices)\n', (7984, 8007), False, 'import torch\n'), ((8101, 8135), 'torch.empty', 'torch.empty', (['nrelation', 'num_slices'], {}), '(nrelation, num_slices)\n', (8112, 8135), False, 'import torch\n'), ((9213, 9281), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'sample[:, 0]'}), '(self.entity_embedding, dim=0, index=sample[:, 0])\n', (9231, 9281), False, 'import torch\n'), ((9426, 9496), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'sample[:, 1]'}), '(self.relation_embedding, dim=0, index=sample[:, 1])\n', (9444, 9496), False, 'import torch\n'), ((9633, 9701), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'sample[:, 2]'}), '(self.entity_embedding, dim=0, index=sample[:, 2])\n', (9651, 9701), False, 'import torch\n'), ((41546, 41575), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['(-negative_score)'], {}), '(-negative_score)\n', (41558, 41575), True, 'import torch.nn.functional as F\n'), ((10258, 10331), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], 
{'dim': '(0)', 'index': 'tail_part[:, 1]'}), '(self.relation_embedding, dim=0, index=tail_part[:, 1])\n', (10276, 10331), False, 'import torch\n'), ((10471, 10542), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'tail_part[:, 2]'}), '(self.entity_embedding, dim=0, index=tail_part[:, 2])\n', (10489, 10542), False, 'import torch\n'), ((41461, 41490), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['(-negative_score)'], {}), '(-negative_score)\n', (41473, 41490), True, 'import torch.nn.functional as F\n'), ((45415, 45484), 'logging.info', 'logging.info', (["('Evaluating the model... (%d/%d)' % (step, total_steps))"], {}), "('Evaluating the model... (%d/%d)' % (step, total_steps))\n", (45427, 45484), False, 'import logging\n'), ((10844, 10915), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'head_part[:, 0]'}), '(self.entity_embedding, dim=0, index=head_part[:, 0])\n', (10862, 10915), False, 'import torch\n'), ((11063, 11136), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'head_part[:, 1]'}), '(self.relation_embedding, dim=0, index=head_part[:, 1])\n', (11081, 11136), False, 'import torch\n'), ((41356, 41419), 'torch.nn.functional.softmax', 'F.softmax', (['(negative_score * args.adversarial_temperature)'], {'dim': '(1)'}), '(negative_score * args.adversarial_temperature, dim=1)\n', (41365, 41419), True, 'import torch.nn.functional as F\n'), ((45613, 45641), 'torch.cat', 'torch.cat', (['test_logs[metric]'], {}), '(test_logs[metric])\n', (45622, 45641), False, 'import torch\n')] |
"""Data generators
"""
import glob
import os
import json
from pathlib import Path
from typing import Tuple, List, Union
from PIL import Image
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import Sequence
def encode_sentence(s: str, tokenizer) -> np.ndarray:
"""Encode a sentence with bert tokens
:param s: sentence
:param tokenizer: the BERT tokenizer
"""
tokens = [i for i in list(tokenizer.tokenize(s))]
tokens = tokens[1:]
return np.array(tokenizer.convert_tokens_to_ids(tokens))
def bert_encode(x_data: List[str], tokenizer, seq_len: int) -> List[tf.Tensor]:
    """Convert a batch of strings into BERT model inputs.

    :param x_data: strings to encode
    :param tokenizer: the BERT tokenizer
    :param seq_len: maximum sequence length (including the [CLS] prefix)
    :return: [input_word_ids, input_mask, input_type_ids]
    """
    # Truncate every encoded sentence to leave room for the [CLS] token.
    encoded = [encode_sentence(text, tokenizer)[:seq_len - 1] for text in x_data]
    sentence = tf.ragged.constant(encoded)
    # One [CLS] id prepended to every row of the batch.
    cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])] * sentence.shape[0]
    input_word_ids = tf.concat([cls, sentence], axis=-1)
    input_mask = tf.ones_like(input_word_ids).to_tensor()
    # Single-segment inputs: all token type ids are zero.
    input_type_ids = tf.concat(
        [tf.zeros_like(cls), tf.zeros_like(sentence)], axis=-1).to_tensor()
    return [input_word_ids.to_tensor(), input_mask, input_type_ids]
def format_metadata_output(descriptions: List[str],
                           titles: List[str],
                           tokenizer = None,
                           max_len: Union[int, List[int]] = 0) -> List[Union[np.ndarray, tf.Tensor]]:
    """Format description/title metadata for BERT or USE models.

    With a tokenizer each text field is BERT-encoded (three tensors per
    field); without one the raw strings are returned as numpy arrays.
    """
    # A single int max_len applies to both text fields.
    lengths = [max_len] * 2 if isinstance(max_len, int) else max_len
    if tokenizer is None:
        return [np.array(descriptions), np.array(titles)]
    formatted = list()
    for texts, length in zip([descriptions, titles], lengths):
        formatted += bert_encode(texts, tokenizer, length)
    return formatted
class BaseDataGenerator(Sequence):
    """Keras-style generator over a class-per-directory data tree.

    Class labels are discovered from the sub-directories of ``path`` and a
    (optionally sub-sampled, pre-shuffled) list of instance files is built.
    """
    def __init__(self,
                 path: str,
                 batch_size: int,
                 target_size: Tuple[int, int],
                 scaling: float = (1. / 255),
                 sample_frac: float = None,
                 preshuffle: bool = True,
                 class_limit: int = None) -> None:
        """Constructor
        :param path: path to data tree
        :param batch_size: size of batches used in training
        :param target_size: image size
        :param scaling: image scaling value
        :param sample_frac: per class sampling fraction
        :param preshuffle: shuffle before the first epoch
        :param class_limit: cap on the number of classes used
        """
        # arguments
        self.path = path
        self.batch_size = batch_size
        self.target_size = target_size
        self.scaling = scaling
        self.sample_frac = sample_frac
        # derived state
        self.files = list()
        self.json_to_image_mapping = dict()
        self.label_mapping = dict()
        self.classes = list()
        # Build a label index (targets sparse categorical crossentropy)
        # from the sorted class directory names.
        class_dirs = sorted(glob.glob(self.path + '*'))[:class_limit]
        for index, class_dir in enumerate(class_dirs):
            name = class_dir.split('/')[-1]
            self.classes.append(name)
            self.label_mapping[name] = index
        # Collect instance files, sampling each class independently.
        for class_dir in class_dirs:
            instance_files = glob.glob(class_dir + '/*.*')
            if self.sample_frac is not None:
                sample_size = np.floor(len(instance_files) * self.sample_frac).astype(int)
                instance_files = np.random.choice(
                    instance_files,
                    sample_size,
                    replace = False
                )
            self.files += list(instance_files)
        # preshuffle for the first epoch
        if preshuffle:
            self.on_epoch_end()
        print(f"Found {len(self.files)} instances belonging to {len(self.classes)} classes")
    @property
    def num_classes(self) -> int:
        """Number of discovered classes."""
        return len(self.classes)
    @property
    def num_instances(self) -> int:
        """Number of discovered instance files."""
        return len(self.files)
    @staticmethod
    def _load_metadata(path: str, lower_case: bool = False) -> Tuple[str, str]:
        """Read (description, title) from the JSON file paired with ``path``.

        The metadata file is ``path`` with its extension stripped.
        """
        with open(Path(path).with_suffix(''), 'r') as in_file:
            payload = json.load(in_file)
        description = payload['description']
        title = payload['title']
        if lower_case:
            return description.lower(), title.lower()
        return description, title
    def __len__(self) -> int:
        """Number of full batches per epoch."""
        return int(np.floor(len(self.files) / self.batch_size))
    def on_epoch_end(self) -> None:
        """Reshuffle the file list between epochs."""
        np.random.shuffle(self.files)
class ImageMetaDataGenerator(BaseDataGenerator):
    """Generator yielding ([image, description, title], label) batches."""
    def __init__(self,
                 path: str,
                 batch_size: int,
                 target_size: Tuple[int, int],
                 scaling: float = (1. / 255),
                 sample_frac: float = None,
                 class_limit: int = None,
                 bert_tokenizer = None,
                 bert_sentence_len: Union[int, List[int]] = 0) -> None:
        """Constructor
        :param path: path to data tree
        :param batch_size: size of batches used in training
        :param target_size: image size
        :param scaling: image scaling value
        :param sample_frac: per class sampling
        :param class_limit: a limit on the number of classes used for training
        :param bert_tokenizer: the BERT tokenizer (if using BERT)
        :param bert_sentence_len: sentence length(s) for BERT encoding
        """
        super().__init__(path,
                         batch_size,
                         target_size,
                         scaling,
                         sample_frac,
                         class_limit=class_limit)
        self.tokenizer = bert_tokenizer
        self.max_seq_len = bert_sentence_len
        # Metadata is lower-cased only when a BERT tokenizer is in use.
        self.bert_tokenize = self.tokenizer is not None
    def _load_image(self, path: str) -> np.ndarray:
        """Load an image, resized to ``target_size``, as a scaled RGB array."""
        image = Image.open(path)
        image = image.resize(self.target_size).convert('RGB')
        return np.array(image) * self.scaling
    def __getitem__(self, index: int) -> Tuple[List[np.ndarray], np.ndarray]:
        """Build one batch as ([images, *text_inputs], labels)."""
        start = index * self.batch_size
        f_batch = self.files[start:start + self.batch_size]
        descriptions = list()
        titles = list()
        images = list()
        labels = list()
        for fname in f_batch:
            description, title = self._load_metadata(fname, self.bert_tokenize)
            descriptions.append(description)
            titles.append(title)
            images.append(self._load_image(fname))
            # the class label is the parent directory name
            labels.append(self.label_mapping[fname.split('/')[-2]])
        text_inputs = format_metadata_output(
            descriptions,
            titles,
            self.tokenizer,
            self.max_seq_len
        )
        batch_shape = [len(f_batch)] + list(self.target_size) + [3]
        image_block = np.array(images).reshape(*batch_shape)
        # images, descriptions, titles, labels
        return ([image_block] + text_inputs, np.array(labels))
class MetaDataGenerator(BaseDataGenerator):
    """Keras-like data generator for metadata (description + title) only."""
    def __init__(self,
                 path: str,
                 batch_size: int,
                 target_size: Tuple[int, int],
                 scaling: float = (1. / 255),
                 sample_frac: float = None,
                 preshuffle: bool = True,
                 class_limit: int = None,
                 bert_tokenizer = None,
                 bert_sentence_len: Union[int, List[int]] = 0
                 ) -> None:
        """Constructor
        :param path: path to data tree
        :param batch_size: size of batches used in training
        :param target_size: image size (stored but unused for metadata batches)
        :param scaling: image scaling value (stored but unused)
        :param sample_frac: per class sampling
        :param preshuffle: shuffle before first epoch
        :param class_limit: a limit on the number of classes used for training
        :param bert_tokenizer: the BERT tokenizer (if using BERT)
        :param bert_sentence_len: the size of the sentences lengths
        """
        # BUG FIX: target_size and scaling were previously ignored and
        # hard-coded to (300, 300) / 1/255 in the super() call; they are
        # unused by metadata batches, but are now stored as documented.
        super().__init__(path,
                         batch_size,
                         target_size,
                         scaling,
                         sample_frac,
                         preshuffle,
                         class_limit=class_limit)
        self.tokenizer = bert_tokenizer
        self.max_seq_len = bert_sentence_len
        # Metadata is lower-cased only when a BERT tokenizer is in use.
        self.bert_tokenize = self.tokenizer is not None
    def __getitem__(self, index: int) -> Tuple[List[Union[np.ndarray, tf.Tensor]], np.ndarray]:
        """Generate one batch of data.
        (Returned as ([Description, Title], label))
        """
        labels = list()
        descriptions = list()
        titles = list()
        # get a batch
        f_batch = self.files[index * self.batch_size:(index + 1) * self.batch_size]
        # prepare the batch
        for f in f_batch:
            desc, titl = self._load_metadata(f, self.bert_tokenize)
            descriptions.append(desc)
            titles.append(titl)
            # the class label is the parent directory name
            labels.append(self.label_mapping[f.split('/')[-2]])
        # format_metadata_output already returns plain numpy arrays when
        # tokenizer is None, so both BERT and non-BERT paths are handled.
        formatted_data = format_metadata_output(
            descriptions,
            titles,
            self.tokenizer,
            self.max_seq_len
        )
        return (formatted_data, np.array(labels))
class ImageGenerator(BaseDataGenerator):
    """Keras-like generator yielding (image, label) batches."""
    def __init__(self,
                 path: str,
                 batch_size: int,
                 target_size: Tuple[int, int],
                 scaling: float = (1. / 255),
                 sample_frac: float = None,
                 class_limit: int = None) -> None:
        """Constructor
        :param path: path to data tree
        :param batch_size: size of batches used in training
        :param target_size: image size
        :param scaling: image scaling value
        :param sample_frac: per class sampling
        :param class_limit: a limit on the number of classes used
        """
        super().__init__(path,
                         batch_size,
                         target_size,
                         scaling,
                         sample_frac,
                         class_limit=class_limit)
    def _load_image(self, path: str) -> np.ndarray:
        """Load an image, resized to ``target_size``, as a scaled RGB array."""
        image = Image.open(path)
        image = image.resize(self.target_size).convert('RGB')
        return np.array(image) * self.scaling
    def __getitem__(self, index: int) -> Tuple[List[np.ndarray], np.ndarray]:
        """Generate one batch as (images, labels)."""
        start = index * self.batch_size
        f_batch = self.files[start:start + self.batch_size]
        images = [self._load_image(fname) for fname in f_batch]
        # the class label is the parent directory name
        labels = [self.label_mapping[fname.split('/')[-2]] for fname in f_batch]
        batch_shape = [len(f_batch)] + list(self.target_size) + [3]
        return (np.array(images).reshape(*batch_shape),  # X
                np.array(labels))  # y
def get_generators(generator_type: str,
                   primary_path: str,
                   batch_size: int,
                   img_size: Tuple[int],
                   sampling_frac: float = None,
                   class_limit: int = None) -> Tuple[Sequence]:
    """Build matching training/validation generators.

    :param generator_type: one of 'image', 'text' or 'multi'
    :param primary_path: path to data tree; should contain train and validation
    :param batch_size: size of batches used in training
    :param img_size: image size
    :param sampling_frac: per class sampling
    :param class_limit: a limit on the number of classes used for training
    :raises ValueError: if ``generator_type`` is not recognised
    :return: tuple of training data gen and validation generator
    """
    # pick the type of class to use (training and validation always share it)
    if generator_type == 'image':
        generator_cls = ImageGenerator
    elif generator_type == 'text':
        generator_cls = MetaDataGenerator
    elif generator_type == 'multi':
        generator_cls = ImageMetaDataGenerator
    else:
        # BUG FIX: message previously read 'texxt'
        raise ValueError('generator type not understood must be one of '
                         '[image, text, multi]')
    # build generators
    training_data = generator_cls(
        primary_path + 'train/',
        batch_size,
        img_size,
        scaling=(1. / 255),
        sample_frac=sampling_frac,
        class_limit=class_limit
    )
    validation_data = generator_cls(
        primary_path + 'validation/',
        batch_size,
        img_size,
        scaling=(1. / 255),
        sample_frac=sampling_frac,
        class_limit=class_limit
    )
    return training_data, validation_data
| [
"json.load",
"tensorflow.zeros_like",
"tensorflow.concat",
"PIL.Image.open",
"tensorflow.ones_like",
"pathlib.Path",
"numpy.array",
"glob.glob",
"tensorflow.ragged.constant",
"numpy.random.shuffle"
] | [((998, 1022), 'tensorflow.ragged.constant', 'tf.ragged.constant', (['data'], {}), '(data)\n', (1016, 1022), True, 'import tensorflow as tf\n'), ((1122, 1157), 'tensorflow.concat', 'tf.concat', (['[cls, sentence]'], {'axis': '(-1)'}), '([cls, sentence], axis=-1)\n', (1131, 1157), True, 'import tensorflow as tf\n'), ((1232, 1250), 'tensorflow.zeros_like', 'tf.zeros_like', (['cls'], {}), '(cls)\n', (1245, 1250), True, 'import tensorflow as tf\n'), ((1265, 1288), 'tensorflow.zeros_like', 'tf.zeros_like', (['sentence'], {}), '(sentence)\n', (1278, 1288), True, 'import tensorflow as tf\n'), ((5287, 5316), 'numpy.random.shuffle', 'np.random.shuffle', (['self.files'], {}), '(self.files)\n', (5304, 5316), True, 'import numpy as np\n'), ((6972, 6988), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6982, 6988), False, 'from PIL import Image\n'), ((12332, 12348), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (12342, 12348), False, 'from PIL import Image\n'), ((1175, 1203), 'tensorflow.ones_like', 'tf.ones_like', (['input_word_ids'], {}), '(input_word_ids)\n', (1187, 1203), True, 'import tensorflow as tf\n'), ((1310, 1349), 'tensorflow.concat', 'tf.concat', (['[type_cls, type_s1]'], {'axis': '(-1)'}), '([type_cls, type_s1], axis=-1)\n', (1319, 1349), True, 'import tensorflow as tf\n'), ((2190, 2212), 'numpy.array', 'np.array', (['descriptions'], {}), '(descriptions)\n', (2198, 2212), True, 'import numpy as np\n'), ((2214, 2230), 'numpy.array', 'np.array', (['titles'], {}), '(titles)\n', (2222, 2230), True, 'import numpy as np\n'), ((3757, 3787), 'glob.glob', 'glob.glob', (["(class_path + '/*.*')"], {}), "(class_path + '/*.*')\n", (3766, 3787), False, 'import glob\n'), ((4803, 4821), 'json.load', 'json.load', (['in_file'], {}), '(in_file)\n', (4812, 4821), False, 'import json\n'), ((7061, 7074), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (7069, 7074), True, 'import numpy as np\n'), ((8453, 8469), 'numpy.array', 'np.array', (['labels'], 
{}), '(labels)\n', (8461, 8469), True, 'import numpy as np\n'), ((12421, 12434), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (12429, 12434), True, 'import numpy as np\n'), ((13338, 13354), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (13346, 13354), True, 'import numpy as np\n'), ((3408, 3434), 'glob.glob', 'glob.glob', (["(self.path + '*')"], {}), "(self.path + '*')\n", (3417, 3434), False, 'import glob\n'), ((8269, 8285), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (8277, 8285), True, 'import numpy as np\n'), ((11171, 11187), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (11179, 11187), True, 'import numpy as np\n'), ((11297, 11313), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (11305, 11313), True, 'import numpy as np\n'), ((11224, 11246), 'numpy.array', 'np.array', (['descriptions'], {}), '(descriptions)\n', (11232, 11246), True, 'import numpy as np\n'), ((11263, 11279), 'numpy.array', 'np.array', (['titles'], {}), '(titles)\n', (11271, 11279), True, 'import numpy as np\n'), ((13275, 13291), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (13283, 13291), True, 'import numpy as np\n'), ((4738, 4748), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (4742, 4748), False, 'from pathlib import Path\n')] |
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate
from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax
from ..base.deepCapsLayers import Conv2DCaps, ConvCapsuleLayer3D, CapsuleLayer, CapsToScalars, Mask_CID, ConvertToCaps, FlattenCaps
def CapsNet(input_shape, num_class, routings, dim_capsule, **kwargs):
    """Build the DeepCaps capsule network (encoder + reconstruction decoder).

    Args:
        input_shape: Shape of one input sample, e.g. ``(H, W, C)``.
        num_class: Number of classes, i.e. number of final class capsules.
        routings: Number of dynamic-routing iterations for ``CapsuleLayer``.
        dim_capsule: Dimensionality of each class-capsule vector.
        **kwargs: Unused; accepted for call-site compatibility.

    Returns:
        tuple: ``(train_model, eval_model)`` where ``train_model`` maps
        ``[image, one_hot_label] -> [class_scores, reconstruction]`` and
        ``eval_model`` maps ``image -> [class_scores, reconstruction]``.
    """
    # ----- encoder -----
    x = Input(shape=input_shape)
    l = x
    l = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding="same",
               kernel_initializer='he_normal')(l)  # common conv layer
    l = BatchNormalization()(l)
    l = ConvertToCaps()(l)
    # Capsule cell 1: strided downsample, then a residual stack of conv-caps.
    l = Conv2DCaps(32, 4, kernel_size=(3, 3), strides=(2, 2), r_num=1, b_alphas=[1, 1, 1])(l)
    l_skip = Conv2DCaps(32, 4, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 4, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 4, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = layers.Add()([l, l_skip])
    # Capsule cell 2: same pattern with 8 atoms per capsule.
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(2, 2), r_num=1, b_alphas=[1, 1, 1])(l)
    l_skip = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = layers.Add()([l, l_skip])
    # Capsule cell 3; its output (l1) is also fed to the flatten/concat stage.
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(2, 2), r_num=1, b_alphas=[1, 1, 1])(l)
    l_skip = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = layers.Add()([l, l_skip])
    l1 = l
    # Capsule cell 4: the skip branch uses 3D conv-capsule routing.
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(2, 2), r_num=1, b_alphas=[1, 1, 1])(l)
    l_skip = ConvCapsuleLayer3D(kernel_size=3, num_capsule=32, num_atoms=8, strides=1, padding='same', routings=3)(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = Conv2DCaps(32, 8, kernel_size=(3, 3), strides=(1, 1), r_num=1, b_alphas=[1, 1, 1])(l)
    l = layers.Add()([l, l_skip])
    l2 = l
    # Flatten capsule maps from the last two cells and route to class capsules.
    la = FlattenCaps()(l2)
    lb = FlattenCaps()(l1)
    l = layers.Concatenate(axis=-2)([la, lb])
    # l = Dropout(0.4)(l)
    digits_caps = CapsuleLayer(num_capsule=num_class, dim_capsule=dim_capsule, routings=routings, channels=0, name='digit_caps')(l)
    l = CapsToScalars(name='capsnet')(digits_caps)
    #l = Softmax()(l)
    m_capsnet = models.Model(inputs=x, outputs=l, name='capsnet_model')
    y = Input(shape=(num_class,))
    masked_by_y = Mask_CID()([digits_caps, y])  # mask by true label (training)
    masked = Mask_CID()(digits_caps)        # mask by predicted capsule (inference)
    # ----- decoder network -----
    decoder = models.Sequential(name='decoder')
    # FIX: tf.keras Dense takes `units` as the first positional argument; the
    # legacy `output_dim` keyword is not accepted and raised a TypeError.
    decoder.add(Dense(np.prod(input_shape), input_dim=dim_capsule, activation="relu",
                      kernel_initializer='he_normal'))
    decoder.add(Reshape(input_shape))
    decoder.add(BatchNormalization(momentum=0.8))
    decoder.add(Conv2DTranspose(64, (3, 3), padding='same', activation="relu", kernel_initializer='he_normal'))
    decoder.add(Conv2DTranspose(32, (3, 3), padding='same', activation="relu", kernel_initializer='he_normal'))
    decoder.add(Conv2DTranspose(16, (3, 3), padding='same', activation="relu", kernel_initializer='he_normal'))
    decoder.add(Conv2DTranspose(8, (3, 3), padding='same', activation="relu", kernel_initializer='he_normal'))
    decoder.add(Conv2DTranspose(5, (3, 3), padding='same', activation="linear", kernel_initializer='he_normal'))
    # NOTE(review): hard-coded 64x64x5 output — presumably matches input_shape;
    # TODO confirm against callers.
    decoder.add(Reshape(target_shape=(64, 64, 5), name='out_recon'))
    train_model = models.Model([x, y], [m_capsnet.output, decoder(masked_by_y)])
    eval_model = models.Model(x, [m_capsnet.output, decoder(masked)])
    train_model.summary()
    return train_model, eval_model
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.models.Model",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Conv2DTranspose",
... | [((630, 654), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (635, 654), False, 'from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate\n'), ((2916, 2971), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'x', 'outputs': 'l', 'name': '"""capsnet_model"""'}), "(inputs=x, outputs=l, name='capsnet_model')\n", (2928, 2971), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((2981, 3006), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(num_class,)'}), '(shape=(num_class,))\n', (2986, 3006), False, 'from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate\n'), ((3131, 3164), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {'name': '"""decoder"""'}), "(name='decoder')\n", (3148, 3164), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((674, 780), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""', 'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, (3, 3), strides=(1, 1), activation='relu', padding='same',\n kernel_initializer='he_normal')\n", (680, 780), False, 'from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate\n'), ((808, 828), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (826, 828), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((1249, 1261), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (1259, 1261), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((1665, 1677), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (1675, 1677), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((2081, 
2093), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (2091, 2093), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((2527, 2539), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (2537, 2539), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((2628, 2655), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-2)'}), '(axis=-2)\n', (2646, 2655), False, 'from tensorflow.keras import layers, models, optimizers\n'), ((3310, 3330), 'tensorflow.keras.layers.Reshape', 'Reshape', (['input_shape'], {}), '(input_shape)\n', (3317, 3330), False, 'from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate\n'), ((3348, 3380), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3366, 3380), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3398, 3496), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(64, (3, 3), padding='same', activation='relu',\n kernel_initializer='he_normal')\n", (3413, 3496), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3507, 3605), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(32, (3, 3), padding='same', activation='relu',\n kernel_initializer='he_normal')\n", (3522, 3605), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3617, 3715), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', 
(['(16)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(16, (3, 3), padding='same', activation='relu',\n kernel_initializer='he_normal')\n", (3632, 3715), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3726, 3823), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(8)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(8, (3, 3), padding='same', activation='relu',\n kernel_initializer='he_normal')\n", (3741, 3823), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3835, 3934), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(5)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""linear"""', 'kernel_initializer': '"""he_normal"""'}), "(5, (3, 3), padding='same', activation='linear',\n kernel_initializer='he_normal')\n", (3850, 3934), False, 'from tensorflow.keras.layers import BatchNormalization, MaxPooling2D, Flatten, Conv1D, Deconvolution2D, Conv2DTranspose, Softmax\n'), ((3945, 3996), 'tensorflow.keras.layers.Reshape', 'Reshape', ([], {'target_shape': '(64, 64, 5)', 'name': '"""out_recon"""'}), "(target_shape=(64, 64, 5), name='out_recon')\n", (3952, 3996), False, 'from tensorflow.keras.layers import Input, Conv2D, Activation, Dense, Dropout, Lambda, Reshape, Concatenate\n'), ((3271, 3291), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (3278, 3291), True, 'import numpy as np\n')] |
import numpy as np
import torch
from scipy.stats import invgamma
from genrl.agents.bandits.contextual.base import DCBAgent
from genrl.agents.bandits.contextual.common import NeuralBanditModel, TransitionDB
from genrl.utils.data_bandits.base import DataBasedBandit
class NeuralLinearPosteriorAgent(DCBAgent):
    """Deep contextual bandit agent using Bayesian regression for posterior inference
    A neural network is used to transform the context vector to a latent representation
    on which Bayesian linear regression is performed.
    Args:
        bandit (DataBasedBandit): The bandit to solve
        init_pulls (int, optional): Number of times to select each action initially.
            Defaults to 3.
        hidden_dims (List[int], optional): Dimensions of hidden layers of network.
            Defaults to [50, 50].
        init_lr (float, optional): Initial learning rate. Defaults to 0.1.
        lr_decay (float, optional): Decay rate for learning rate. Defaults to 0.5.
        lr_reset (bool, optional): Whether to reset learning rate every train interval.
            Defaults to True.
        max_grad_norm (float, optional): Maximum norm of gradients for gradient clipping.
            Defaults to 0.5.
        dropout_p (Optional[float], optional): Probability for dropout. Defaults to None
            which implies dropout is not to be used.
        eval_with_dropout (bool, optional): Whether or not to use dropout at inference.
            Defaults to False.
        nn_update_ratio (int, optional): Train the neural network only every
            `nn_update_ratio`-th call to `update_params`. Defaults to 2.
        lambda_prior (float, optional): Gaussian prior for linear model. Defaults to 0.25.
        a0 (float, optional): Inverse gamma prior for noise. Defaults to 6.0.
        b0 (float, optional): Inverse gamma prior for noise. Defaults to 6.0.
        device (str): Device to use for tensor operations.
            "cpu" for cpu or "cuda" for cuda. Defaults to "cpu".
    """
    def __init__(self, bandit: DataBasedBandit, **kwargs):
        super(NeuralLinearPosteriorAgent, self).__init__(
            bandit, kwargs.get("device", "cpu")
        )
        self.init_pulls = kwargs.get("init_pulls", 3)
        self.lambda_prior = kwargs.get("lambda_prior", 0.25)
        self.a0 = kwargs.get("a0", 6.0)
        self.b0 = kwargs.get("b0", 6.0)
        hidden_dims = kwargs.get("hidden_dims", [50, 50])
        # Regression operates on the output of the last hidden layer.
        self.latent_dim = hidden_dims[-1]
        self.nn_update_ratio = kwargs.get("nn_update_ratio", 2)
        self.model = (
            NeuralBanditModel(
                context_dim=self.context_dim,
                hidden_dims=kwargs.get("hidden_dims", [50, 50]),
                n_actions=self.n_actions,
                init_lr=kwargs.get("init_lr", 0.1),
                max_grad_norm=kwargs.get("max_grad_norm", 0.5),
                lr_decay=kwargs.get("lr_decay", 0.5),
                lr_reset=kwargs.get("lr_reset", True),
                dropout_p=kwargs.get("dropout_p", None),
            )
            .to(torch.float)
            .to(self.device)
        )
        self.eval_with_dropout = kwargs.get("eval_with_dropout", False)
        # Per-action posterior over regression weights; the extra "+ 1"
        # dimension is the bias term appended to the latent context.
        self.mu = torch.zeros(
            size=(self.n_actions, self.latent_dim + 1),
            device=self.device,
            dtype=torch.float,
        )
        self.cov = torch.stack(
            [
                (1.0 / self.lambda_prior)
                * torch.eye(self.latent_dim + 1, device=self.device, dtype=torch.float)
                for _ in range(self.n_actions)
            ]
        )
        self.inv_cov = torch.stack(
            [
                self.lambda_prior
                * torch.eye(self.latent_dim + 1, device=self.device, dtype=torch.float)
                for _ in range(self.n_actions)
            ]
        )
        # Per-action inverse-gamma posterior over the noise variance.
        self.a = self.a0 * torch.ones(
            self.n_actions, device=self.device, dtype=torch.float
        )
        self.b = self.b0 * torch.ones(
            self.n_actions, device=self.device, dtype=torch.float
        )
        # Raw transitions (for NN training) and latent transitions
        # (for the Bayesian regression) are stored separately.
        self.db = TransitionDB(self.device)
        self.latent_db = TransitionDB()
        self.t = 0
        self.update_count = 0
    def select_action(self, context: torch.Tensor) -> int:
        """Select an action based on given context.
        Selects an action by computing a forward pass through network to output
        a representation of the context on which bayesian linear regression is
        performed to select an action.
        Args:
            context (torch.Tensor): The context vector to select action for.
        Returns:
            int: The action to take.
        """
        self.model.use_dropout = self.eval_with_dropout
        self.t += 1
        # Round-robin over all actions for the first init_pulls * n_actions steps.
        if self.t < self.n_actions * self.init_pulls:
            return torch.tensor(
                self.t % self.n_actions, device=self.device, dtype=torch.int
            )
        # Thompson sampling: draw a noise variance sigma_i^2 ~ InvGamma(a_i, b_i)
        # for each action ...
        var = torch.tensor(
            [self.b[i] * invgamma.rvs(self.a[i]) for i in range(self.n_actions)],
            device=self.device,
            dtype=torch.float,
        )
        try:
            # ... then draw regression weights beta_i ~ N(mu_i, sigma_i^2 * cov_i).
            beta = (
                torch.tensor(
                    np.stack(
                        [
                            np.random.multivariate_normal(
                                self.mu[i], var[i] * self.cov[i]
                            )
                            for i in range(self.n_actions)
                        ]
                    )
                )
                .to(self.device)
                .to(torch.float)
            )
        except np.linalg.LinAlgError as e:  # noqa F841
            # Fall back to a standard normal draw if a covariance is singular.
            beta = (
                (
                    torch.stack(
                        [
                            torch.distributions.MultivariateNormal(
                                torch.zeros(self.context_dim + 1),
                                torch.eye(self.context_dim + 1),
                            ).sample()
                            for i in range(self.n_actions)
                        ]
                    )
                )
                .to(self.device)
                .to(torch.float)
            )
        results = self.model(context)
        latent_context = results["x"]
        # Expected reward per action: beta @ [latent_context, 1] (bias appended).
        values = torch.mv(beta, torch.cat([latent_context.squeeze(0), torch.ones(1)]))
        action = torch.argmax(values).to(torch.int)
        return action
    def update_db(self, context: torch.Tensor, action: int, reward: int) -> None:
        """Updates transition database with given transition
        Updates latent context and predicted rewards separately.
        Args:
            context (torch.Tensor): Context received
            action (int): Action taken
            reward (int): Reward received
        """
        self.db.add(context, action, reward)
        results = self.model(context)
        self.latent_db.add(results["x"].detach(), action, reward)
    def update_params(self, action: int, batch_size: int = 512, train_epochs: int = 20) -> None:
        """Update parameters of the agent.
        Trains neural network and updates bayesian regression parameters.
        Args:
            action (int): Action to update the parameters for.
            batch_size (int, optional): Size of batch to update parameters with.
                Defaults to 512
            train_epochs (int, optional): Epochs to train neural network for.
                Defaults to 20
        """
        self.update_count += 1
        # The network is retrained only every nn_update_ratio-th update.
        if self.update_count % self.nn_update_ratio == 0:
            self.model.train_model(self.db, train_epochs, batch_size)
        z, y = self.latent_db.get_data_for_action(action, batch_size)
        # Append a column of ones so the regression includes a bias term.
        z = torch.cat([z, torch.ones(z.shape[0], 1)], dim=1)
        # Conjugate Bayesian linear regression posterior:
        # precision = Z^T Z + lambda * I, mean = cov @ Z^T y.
        inv_cov = torch.mm(z.T, z) + self.lambda_prior * torch.eye(self.latent_dim + 1)
        cov = torch.inverse(inv_cov)
        mu = torch.mm(cov, torch.mm(z.T, y))
        # Inverse-gamma posterior update for the noise variance.
        a = self.a0 + self.t / 2
        b = self.b0 + (torch.mm(y.T, y) - torch.mm(mu.T, torch.mm(inv_cov, mu))) / 2
        self.mu[action] = mu.squeeze(1)
        self.cov[action] = cov
        self.inv_cov[action] = inv_cov
        self.a[action] = a
        self.b[action] = b
| [
"torch.ones",
"torch.eye",
"torch.argmax",
"torch.mm",
"genrl.agents.bandits.contextual.common.TransitionDB",
"scipy.stats.invgamma.rvs",
"numpy.random.multivariate_normal",
"torch.zeros",
"torch.inverse",
"torch.tensor"
] | [((3113, 3211), 'torch.zeros', 'torch.zeros', ([], {'size': '(self.n_actions, self.latent_dim + 1)', 'device': 'self.device', 'dtype': 'torch.float'}), '(size=(self.n_actions, self.latent_dim + 1), device=self.device,\n dtype=torch.float)\n', (3124, 3211), False, 'import torch\n'), ((3993, 4018), 'genrl.agents.bandits.contextual.common.TransitionDB', 'TransitionDB', (['self.device'], {}), '(self.device)\n', (4005, 4018), False, 'from genrl.agents.bandits.contextual.common import NeuralBanditModel, TransitionDB\n'), ((4044, 4058), 'genrl.agents.bandits.contextual.common.TransitionDB', 'TransitionDB', ([], {}), '()\n', (4056, 4058), False, 'from genrl.agents.bandits.contextual.common import NeuralBanditModel, TransitionDB\n'), ((7755, 7777), 'torch.inverse', 'torch.inverse', (['inv_cov'], {}), '(inv_cov)\n', (7768, 7777), False, 'import torch\n'), ((3772, 3837), 'torch.ones', 'torch.ones', (['self.n_actions'], {'device': 'self.device', 'dtype': 'torch.float'}), '(self.n_actions, device=self.device, dtype=torch.float)\n', (3782, 3837), False, 'import torch\n'), ((3887, 3952), 'torch.ones', 'torch.ones', (['self.n_actions'], {'device': 'self.device', 'dtype': 'torch.float'}), '(self.n_actions, device=self.device, dtype=torch.float)\n', (3897, 3952), False, 'import torch\n'), ((4727, 4801), 'torch.tensor', 'torch.tensor', (['(self.t % self.n_actions)'], {'device': 'self.device', 'dtype': 'torch.int'}), '(self.t % self.n_actions, device=self.device, dtype=torch.int)\n', (4739, 4801), False, 'import torch\n'), ((7671, 7687), 'torch.mm', 'torch.mm', (['z.T', 'z'], {}), '(z.T, z)\n', (7679, 7687), False, 'import torch\n'), ((7805, 7821), 'torch.mm', 'torch.mm', (['z.T', 'y'], {}), '(z.T, y)\n', (7813, 7821), False, 'import torch\n'), ((6272, 6292), 'torch.argmax', 'torch.argmax', (['values'], {}), '(values)\n', (6284, 6292), False, 'import torch\n'), ((7618, 7643), 'torch.ones', 'torch.ones', (['z.shape[0]', '(1)'], {}), '(z.shape[0], 1)\n', (7628, 7643), False, 'import 
torch\n'), ((7710, 7740), 'torch.eye', 'torch.eye', (['(self.latent_dim + 1)'], {}), '(self.latent_dim + 1)\n', (7719, 7740), False, 'import torch\n'), ((3361, 3430), 'torch.eye', 'torch.eye', (['(self.latent_dim + 1)'], {'device': 'self.device', 'dtype': 'torch.float'}), '(self.latent_dim + 1, device=self.device, dtype=torch.float)\n', (3370, 3430), False, 'import torch\n'), ((3604, 3673), 'torch.eye', 'torch.eye', (['(self.latent_dim + 1)'], {'device': 'self.device', 'dtype': 'torch.float'}), '(self.latent_dim + 1, device=self.device, dtype=torch.float)\n', (3613, 3673), False, 'import torch\n'), ((4885, 4908), 'scipy.stats.invgamma.rvs', 'invgamma.rvs', (['self.a[i]'], {}), '(self.a[i])\n', (4897, 4908), False, 'from scipy.stats import invgamma\n'), ((6238, 6251), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (6248, 6251), False, 'import torch\n'), ((7879, 7895), 'torch.mm', 'torch.mm', (['y.T', 'y'], {}), '(y.T, y)\n', (7887, 7895), False, 'import torch\n'), ((7913, 7934), 'torch.mm', 'torch.mm', (['inv_cov', 'mu'], {}), '(inv_cov, mu)\n', (7921, 7934), False, 'import torch\n'), ((5163, 5226), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mu[i]', '(var[i] * self.cov[i])'], {}), '(self.mu[i], var[i] * self.cov[i])\n', (5192, 5226), True, 'import numpy as np\n'), ((5748, 5781), 'torch.zeros', 'torch.zeros', (['(self.context_dim + 1)'], {}), '(self.context_dim + 1)\n', (5759, 5781), False, 'import torch\n'), ((5815, 5846), 'torch.eye', 'torch.eye', (['(self.context_dim + 1)'], {}), '(self.context_dim + 1)\n', (5824, 5846), False, 'import torch\n')] |
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.models.walk_forward_predictor import WalkForwardPredictor
from src.models.lr import LRegression
from src.utils import series_to_supervised
# Resolve the data directory (../data relative to the parent of the CWD) and
# make sure it exists.
# TODO: This is hacky (depends on os.getcwd()); make the path configurable.
data_path = os.path.join(os.path.dirname(os.getcwd()), "..", "data")
pathlib.Path(data_path).mkdir(parents=True, exist_ok=True)

# Fail fast if the expected input file is missing.
close_prices_file = os.path.join(data_path, "local_etfs_close.csv")
if not os.path.exists(close_prices_file):
    raise ValueError("No data in data folder!")

# Load gold ETF (GLD) closing prices; forward-fill gaps, drop remaining NaNs.
gold_etf_data = pd.read_csv(close_prices_file, index_col=0)
gold_etf_data = gold_etf_data["GLD"].to_frame().ffill().dropna()

# Reframe the series as a supervised learning problem:
# var1(t-1), ..., var1(t-n) -> var1(t).
n_features = 1
gold_etf_data = series_to_supervised(gold_etf_data, n_in=n_features, n_out=1)
input_data = gold_etf_data.drop(['var1(t)'], axis=1)
output_data = gold_etf_data.drop(['var1(t-1)'], axis=1)

# Create LR model
lr_model = LRegression(name="lr_gold_wf")

# Walk-forward wrapper: retrain on a sliding 252-observation window,
# predicting 10 steps ahead on 1-period percentage changes.
wf_model = WalkForwardPredictor(model=lr_model, start_date="2004-11-08", end_date="2021-06-01",
                                input_pct_change=1, output_pct_change=1, window_size=252, frequency=1,
                                prediction_length=10, validation_size=1, sliding_window=True,
                                random_validation=False, train_from_scratch=True)

# Train our model through time, and obtain the predictions and errors
lr_predictions, lr_error = wf_model.train_and_predict(input_data, output_data)

print("LR Walk Forward")
print(lr_predictions)
print(lr_error)

# Squared error through time.
wf_se = lr_error ** 2.0
wf_se.plot()
plt.title("SE: Linear Regression")
plt.show()

# Cumulative log-return of the predicted series.
lr_predictions_cumulative = lr_predictions.fillna(0.0)
lr_predictions_cumulative = (1.0 + lr_predictions_cumulative).cumprod()
lr_predictions_cumulative = lr_predictions_cumulative.apply(lambda x: np.log(x), axis=0)
lr_predictions_cumulative.plot()
plt.title("Cumulative Return: Linear Regression")
plt.show()
plt.close()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.log",
"src.models.walk_forward_predictor.WalkForwardPredictor",
"src.utils.series_to_supervised",
"matplotlib.pyplot.close",
"os.getcwd",
"src.models.lr.LRegression",
"os.path.join"
] | [((888, 949), 'src.utils.series_to_supervised', 'series_to_supervised', (['gold_etf_data'], {'n_in': 'n_features', 'n_out': '(1)'}), '(gold_etf_data, n_in=n_features, n_out=1)\n', (908, 949), False, 'from src.utils import series_to_supervised\n'), ((1090, 1120), 'src.models.lr.LRegression', 'LRegression', ([], {'name': '"""lr_gold_wf"""'}), "(name='lr_gold_wf')\n", (1101, 1120), False, 'from src.models.lr import LRegression\n'), ((1154, 1435), 'src.models.walk_forward_predictor.WalkForwardPredictor', 'WalkForwardPredictor', ([], {'model': 'lr_model', 'start_date': '"""2004-11-08"""', 'end_date': '"""2021-06-01"""', 'input_pct_change': '(1)', 'output_pct_change': '(1)', 'window_size': '(252)', 'frequency': '(1)', 'prediction_length': '(10)', 'validation_size': '(1)', 'sliding_window': '(True)', 'random_validation': '(False)', 'train_from_scratch': '(True)'}), "(model=lr_model, start_date='2004-11-08', end_date=\n '2021-06-01', input_pct_change=1, output_pct_change=1, window_size=252,\n frequency=1, prediction_length=10, validation_size=1, sliding_window=\n True, random_validation=False, train_from_scratch=True)\n", (1174, 1435), False, 'from src.models.walk_forward_predictor import WalkForwardPredictor\n'), ((1771, 1805), 'matplotlib.pyplot.title', 'plt.title', (['"""SE: Linear Regression"""'], {}), "('SE: Linear Regression')\n", (1780, 1805), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1814, 1816), True, 'import matplotlib.pyplot as plt\n'), ((2067, 2116), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Return: Linear Regression"""'], {}), "('Cumulative Return: Linear Regression')\n", (2076, 2116), True, 'import matplotlib.pyplot as plt\n'), ((2117, 2127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2125, 2127), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2139), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2137, 2139), True, 'import 
matplotlib.pyplot as plt\n'), ((706, 753), 'os.path.join', 'os.path.join', (['data_path', '"""local_etfs_close.csv"""'], {}), "(data_path, 'local_etfs_close.csv')\n", (718, 753), False, 'import os\n'), ((489, 500), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (498, 500), False, 'import os\n'), ((563, 610), 'os.path.join', 'os.path.join', (['data_path', '"""local_etfs_close.csv"""'], {}), "(data_path, 'local_etfs_close.csv')\n", (575, 610), False, 'import os\n'), ((2015, 2024), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (2021, 2024), True, 'import numpy as np\n'), ((384, 395), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (393, 395), False, 'import os\n')] |
import numpy as np
def collapse(probas, output_type = 'bin'):
    """
    Sample discrete unit states from signed probabilities.

    Parameters
    ----------
    probas: An array of probabilities; the magnitude of each entry is the
        probability of that unit being 1, and the sign selects which value
        is produced in place of 1 (negative entries yield 0 or -1).
    output_type: If 'ter' (ternary), output can be -1, 0 and 1; if 'bin'
        (binary, the default), output can only be 0 and 1.

    Returns
    -------
    b: An array of sampled states, whose dimensionality is identical to probas.
    """
    p = np.abs(probas)
    # Bernoulli draw per entry. NOTE: np.int was removed in NumPy 1.24;
    # the builtin `int` is the correct dtype here.
    b = (p >= np.random.rand(*p.shape)).astype(int)
    return np.where(probas >= 0, b, {'bin': 0, 'ter': -b}[output_type])
def duoramp(x, low = None, high = None):
    """
    Clamp the values of x into the interval [low, high].

    Parameters
    ----------
    x : Anything that can be transformed into a numpy array
    low : A number, optional
        The lowest value an entry in the result can be. The default is None
        (no lower bound).
    high : A number, optional
        The highest value an entry in the result can be. The default is None
        (no upper bound).

    Returns
    -------
    y : A numpy array
        A clipped copy of x; the input itself is never modified.
    """
    # np.array (not np.asarray) forces a copy: np.asarray returns the input
    # object itself when it is already an ndarray, so the in-place clamping
    # below used to mutate the caller's array.
    y = np.array(x)
    if low is not None:
        y[y < low] = low
    if high is not None:
        y[y > high] = high
    return y
def logistic(x, temperature = 1.0):
    """
    Numerically stable logistic (sigmoid) function 1 / (1 + exp(-x / T)).

    Parameters
    ----------
    x: A numeric array.
    temperature: A non-negative number controlling the slope of the function;
        0 reduces the function to a step function.

    Returns
    -------
    y: The value of the function, which is often used as a probability.
    -------
    The function is numerically stable for very big/small values: positive
    and negative inputs use equivalent formulas chosen so that np.exp is
    only ever evaluated at non-positive arguments.
    """
    _x = np.asarray(x)
    if temperature == 0:  # The logistic function is reduced to a step function.
        y = np.zeros(_x.shape)
        y[_x > 0] = 1.0
        y[_x == 0] = 0.5
    else:
        norx = _x / temperature
        mask_p = norx > 0
        mask_n = norx < 0
        # Entries exactly at zero keep this initial value: sigmoid(0) = 0.5.
        # (They were previously initialised to 1, which was wrong.)
        y = np.full_like(norx, 0.5)
        y[mask_p] = 1 / (1 + np.exp(-norx[mask_p]))
        # positive x gives small exp(-x): 1<denom<2
        z = np.exp(norx[mask_n])
        y[mask_n] = z / (1 + z)
        # negative x gives small exp(x)=z: 1<denom<2
    return y
def softmax(x, temperature = 1.0):
    """
    Row-wise softmax with a temperature parameter.

    Parameters
    ----------
    x: A two-dimensional numeric array; each row is a distribution of activations.
    temperature: A non-negative number controlling the slope of the function.
        A temperature of 0 puts all probability mass on the (possibly tied)
        row maxima.

    Returns
    -------
    P: The value of the function, which is often used as a probability.
        Each row adds up to 1.
    -------
    The function is numerically stable for very big/small values: every row
    is shifted so that its maximum is 0 before exponentiation.
    """
    shifted = x - np.amax(x, axis=1, keepdims=True)
    if temperature == 0:
        # Hard max: split the mass evenly among the entries at the row maximum.
        winners = shifted == 0
        P = winners / np.count_nonzero(winners, axis=1, keepdims=True)
    else:
        weights = np.exp(shifted / temperature)
        P = weights / np.sum(weights, axis=1, keepdims=True)
    return P
def entropy(P, base = None):
    """
    Shannon entropy of each row of P.

    Parameters
    ----------
    P: A two-dimensional numeric array; each row is a probability distribution
        (rows are re-normalised to sum to 1 before the computation).
    base: The logarithmic base when calculating entropy, with the default
        value being e (i.e. entropy in nats).

    Returns
    -------
    H: A one-dimensional array holding the entropy of each distribution in P.
    """
    # Normalise each row; zero entries are mapped to 1 so that the usual
    # convention p*log(p) = 0 at p = 0 holds (log(1) = 0).
    norp = P / np.sum(P, axis=1, keepdims=True)
    norp[norp == 0] = 1
    scale = 1 if base is None else np.log(base)
    logp = np.log(norp) / scale
    return -np.sum(norp * logp, axis=1)
def kl_divergence(p, q, base = np.e):
    """
    Kullback-Leibler divergence D(p || q) in the given logarithmic base.

    Parameters
    ----------
    p, q: Probability distributions (array-likes of the same shape).
    base: The logarithmic base for the result, defaulting to e (nats).

    Returns
    -------
    The scalar divergence sum(p * log(p / q)) / log(base), with the
    convention that terms where p == 0 contribute 0.
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # Evaluate log only where p != 0: np.where computes both branches, so
    # the previous one-liner emitted divide-by-zero warnings at p == 0.
    terms = np.zeros_like(p)
    mask = p != 0
    terms[mask] = p[mask] * np.log(p[mask] / q[mask])
    return np.sum(terms) / np.log(base)
"numpy.zeros_like",
"numpy.abs",
"numpy.ones_like",
"numpy.log",
"numpy.count_nonzero",
"numpy.sum",
"numpy.asarray",
"numpy.zeros",
"numpy.amax",
"numpy.where",
"numpy.reshape",
"numpy.exp",
"numpy.random.rand",
"numpy.dot"
] | [((319, 333), 'numpy.abs', 'np.abs', (['probas'], {}), '(probas)\n', (325, 333), True, 'import numpy as np\n'), ((400, 460), 'numpy.where', 'np.where', (['(probas >= 0)', 'b', "{'bin': 0, 'ter': -b}[output_type]"], {}), "(probas >= 0, b, {'bin': 0, 'ter': -b}[output_type])\n", (408, 460), True, 'import numpy as np\n'), ((930, 943), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (940, 943), True, 'import numpy as np\n'), ((1436, 1449), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1446, 1449), True, 'import numpy as np\n'), ((2991, 3017), 'numpy.reshape', 'np.reshape', (['_p', 'norx.shape'], {}), '(_p, norx.shape)\n', (3001, 3017), True, 'import numpy as np\n'), ((1542, 1560), 'numpy.zeros', 'np.zeros', (['_x.shape'], {}), '(_x.shape)\n', (1550, 1560), True, 'import numpy as np\n'), ((1729, 1747), 'numpy.ones_like', 'np.ones_like', (['norx'], {}), '(norx)\n', (1741, 1747), True, 'import numpy as np\n'), ((1864, 1888), 'numpy.zeros_like', 'np.zeros_like', (['y[mask_n]'], {}), '(y[mask_n])\n', (1877, 1888), True, 'import numpy as np\n'), ((1901, 1921), 'numpy.exp', 'np.exp', (['norx[mask_n]'], {}), '(norx[mask_n])\n', (1907, 1921), True, 'import numpy as np\n'), ((3521, 3533), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (3527, 3533), True, 'import numpy as np\n'), ((3545, 3557), 'numpy.log', 'np.log', (['norp'], {}), '(norp)\n', (3551, 3557), True, 'import numpy as np\n'), ((3752, 3764), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (3758, 3764), True, 'import numpy as np\n'), ((2687, 2719), 'numpy.zeros_like', 'np.zeros_like', (['xrow'], {'dtype': 'float'}), '(xrow, dtype=float)\n', (2700, 2719), True, 'import numpy as np\n'), ((2895, 2921), 'numpy.exp', 'np.exp', (['(xrow / temperature)'], {}), '(xrow / temperature)\n', (2901, 2921), True, 'import numpy as np\n'), ((348, 372), 'numpy.random.rand', 'np.random.rand', (['*p.shape'], {}), '(*p.shape)\n', (362, 372), True, 'import numpy as np\n'), ((1777, 1798), 'numpy.exp', 'np.exp', 
(['(-norx[mask_p])'], {}), '(-norx[mask_p])\n', (1783, 1798), True, 'import numpy as np\n'), ((2565, 2583), 'numpy.amax', 'np.amax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2572, 2583), True, 'import numpy as np\n'), ((2788, 2814), 'numpy.count_nonzero', 'np.count_nonzero', (['mask_max'], {}), '(mask_max)\n', (2804, 2814), True, 'import numpy as np\n'), ((2945, 2954), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (2951, 2954), True, 'import numpy as np\n'), ((3365, 3382), 'numpy.sum', 'np.sum', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (3371, 3382), True, 'import numpy as np\n'), ((3587, 3604), 'numpy.dot', 'np.dot', (['ps', 'logps'], {}), '(ps, logps)\n', (3593, 3604), True, 'import numpy as np\n'), ((3731, 3744), 'numpy.log', 'np.log', (['(p / q)'], {}), '(p / q)\n', (3737, 3744), True, 'import numpy as np\n')] |
from typing import List, Optional
import numpy as np
import xarray as xr
from .parse_ad2cp import Ad2cpDataPacket, Field, HeaderOrDataRecordFormats
from .set_groups_base import SetGroupsBase, set_encodings
def merge_attrs(datasets: List[xr.Dataset]) -> List[xr.Dataset]:
    """Pool the ``attrs`` of every dataset into one merged mapping.

    On key collisions the attribute from the later dataset wins. After the
    call every dataset's ``attrs`` refers to the same merged dict object,
    and the (mutated) input list is returned.
    """
    merged = {key: value for ds in datasets for key, value in ds.attrs.items()}
    for ds in datasets:
        ds.attrs = merged
    return datasets
class SetGroupsAd2cp(SetGroupsBase):
    def __init__(self, *args, **kwargs):
        """Initialise the group setter and eagerly combine parsed packets.

        Forwards all arguments to the base class (which presumably sets
        ``self.parser_obj`` — confirm against ``SetGroupsBase``), caches the
        parser's pulse-compression flag, and merges the parsed packets into
        per-type datasets via ``combine_packets``.
        """
        super().__init__(*args, **kwargs)
        self.pulse_compressed = self.parser_obj.get_pulse_compressed()
        self.combine_packets()
def combine_packets(self):
self.ds = None
# # TODO: where to put string data in output?
# pad raw samples so that "sample" dimenion has same length
max_samples = 0
for packet in self.parser_obj.echosounder_raw_packets:
# both _r and _i have same dimensions
max_samples = max(
max_samples, packet.data["echosounder_raw_samples_i"].shape[0]
)
for packet in self.parser_obj.echosounder_raw_packets:
packet.data["echosounder_raw_samples_i"] = np.pad(
packet.data["echosounder_raw_samples_i"],
((0, max_samples - packet.data["echosounder_raw_samples_i"].shape[0])),
)
packet.data["echosounder_raw_samples_q"] = np.pad(
packet.data["echosounder_raw_samples_q"],
((0, max_samples - packet.data["echosounder_raw_samples_q"].shape[0])),
)
def make_dataset(
packets: List[Ad2cpDataPacket], ping_time_dim: str
) -> Optional[xr.Dataset]:
for i in range(len(packets)):
packet = packets[i]
data_vars = dict()
for field_name, field_value in packet.data.items():
# add dimension names to data vars for xarray
# TODO might not work with altimeter_spare
field = HeaderOrDataRecordFormats.data_record_format(
packet.data_record_type
).get_field(field_name)
if field is not None:
dims = field.dimensions(packet.data_record_type)
units = field.units()
else:
dims = Field.default_dimensions()
units = None
if units:
data_vars[field_name] = (
tuple(dim.value for dim in dims),
[field_value],
{"Units": units},
)
else:
data_vars[field_name] = (
tuple(dim.value for dim in dims),
[field_value],
)
coords = {
"ping_time": [packet.timestamp],
ping_time_dim: [packet.timestamp],
}
if "beams" in packet.data_exclude:
coords["beam"] = packet.data_exclude["beams"]
new_packet = xr.Dataset(data_vars=data_vars, coords=coords)
# modify in place to reduce memory consumption
packets[i] = new_packet
if len(packets) > 0:
packets = merge_attrs(packets)
return xr.combine_by_coords(
packets,
data_vars="minimal",
coords="minimal",
combine_attrs="override",
)
else:
return None
burst_ds = make_dataset(
self.parser_obj.burst_packets, ping_time_dim="ping_time_burst"
)
average_ds = make_dataset(
self.parser_obj.average_packets, ping_time_dim="ping_time_average"
)
echosounder_ds = make_dataset(
self.parser_obj.echosounder_packets, ping_time_dim="ping_time_echosounder"
)
echosounder_raw_ds = make_dataset(
self.parser_obj.echosounder_raw_packets,
ping_time_dim="ping_time_echosounder_raw",
)
echosounder_raw_transmit_ds = make_dataset(
self.parser_obj.echosounder_raw_transmit_packets,
ping_time_dim="ping_time_echosounder_raw_transmit",
)
datasets = [
ds
for ds in (
burst_ds,
average_ds,
echosounder_ds,
echosounder_raw_ds,
echosounder_raw_transmit_ds,
)
if ds
]
for dataset in datasets:
if "offset_of_data" in dataset:
print(dataset["offset_of_data"])
datasets = merge_attrs(datasets)
self.ds = xr.merge(datasets)
def set_env(self) -> xr.Dataset:
ds = xr.Dataset(
data_vars={
"sound_speed_indicative": self.ds.get("speed_of_sound"),
"temperature": self.ds.get("temperature"),
"pressure": self.ds.get("pressure"),
},
coords={
"ping_time": self.ds.get("ping_time"),
"ping_time_burst": self.ds.get("ping_time_burst", []),
"ping_time_average": self.ds.get("ping_time_average", []),
"ping_time_echosounder": self.ds.get("ping_time_echosounder", []),
},
)
# FIXME: this is a hack because the current file saving
# mechanism requires that the env group have ping_time as a dimension,
# but ping_time might not be a dimension if the dataset is completely
# empty
if "ping_time" not in ds.dims:
ds = ds.expand_dims(dim="ping_time")
return set_encodings(ds)
def set_platform(self) -> xr.Dataset:
ds = xr.Dataset(
data_vars={
"heading": self.ds.get("heading"),
"pitch": self.ds.get("pitch"),
"roll": self.ds.get("roll"),
"magnetometer_raw_x": self.ds.get("magnetometer_raw_x"),
"magnetometer_raw_y": self.ds.get("magnetometer_raw_y"),
"magnetometer_raw_z": self.ds.get("magnetometer_raw_z"),
},
coords={
"ping_time": self.ds.get("ping_time"),
"ping_time_burst": self.ds.get("ping_time_burst"),
"ping_time_average": self.ds.get("ping_time_average"),
"ping_time_echosounder": self.ds.get("ping_time_echosounder"),
"beam": self.ds.get("beam"),
"range_bin_burst": self.ds.get("range_bin_burst"),
"range_bin_average": self.ds.get("range_bin_average"),
"range_bin_echosounder": self.ds.get("range_bin_echosounder"),
},
attrs={
"platform_name": self.ui_param["platform_name"],
"platform_type": self.ui_param["platform_type"],
"platform_code_ICES": self.ui_param["platform_code_ICES"],
},
)
return set_encodings(ds)
def set_beam(self) -> xr.Dataset:
# TODO: should we divide beam into burst/average (e.g., beam_burst, beam_average)
# like was done for range_bin (we have range_bin_burst, range_bin_average,
# and range_bin_echosounder)?
data_vars = {
"number_of_beams": self.ds.get("num_beams"),
"coordinate_system": self.ds.get("coordinate_system"),
"number_of_cells": self.ds.get("num_cells"),
"blanking": self.ds.get("blanking"),
"cell_size": self.ds.get("cell_size"),
"velocity_range": self.ds.get("velocity_range"),
"echosounder_frequency": self.ds.get("echosounder_frequency"),
"ambiguity_velocity": self.ds.get("ambiguity_velocity"),
"data_set_description": self.ds.get("dataset_description"),
"transmit_energy": self.ds.get("transmit_energy"),
"velocity_scaling": self.ds.get("velocity_scaling"),
"velocity_burst": self.ds.get("velocity_data_burst"),
"velocity_average": self.ds.get("velocity_data_average"),
# "velocity_echosounder": self.ds.get("velocity_data_echosounder"),
"amplitude_burst": self.ds.get("amplitude_data_burst"),
"amplitude_average": self.ds.get("amplitude_data_average"),
# "amplitude_echosounder": self.ds.get("amplitude_data_echosounder"),
"correlation_burst": self.ds.get("correlation_data_burst"),
"correlation_average": self.ds.get("correlation_data_average"),
"correlation_echosounder": self.ds.get("correlation_data_echosounder"),
# "echosounder": self.ds.get("echosounder_data"),
"amplitude_echosounder": self.ds.get("echosounder_data"),
"figure_of_merit": self.ds.get("figure_of_merit_data"),
"altimeter_distance": self.ds.get("altimeter_distance"),
"altimeter_quality": self.ds.get("altimeter_quality"),
"ast_distance": self.ds.get("ast_distance"),
"ast_quality": self.ds.get("ast_quality"),
"ast_offset_100us": self.ds.get("ast_offset_100us"),
"ast_pressure": self.ds.get("ast_pressure"),
"altimeter_spare": self.ds.get("altimeter_spare"),
"altimeter_raw_data_num_samples": self.ds.get(
"altimeter_raw_data_num_samples"
),
"altimeter_raw_data_sample_distance": self.ds.get(
"altimeter_raw_data_sample_distance"
),
"altimeter_raw_data_samples": self.ds.get("altimeter_raw_data_samples"),
}
ds = xr.Dataset(
data_vars=data_vars,
coords={
"ping_time": self.ds.get("ping_time"),
"ping_time_burst": self.ds.get("ping_time_burst"),
"ping_time_average": self.ds.get("ping_time_average"),
"ping_time_echosounder": self.ds.get("ping_time_echosounder"),
"beam": self.ds.get("beam"),
"range_bin_burst": self.ds.get("range_bin_burst"),
"range_bin_average": self.ds.get("range_bin_average"),
"range_bin_echosounder": self.ds.get("range_bin_echosounder"),
"altimeter_sample_bin": self.ds.get("altimeter_sample_bin"),
},
attrs={"pulse_compressed": self.pulse_compressed},
)
# FIXME: this is a hack because the current file saving
# mechanism requires that the beam group have ping_time as a dimension,
# but ping_time might not be a dimension if the dataset is completely
# empty
if "ping_time" not in ds.dims:
ds = ds.expand_dims(dim="ping_time")
return set_encodings(ds)
def set_vendor(self) -> xr.Dataset:
attrs = {
"pressure_sensor_valid": self.ds.get("pressure_sensor_valid"),
"temperature_sensor_valid": self.ds.get("temperature_sensor_valid"),
"compass_sensor_valid": self.ds.get("compass_sensor_valid"),
"tilt_sensor_valid": self.ds.get("tilt_sensor_valid"),
}
attrs = {
field_name: field_value.data[0]
for field_name, field_value in attrs.items()
if field_value is not None
}
ds = xr.Dataset(
data_vars={
"data_record_version": self.ds.get("version"),
"error": self.ds.get("error"),
"status": self.ds.get("status"),
"status0": self.ds.get("status0"),
"battery_voltage": self.ds.get("battery_voltage"),
"power_level": self.ds.get("power_level"),
"temperature_of_pressure_sensor": self.ds.get(
"temperature_from_pressure_sensor"
),
"nominal_correlation": self.ds.get("nominal_correlation"),
"magnetometer_temperature": self.ds.get("magnetometer_temperature"),
"real_ping_time_clock_temperature": self.ds.get(
"real_ping_time_clock_temperature"
),
"ensemble_counter": self.ds.get("ensemble_counter"),
"ahrs_rotation_matrix_mij": (
"mij",
[
self.ds.get("ahrs_rotation_matrix_m11"),
self.ds.get("ahrs_rotation_matrix_m12"),
self.ds.get("ahrs_rotation_matrix_m13"),
self.ds.get("ahrs_rotation_matrix_m21"),
self.ds.get("ahrs_rotation_matrix_m22"),
self.ds.get("ahrs_rotation_matrix_m23"),
self.ds.get("ahrs_rotation_matrix_m31"),
self.ds.get("ahrs_rotation_matrix_m32"),
self.ds.get("ahrs_rotation_matrix_m33"),
],
),
"ahrs_quaternions_wxyz": (
"wxyz",
[
self.ds.get("ahrs_quaternions_w"),
self.ds.get("ahrs_quaternions_x"),
self.ds.get("ahrs_quaternions_y"),
self.ds.get("ahrs_quaternions_z"),
],
),
"ahrs_gyro_xyz": (
"xyz",
[
self.ds.get("ahrs_gyro_x"),
self.ds.get("ahrs_gyro_y"),
self.ds.get("ahrs_gyro_z"),
],
),
"percentage_good_data": self.ds.get("percentage_good_data"),
"std_dev_pitch": self.ds.get("std_dev_pitch"),
"std_dev_roll": self.ds.get("std_dev_roll"),
"std_dev_heading": self.ds.get("std_dev_heading"),
"std_dev_pressure": self.ds.get("std_dev_pressure"),
"echosounder_raw_samples_i": self.ds.get("echosounder_raw_samples_i"),
"echosounder_raw_samples_q": self.ds.get("echosounder_raw_samples_q"),
"echosounder_raw_transmit_samples_i": self.ds.get(
"echosounder_raw_transmit_samples_i"
),
"echosounder_raw_transmit_samples_q": self.ds.get(
"echosounder_raw_transmit_samples_q"
),
"echosounder_raw_beam": self.ds.get("echosounder_raw_beam"),
"echosounder_raw_echogram": self.ds.get("echosounder_raw_echogram"),
},
coords={
"ping_time": self.ds.get("ping_time"),
"ping_time_burst": self.ds.get("ping_time_burst"),
"ping_time_average": self.ds.get("ping_time_average"),
"ping_time_echosounder": self.ds.get("ping_time_echosounder"),
"ping_time_echosounder_raw": self.ds.get("ping_time_echosounder_raw"),
"ping_time_echosounder_raw_transmit": self.ds.get(
"ping_time_echosounder_raw_transmit"
),
"sample": self.ds.get("sample"),
"sample_transmit": self.ds.get("sample_transmit"),
"beam": self.ds.get("beam"),
"range_bin_average": self.ds.get("range_bin_average"),
"range_bin_burst": self.ds.get("range_bin_burst"),
"range_bin_echosounder": self.ds.get("range_bin_echosounder"),
},
attrs={**attrs, "pulse_compressed": self.pulse_compressed},
)
ds = ds.reindex(
{
"mij": np.array(["11", "12", "13", "21", "22", "23", "31", "32", "33"]),
"wxyz": np.array(["w", "x", "y", "z"]),
"xyz": np.array(["x", "y", "z"]),
}
)
# FIXME: this is a hack because the current file saving
# mechanism requires that the vendor group have ping_time as a dimension,
# but ping_time might not be a dimension if the dataset is completely
# empty
if "ping_time" not in ds.dims:
ds = ds.expand_dims(dim="ping_time")
return set_encodings(ds)
def set_sonar(self) -> xr.Dataset:
ds = xr.Dataset(
attrs={
"sonar_manufacturer": "Nortek",
"sonar_model": "AD2CP",
"sonar_serial_number": "",
"sonar_software_name": "",
"sonar_software_version": "",
"sonar_firmware_version": "",
"sonar_type": "acoustic Doppler current profiler (ADCP)",
}
)
if "serial_number" in self.ds:
ds.attrs["sonar_serial_number"] = int(self.ds["serial_number"].data[0])
firmware_version = self.parser_obj.get_firmware_version()
if firmware_version is not None:
ds.attrs["sonar_firmware_version"] = ", ".join(
[f"{k}:{v}" for k, v in firmware_version.items()]
)
return ds
| [
"numpy.pad",
"xarray.combine_by_coords",
"xarray.Dataset",
"xarray.merge",
"numpy.array"
] | [((5034, 5052), 'xarray.merge', 'xr.merge', (['datasets'], {}), '(datasets)\n', (5042, 5052), True, 'import xarray as xr\n'), ((16546, 16804), 'xarray.Dataset', 'xr.Dataset', ([], {'attrs': "{'sonar_manufacturer': 'Nortek', 'sonar_model': 'AD2CP',\n 'sonar_serial_number': '', 'sonar_software_name': '',\n 'sonar_software_version': '', 'sonar_firmware_version': '',\n 'sonar_type': 'acoustic Doppler current profiler (ADCP)'}"}), "(attrs={'sonar_manufacturer': 'Nortek', 'sonar_model': 'AD2CP',\n 'sonar_serial_number': '', 'sonar_software_name': '',\n 'sonar_software_version': '', 'sonar_firmware_version': '',\n 'sonar_type': 'acoustic Doppler current profiler (ADCP)'})\n", (16556, 16804), True, 'import xarray as xr\n'), ((1318, 1441), 'numpy.pad', 'np.pad', (["packet.data['echosounder_raw_samples_i']", "(0, max_samples - packet.data['echosounder_raw_samples_i'].shape[0])"], {}), "(packet.data['echosounder_raw_samples_i'], (0, max_samples - packet.\n data['echosounder_raw_samples_i'].shape[0]))\n", (1324, 1441), True, 'import numpy as np\n'), ((1541, 1664), 'numpy.pad', 'np.pad', (["packet.data['echosounder_raw_samples_q']", "(0, max_samples - packet.data['echosounder_raw_samples_q'].shape[0])"], {}), "(packet.data['echosounder_raw_samples_q'], (0, max_samples - packet.\n data['echosounder_raw_samples_q'].shape[0]))\n", (1547, 1664), True, 'import numpy as np\n'), ((3355, 3401), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': 'data_vars', 'coords': 'coords'}), '(data_vars=data_vars, coords=coords)\n', (3365, 3401), True, 'import xarray as xr\n'), ((3609, 3707), 'xarray.combine_by_coords', 'xr.combine_by_coords', (['packets'], {'data_vars': '"""minimal"""', 'coords': '"""minimal"""', 'combine_attrs': '"""override"""'}), "(packets, data_vars='minimal', coords='minimal',\n combine_attrs='override')\n", (3629, 3707), True, 'import xarray as xr\n'), ((15934, 15998), 'numpy.array', 'np.array', (["['11', '12', '13', '21', '22', '23', '31', '32', '33']"], {}), "(['11', 
'12', '13', '21', '22', '23', '31', '32', '33'])\n", (15942, 15998), True, 'import numpy as np\n'), ((16024, 16054), 'numpy.array', 'np.array', (["['w', 'x', 'y', 'z']"], {}), "(['w', 'x', 'y', 'z'])\n", (16032, 16054), True, 'import numpy as np\n'), ((16079, 16104), 'numpy.array', 'np.array', (["['x', 'y', 'z']"], {}), "(['x', 'y', 'z'])\n", (16087, 16104), True, 'import numpy as np\n')] |
''' Model utilities '''
import random
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow.keras import backend as K
from tensorflow import GradientTape
from tensorflow.keras.models import Model
from tensorflow import cast, reduce_mean
from sklearn.metrics import roc_auc_score, roc_curve
def Weighted_Loss(classes, epsilon=1e-7):
    '''
    Build a class-frequency-weighted binary cross-entropy loss.

    parameters:
        classes: ground-truth labels (one- or two-dimensional numpy array
            of one-hot values); used only to estimate per-class
            positive/negative frequencies.
        epsilon: smoothing value to prevent log(0) exceptions. Default 1e-7.
    returns:
        a loss function taking (y_true, y_pred) tensors
    effects:
        None
    '''
    # Fraction of positive labels per class weights the *negative* term,
    # and its complement weights the positive term, so rare classes are
    # not drowned out by frequent ones.
    neg_w = np.sum(classes, axis=0) / classes.shape[0]
    pos_w = 1 - neg_w
    if classes.ndim < 2:
        # single-class input: promote scalar weights to 1-element arrays
        neg_w = np.array([neg_w])
        pos_w = np.array([pos_w])
    def weighted_loss(y_true, y_pred):
        '''
        Weighted binary cross-entropy summed over all classes.
        y_true / y_pred: (batch, n_classes) one-hot labels and predicted
        probabilities.
        returns: scalar loss
        '''
        total = 0
        for class_idx in range(len(pos_w)):
            truth = y_true[:, class_idx]
            prediction = y_pred[:, class_idx]
            positive_term = -1 * K.mean(
                pos_w[class_idx] * truth * K.log(prediction + epsilon))
            negative_term = -1 * K.mean(
                neg_w[class_idx] * (1 - truth) * K.log(1 - prediction + epsilon))
            total += positive_term + negative_term
        return total
    return weighted_loss
def get_dice_loss(epsilon=1e-7):
    '''
    Build a soft-Dice loss function.
    parameter:
        epsilon: smoothing term added to numerator and denominator,
            default 1e-7
    returns:
        dice loss function taking (y_true, y_pred) tensors
    '''
    def dice_loss(y_true, y_pred):
        # per-sample soft Dice over the spatial axes (1, 2), then 1 - mean
        numerator = 2 * K.sum(y_true * y_pred, axis=(1, 2)) + epsilon
        denominator = (
            K.sum(K.pow(y_true, 2), axis=(1, 2))
            + K.sum(K.pow(y_pred, 2), axis=(1, 2))
            + epsilon
        )
        return 1 - K.mean(numerator / denominator)
    return dice_loss
def model_metrics(y_true, y_pred, labels):
    '''
    Calculate metrics (accuracy, sensitivity, specificity, PPV and AUC
    scores) plus ROC curve points for each class.
    Parameters:
        y_true: Ground truth for all classes
        y_pred: Predicted classes
        Both are Numpy arrays, two-dimensional (n_samples, n_classes),
        as one-hot encoded labels.
        labels: array of the class labels (as strings), one per column
    returns:
        df: pandas DataFrame indexed by label with columns
            ['Accuracy', 'Sensitivity', 'Specificity', 'PPV', 'Auc_score']
        roc_curves: list of [fpr, tpr] arrays, one pair per label
    effects:
        None
    NOTE(review): sensitivity/specificity/PPV divide by class counts; a
    class with no positive (or no negative) samples yields a 0/0 here —
    confirm inputs always contain both outcomes per class.
    '''
    metrics = []
    roc_curves = []
    for i in range(len(labels)):
        # confusion-matrix counts for class i: tp, fp, tn, fn
        tp = np.sum((y_true[:,i] == 1) & (y_pred[:,i] == 1))
        fp = np.sum((y_true[:,i] == 0) & (y_pred[:,i] == 1))
        tn = np.sum((y_true[:,i] == 0) & (y_pred[:,i] == 0))
        fn = np.sum((y_true[:,i] == 1) & (y_pred[:,i] == 0))
        # accuracy, sensitivity, specificity
        accuracy = (tp + tn) / (tp + tn + fp + fn)
        sensitivity = tp / (tp + fn)
        specificity = tn / (tn + fp)
        # Calculate PPV according to Bayes' theorem using the class prevalence
        prev = np.sum(y_true[:,i]) / len(y_true[:,i])
        numerator = sensitivity * prev
        denominator = sensitivity * prev + (1 - specificity) * (1 - prev)
        ppv = numerator / denominator
        # calculate ROC curve points and AUC
        fpr, tpr, _ = roc_curve(y_true[:,i], y_pred[:,i])
        auc_score = roc_auc_score(y_true[:,i], y_pred[:,i])
        metrics.append([accuracy, sensitivity, specificity, ppv, auc_score])
        roc_curves.append([fpr, tpr])
    df = pd.DataFrame(metrics,
                      columns=['Accuracy', 'Sensitivity', 'Specificity',
                               'PPV', 'Auc_score'],
                      index=labels)
    return df, roc_curves
def dice_coeff(y_true, y_pred, epsilon=1e-7):
    """Mean soft Dice coefficient over the batch.

    y_true / y_pred: tensors whose spatial axes are (1, 2);
    epsilon smooths against empty masks.
    """
    numerator = 2 * K.sum(y_true * y_pred, axis=(1, 2)) + epsilon
    denominator = K.sum(y_true, axis=(1, 2)) + K.sum(y_pred, axis=(1, 2)) + epsilon
    return K.mean(numerator / denominator)
def grad_cam(model, image, cls, layer_name, test=False):
    '''
    GradCAM method for visualizing input saliency.
    parameters:
        model: the model in use
        image: a chosen image as array; indexed as image[0] below, so a
            batch dimension is expected (presumably (1, h, w, 3) —
            TODO confirm against callers)
        cls: index of the label whose activation is visualized
        layer_name: layer of the model whose feature maps are used
        test: unused; kept for interface compatibility
    returns:
        class activation map blended over the image, as a BGR image array
    effects:
        None
    Thank you to https://gist.github.com/RaphaelMeudec/e9a805fa82880876f8d89766f0690b54
    '''
    # sub-model exposing both the chosen layer's feature maps and the output
    grad_model = Model([model.inputs],
                       [model.get_layer(layer_name).output, model.output])
    with GradientTape() as tape:
        conv_outputs, predictions = grad_model([image])
        loss = predictions[:, cls]
    output = conv_outputs[0]
    grads = tape.gradient(loss, conv_outputs)[0]
    # keep only gradients where both activation and gradient are positive
    guided_grads = cast(output > 0, 'float32') * cast(grads > 0, 'float32') * grads
    # one weight per feature-map channel (mean over the spatial axes)
    weights = reduce_mean(guided_grads, axis=(0, 1))
    cam = np.ones(output.shape[0: 2], dtype = np.float32)
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]
    # NOTE(review): cv2.resize takes (width, height); for a (1, h, w, 3)
    # image this passes (h, w) — verify for non-square inputs.
    cam = cv2.resize(cam.numpy(), (image.shape[1], image.shape[2]))
    cam = np.maximum(cam, 0)
    # min-max normalize to [0, 1] before colorizing (epsilon avoids 0/0)
    heatmap = (cam - cam.min()) / (cam.max() - cam.min() + 1e-7)
    cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_VIRIDIS)
    output_image = cv2.addWeighted(cv2.cvtColor(image[0].astype('uint8'), cv2.COLOR_RGB2BGR),
                                   0.5, cam, 1, 0)
    return output_image
| [
"pandas.DataFrame",
"tensorflow.keras.backend.pow",
"numpy.uint8",
"numpy.maximum",
"numpy.sum",
"sklearn.metrics.roc_curve",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.mean",
"tensorflow.reduce_mean",
"numpy.ones",
"sklearn.metrics.roc_auc_score",
"tensorflow.cast",
"tensorfl... | [((3976, 4087), 'pandas.DataFrame', 'pd.DataFrame', (['metrics'], {'columns': "['Accuracy', 'Sensitivity', 'Specificity', 'PPV', 'Auc_score']", 'index': 'labels'}), "(metrics, columns=['Accuracy', 'Sensitivity', 'Specificity',\n 'PPV', 'Auc_score'], index=labels)\n", (3988, 4087), True, 'import pandas as pd\n'), ((4446, 4487), 'tensorflow.keras.backend.mean', 'K.mean', (['(dice_numerator / dice_denominator)'], {}), '(dice_numerator / dice_denominator)\n', (4452, 4487), True, 'from tensorflow.keras import backend as K\n'), ((5403, 5441), 'tensorflow.reduce_mean', 'reduce_mean', (['guided_grads'], {'axis': '(0, 1)'}), '(guided_grads, axis=(0, 1))\n', (5414, 5441), False, 'from tensorflow import cast, reduce_mean\n'), ((5453, 5497), 'numpy.ones', 'np.ones', (['output.shape[0:2]'], {'dtype': 'np.float32'}), '(output.shape[0:2], dtype=np.float32)\n', (5460, 5497), True, 'import numpy as np\n'), ((5652, 5670), 'numpy.maximum', 'np.maximum', (['cam', '(0)'], {}), '(cam, 0)\n', (5662, 5670), True, 'import numpy as np\n'), ((726, 749), 'numpy.sum', 'np.sum', (['classes'], {'axis': '(0)'}), '(classes, axis=0)\n', (732, 749), True, 'import numpy as np\n'), ((839, 856), 'numpy.array', 'np.array', (['[neg_w]'], {}), '([neg_w])\n', (847, 856), True, 'import numpy as np\n'), ((873, 890), 'numpy.array', 'np.array', (['[pos_w]'], {}), '([pos_w])\n', (881, 890), True, 'import numpy as np\n'), ((3039, 3088), 'numpy.sum', 'np.sum', (['((y_true[:, i] == 1) & (y_pred[:, i] == 1))'], {}), '((y_true[:, i] == 1) & (y_pred[:, i] == 1))\n', (3045, 3088), True, 'import numpy as np\n'), ((3100, 3149), 'numpy.sum', 'np.sum', (['((y_true[:, i] == 0) & (y_pred[:, i] == 1))'], {}), '((y_true[:, i] == 0) & (y_pred[:, i] == 1))\n', (3106, 3149), True, 'import numpy as np\n'), ((3161, 3210), 'numpy.sum', 'np.sum', (['((y_true[:, i] == 0) & (y_pred[:, i] == 0))'], {}), '((y_true[:, i] == 0) & (y_pred[:, i] == 0))\n', (3167, 3210), True, 'import numpy as np\n'), ((3222, 3271), 
'numpy.sum', 'np.sum', (['((y_true[:, i] == 1) & (y_pred[:, i] == 0))'], {}), '((y_true[:, i] == 1) & (y_pred[:, i] == 0))\n', (3228, 3271), True, 'import numpy as np\n'), ((3754, 3791), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (3763, 3791), False, 'from sklearn.metrics import roc_auc_score, roc_curve\n'), ((3810, 3851), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (3823, 3851), False, 'from sklearn.metrics import roc_auc_score, roc_curve\n'), ((5110, 5124), 'tensorflow.GradientTape', 'GradientTape', ([], {}), '()\n', (5122, 5124), False, 'from tensorflow import GradientTape\n'), ((5764, 5787), 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), '(255 * heatmap)\n', (5772, 5787), True, 'import numpy as np\n'), ((2069, 2110), 'tensorflow.keras.backend.mean', 'K.mean', (['(dice_numerator / dice_denominator)'], {}), '(dice_numerator / dice_denominator)\n', (2075, 2110), True, 'from tensorflow.keras import backend as K\n'), ((3510, 3530), 'numpy.sum', 'np.sum', (['y_true[:, i]'], {}), '(y_true[:, i])\n', (3516, 3530), True, 'import numpy as np\n'), ((4272, 4307), 'tensorflow.keras.backend.sum', 'K.sum', (['(y_true * y_pred)'], {'axis': '(1, 2)'}), '(y_true * y_pred, axis=(1, 2))\n', (4277, 4307), True, 'from tensorflow.keras import backend as K\n'), ((4342, 4368), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(1, 2)'}), '(y_true, axis=(1, 2))\n', (4347, 4368), True, 'from tensorflow.keras import backend as K\n'), ((4396, 4422), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred'], {'axis': '(1, 2)'}), '(y_pred, axis=(1, 2))\n', (4401, 4422), True, 'from tensorflow.keras import backend as K\n'), ((5324, 5351), 'tensorflow.cast', 'cast', (['(output > 0)', '"""float32"""'], {}), "(output > 0, 'float32')\n", (5328, 5351), False, 'from tensorflow import cast, reduce_mean\n'), ((5354, 5380), 
'tensorflow.cast', 'cast', (['(grads > 0)', '"""float32"""'], {}), "(grads > 0, 'float32')\n", (5358, 5380), False, 'from tensorflow import cast, reduce_mean\n'), ((1860, 1895), 'tensorflow.keras.backend.sum', 'K.sum', (['(y_true * y_pred)'], {'axis': '(1, 2)'}), '(y_true * y_pred, axis=(1, 2))\n', (1865, 1895), True, 'from tensorflow.keras import backend as K\n'), ((1940, 1956), 'tensorflow.keras.backend.pow', 'K.pow', (['y_true', '(2)'], {}), '(y_true, 2)\n', (1945, 1956), True, 'from tensorflow.keras import backend as K\n'), ((2008, 2024), 'tensorflow.keras.backend.pow', 'K.pow', (['y_pred', '(2)'], {}), '(y_pred, 2)\n', (2013, 2024), True, 'from tensorflow.keras import backend as K\n'), ((1372, 1401), 'tensorflow.keras.backend.log', 'K.log', (['(y_pred[:, i] + epsilon)'], {}), '(y_pred[:, i] + epsilon)\n', (1377, 1401), True, 'from tensorflow.keras import backend as K\n'), ((1509, 1542), 'tensorflow.keras.backend.log', 'K.log', (['(1 - y_pred[:, i] + epsilon)'], {}), '(1 - y_pred[:, i] + epsilon)\n', (1514, 1542), True, 'from tensorflow.keras import backend as K\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Raccoon
#
import math
import sys
from openbabel import openbabel as ob
import numpy as np
from . import obutils
def get_vector(coord1, coord2):
    """Return the normalized direction vector from coord1 to coord2.

    BUG FIX: the parameter was named ``coor1`` while the body read
    ``coord1``, so every call raised NameError; the parameter is renamed
    to match (the function could never have worked, so no caller relied
    on the old keyword name). The normalization is inlined
    (v / sqrt(v.v)) to match the module's ``normalize`` helpers.
    """
    vec = np.array([coord2[0] - coord1[0], coord2[1] - coord1[1], coord2[2] - coord1[2]], 'f')
    return vec / np.sqrt(np.dot(vec, vec))
def vector(a, b):
    """Displacement vector pointing from ``a`` toward ``b``.

    NOTE(review): this name is re-defined later in the module
    (``vector(p1, p2=None, norm=0)``); at import time the later
    definition wins.
    """
    displacement = b - a
    return displacement
def resize_vector(v, length, origin=None):
    """Rescale vector ``v`` to the given ``length``.

    When ``origin`` is given, the vector is taken relative to that
    origin, rescaled, and translated back.
    """
    if origin is None:
        direction = v / np.sqrt(np.dot(v, v))
        return direction * length
    offset = v - origin
    direction = offset / np.sqrt(np.dot(offset, offset))
    return direction * length + origin
def get_vector_normal(vector):
    """Return a vector perpendicular to the input, using the
    swap-and-negate trick (x, y, z) -> (y, -x, 0) described elsewhere in
    this module.

    BUG FIX: the original called ``np.array(vector[1], vector[0],
    vector[2])``, passing the components as separate positional arguments
    (the second is interpreted as a dtype), which raised TypeError — and
    (y, x, z) is not normal to (x, y, z) anyway.
    NOTE: degenerate (zero vector) when the input is parallel to the z axis.
    """
    return np.array([vector[1], -vector[0], 0.0], 'f')
def normalize(v):
    """Return ``v`` scaled to unit length (numpy array)."""
    magnitude = np.sqrt(np.dot(v, v))
    return v / magnitude
###def centroid(self, atomlist):
### """ calculate centroid """
### centroid = np.array([0., 0., 0.], 'f')
### for i in atomlist:
### centroid += obutils.getAtomCoord(p.getAtomCoord(i)
### return centroid/len(atomlist)
###
def averageCoords(coordList):
    """Centroid of a list of 3-D coordinates.

    Source: http://stackoverflow.com/questions/23020659/fastest-way-to-calculate-the-centroid-of-a-set-of-coordinate-tuples-in-python-wi
    """
    accumulator = np.zeros(3)
    for coordinate in coordList:
        accumulator = accumulator + coordinate
    return accumulator / len(coordList)
def calcPlaneVect(v1, v2, norm=True):
    """Normal of the plane spanned by two vectors (their cross product).

    Returned with unit length unless ``norm`` is falsy.
    """
    plane_normal = np.cross(v1, v2)
    if norm:
        plane_normal = plane_normal / np.sqrt(np.dot(plane_normal, plane_normal))
    return plane_normal
def rotation_matrix(axis, theta):
    """
    Return the 3x3 rotation matrix for a counterclockwise rotation of
    ``theta`` radians about ``axis`` (Euler-Rodrigues formula).
    http://stackoverflow.com/questions/6802577/python-rotation-of-3d-vector
    """
    axis = np.asarray(axis)
    theta = np.asarray(theta)
    # normalize the axis so the quaternion below has unit norm
    axis = axis / math.sqrt(np.dot(axis, axis))
    a = math.cos(theta / 2)
    b, c, d = -axis * math.sin(theta / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    row1 = [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)]
    row2 = [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)]
    row3 = [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]
    return np.array([row1, row2, row3])
def rotate_around_axis(vector, rot_axis, apply_point=[0., 0., 0.]):
    """
    Rotate ``vector`` around an axis and translate by ``apply_point``.

    vector      : (x, y, z) point/vector to rotate
    rot_axis    : 4-element sequence (u, v, w, angle) — the first three
                  components are the rotation axis, the fourth the angle
                  in radians (it feeds sin/cos below); the axis is
                  presumably expected to be unit length — TODO confirm
    apply_point : offset added to the rotated result
    returns     : rotated point as a numpy array

    NOTE(review): ``apply_point`` is a mutable default argument; it is
    never mutated here, so this is safe but fragile.
    """
    # From Ludo
    # vector components
    x = vector[0]
    y = vector[1]
    z = vector[2]
    # rotation pivot (axis components)
    u = rot_axis[0]
    v = rot_axis[1]
    w = rot_axis[2]
    # precompute all pairwise products used by the expanded
    # Rodrigues rotation formula below
    ux = u*x
    uy = u*y
    uz = u*z
    vx = v*x
    vy = v*y
    vz = v*z
    wx = w*x
    wy = w*y
    wz = w*z
    sa = math.sin(rot_axis[3])
    ca = math.cos(rot_axis[3])
    #vector[0]=(u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa)+ apply_point[0]
    #vector[1]=(v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa)+ apply_point[1]
    #vector[2]=(w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa)+ apply_point[2]
    p0 = (u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa) + apply_point[0]
    p1 = (v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa) + apply_point[1]
    p2 = (w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa) + apply_point[2]
    #b = [vector, m, rot_axis]
    return np.array([p0, p1, p2])
def rotation_axis(p0, p1, p2, origin=None):
    """
    Compute a rotation axis from three points: the unit normal of the
    plane through p0, p1, p2, anchored at ``origin`` when given,
    otherwise at p0.
    """
    # unit normal of the p0-p1-p2 plane (vector(a, b) is b - a)
    r = normalize(np.cross(vector(p1, p0), vector(p2, p0)))
    if origin is not None:
        return origin + r
    return p0 + r
def atom_to_move(o, p):
    """
    Return the coordinates xyz of an atom just above acceptor/donor atom o.

    o : coordinates of the acceptor/donor atom
    p : one or more neighbor coordinates; the returned point is o offset
        by a unit step away from the mean of p.
    """
    # It will not work if there is just one dimension,
    # so promote p to 2-D before averaging
    p = np.atleast_2d(p)
    return o + normalize(-1. * vector(o, np.mean(p, axis=0)))
def rotate_point(p, p1, p2, angle):
    """Rotate the point ``p`` around the axis through ``p1`` and ``p2``
    by ``angle`` radians.

    Source: http://paulbourke.net/geometry/rotate/PointRotate.py
    """
    # work relative to p1 so the axis passes through the origin
    translated = p - p1
    # unit vector along the rotation axis
    axis = p2 - p1
    axis = axis / np.sqrt(np.dot(axis, axis))
    cos_a = np.cos(angle)
    one_minus_cos = 1. - np.cos(angle)
    sin_a = np.sin(angle)
    x, y, z = axis[0], axis[1], axis[2]
    rot = np.array([
        [one_minus_cos * x ** 2 + cos_a,
         one_minus_cos * x * y - sin_a * z,
         one_minus_cos * x * z + sin_a * y],
        [one_minus_cos * x * y + sin_a * z,
         one_minus_cos * y ** 2 + cos_a,
         one_minus_cos * y * z - sin_a * x],
        [one_minus_cos * x * z - sin_a * y,
         one_minus_cos * y * z + sin_a * x,
         one_minus_cos * z ** 2 + cos_a],
    ])
    # apply the rotation, then translate back
    rotated = np.dot(translated, rot)
    return rotated + p1
def getVecNormalToVec(vec):
    """
    Return a unit vector perpendicular to the numpy vector ``vec``.

    Picks a helper axis that is guaranteed not to be parallel to ``vec``
    (the x axis unless vec lies along x, otherwise the y axis) and
    returns the unit normal of the plane they span.

    Source: http://forums.create.msdn.com/forums/p/9551/50048.aspx
    (swap two components, negate one, zero the third — being careful
    when components are zero so the result is never the zero vector).

    BUG FIX: the original called ``calcPlane(vec, c, norm=True)``, but
    ``calcPlane`` takes three *points* and no ``norm`` keyword, so this
    always raised TypeError; ``calcPlaneVect`` (two vectors, ``norm``
    keyword) is the intended helper.
    """
    if (not vec[1] == 0) or (not vec[2] == 0):
        helper = np.array([1., 0., 0.], 'f')
    else:
        helper = np.array([0., 1., 0.], 'f')
    return calcPlaneVect(vec, helper, norm=True)
def calcDihedral(a1, a2, a3, a4):
    """ given 4 sets of coordinates return the dihedral
        angle between them (in radians, via atan2)
    """
    # bond vectors around the central a2-a3 axis (vector(a, b) is b - a)
    v1 = vector(a1, a2)
    v2 = vector(a3, a2)
    v3 = vector(a3, a4)
    # normals used by the atan2 formulation of the dihedral
    v4 = np.cross(v1, v2)
    v5 = np.cross(v2, v4)
    try:
        dihe = math.atan2(np.dot(v3,v4), np.dot(v3,v5) * math.sqrt(np.dot(v2,v2)))
    except ZeroDivisionError:
        # NOTE(review): numpy float division yields inf/nan rather than
        # raising, so this guard may never trigger — confirm intent
        dihe = 0.
    return dihe
def makeCircleOnPlane(center, r, normal, points = 8):
    """
    Calculate the points of a circle lying on an arbitrary plane
    defined by the normal vector.
    center : coords of center of the circle
    r      : radius
    normal : normal of the plane where the circle lies
    points : number of points for the circle
    returns: list of numpy points on the circle

    # http://www.physicsforums.com/showthread.php?t=123168
    # P = R*cos(theta)*U + R*sin(theta)*(N x U) + c
    # where U is a vector in the plane (swap-and-negate trick on N),
    # R the radius, N the plane normal and c the circle center.

    BUG FIX: the original referenced undefined names (``array``, ``PI2``,
    ``cos``, ``sin``, ``cross``, ``vecSum``) — apparently written for a
    ``from numpy import *`` environment — and raised NameError on every
    call. This version uses the module's ``np``/``math`` imports and
    inlines the normalization.
    NOTE: degenerate when ``normal`` is parallel to the z axis
    (U becomes the zero vector), as with the original trick.
    """
    center = np.asarray(center, dtype='f')
    N = np.asarray(normal, dtype='f')
    # in-plane reference direction: swap-and-negate trick on the normal
    U = np.array([N[1], -N[0], 0.], 'f')
    two_pi = 2 * math.pi
    step = two_pi / points
    circle = []
    for i in range(points):
        theta = two_pi - (step * i)
        P = (r * math.cos(theta)) * U + (r * math.sin(theta)) * np.cross(N, U) + center
        # rescale the radial component so every point sits exactly at radius r
        radial = P - center
        radial = radial / np.sqrt(np.dot(radial, radial))
        circle.append(radial * r + center)
    return circle
def quickdist(f, s, sq=False):
    """Squared Euclidean distance between two 3-D points (coordinates or
    vectors); returns the true distance when ``sq`` is True.

    On malformed input (missing coordinates), prints diagnostics and
    raises Exception, as before.
    """
    try:
        dx = f[0] - s[0]
        dy = f[1] - s[1]
        dz = f[2] - s[2]
        dist_sq = dx * dx + dy * dy + dz * dz
        if sq:
            return math.sqrt(dist_sq)
        return dist_sq
    except:  # bare except kept to preserve the original error behavior
        print("First", f)
        print("Second", s)
        print("WARNING! missing coordinates", sys.exc_info()[1])
        raise Exception
def atomsToVector(at1, at2=None, norm=0):
    """
    Vector between two atoms' coordinates (normalized when ``norm`` is
    truthy); with a single atom, its coordinates as a vector.

    NOTE(review): ``atomCoord`` is not defined in this module — presumably
    provided elsewhere (obutils exposes getAtomCoord); verify before use.
    """
    at1 = atomCoord(at1)
    if at2: at2 = atomCoord(at2)
    return vector(at1, at2, norm=norm)
def vector(p1, p2=None, norm=0): # TODO use Numpy?
    """Return ``p2 - p1`` as a float32 array, or ``p1`` itself as a
    float32 array when ``p2`` is None; normalized when ``norm`` is truthy.
    """
    if p2 is None:
        vec = np.array([p1[0], p1[1], p1[2]], 'f')
    else:
        vec = np.array([p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]], 'f')
    if norm:
        return normalize(vec)
    return vec
def norm(A): # TODO use Numpy
    "Return the Euclidean norm of the vector (numpy array) A"
    squared_magnitude = sum(A * A)
    return np.sqrt(squared_magnitude)
def normalize(A): # TODO use Numpy
    "Return A scaled to unit length"
    return A / np.sqrt(sum(A * A))
def calcPlane(p1, p2, p3):
    # returns the unit normal of the plane containing the 3 input points
    # (cross product of two in-plane edge vectors; vector(a, b) is b - a)
    v12 = vector(p1,p2)
    v13 = vector(p3,p2)
    return normalize(np.cross(v12, v13))
def dot(vector1, vector2):
    """Return the dot (scalar) product of two equal-length sequences.

    Kept as a plain-Python helper so list inputs work without conversion.
    The 0. start value preserves the original float accumulator, and
    indexing vector2 by vector1's range preserves the original IndexError
    when vector2 is shorter.
    """
    return sum((vector1[i] * vector2[i] for i in range(len(vector1))), 0.)
def vecAngle(v1, v2, rad=1):
    """Return the angle between vectors v1 and v2.

    Args:
        v1, v2: 3-component vectors.
        rad: if truthy (default) the angle is in radians, else degrees.

    Returns 0 for identical vectors, and 0 defensively if acos still fails.
    """
    # Check equality first: the original computed the dot product before
    # this test, wasting work (and normalizing zero vectors) for v1 == v2.
    if np.array_equal(v1, v2):
        return 0
    cos_theta = dot(normalize(v1), normalize(v2))
    # Clamp: floating-point drift can push |cos| slightly above 1, which
    # would make math.acos raise the very ValueError handled below.
    cos_theta = max(-1.0, min(1.0, cos_theta))
    try:
        if rad:
            return math.acos(cos_theta)
        else:
            return math.degrees(math.acos(cos_theta))
    except ValueError:
        print("#vecAngle> CHECK TrottNormalization", v1, v2, sys.exc_info()[1])
        return 0
def absoluteAngleDifference(angle1, angle2, rad=1):
    """Return the smallest absolute difference between two angles.

    Works in radians when `rad` is truthy (default), degrees otherwise.
    https://gamedev.stackexchange.com/questions/4467/comparing-angles-and-working-out-the-difference
    """
    half_turn = math.radians(180) if rad else 180
    return half_turn - abs(abs(angle1 - angle2) - half_turn)
def vecSum(vec1, vec2):
    """Return the element-wise sum of two 3-vectors as a float32 numpy array.

    Consistency fix: uses the explicit `np.array` like the rest of the
    module instead of the bare `array` name from a star import.
    """
    return np.array([vec1[0] + vec2[0], vec1[1] + vec2[1], vec1[2] + vec2[2]], 'f')
def normValue(v, vmin, vmax, normrange=[0, 10]):
    """Linearly map v from [vmin, vmax] onto [normrange[0], normrange[1]].

    Reference: http://mathforum.org/library/drmath/view/60433.html
        y = A' + (x - A) * (B' - A') / (B - A)

    Bug fix: the scale factor must be the *span* of the target range
    (normrange[1] - normrange[0]); the old code multiplied by
    normrange[1] alone, over-shooting whenever normrange[0] != 0.
    Behaviour is unchanged for the default [0, 10] range.
    """
    span = normrange[1] - normrange[0]
    return normrange[0] + (v - vmin) * span / (vmax - vmin)
def normProduct(a, b, mode='simple'):
    """Combine two values: plain product, or product scaled by the sum.

    Returns None (implicitly, as before) for an unrecognized mode.
    """
    if mode == 'simple':
        return a * b
    if mode == 'scaled':
        return (a * b) * (a + b)
def avgVector_untested(vec_list, normalize=False):
    # XXX NOT WORKING!!!
    # NOTE(review): this function raises NameError if called — `m` and `ax`
    # are never defined and `rotate_around_axis` is not resolved here.
    # It is kept only as a sketch of the averaging algorithm below.
    # The `normalize` parameter is unused and shadows the module-level
    # normalize() helper.
    # http://devmaster.net/forums/topic/5443-average-direction-vector/
    # Intended pseudo-code from the link above:
    #weight = 1;
    #average = vec[0];
    #for (i = 1; i < n; ++i)
    #{
    # find angle between average and vec[i];
    # angle *= weight / (weight + 1);
    # average = rotate vec[i] towards average by angle;
    # ++weight;
    #}
    print("avgVector> NOT WORKING!!!! NEVER TESTED")
    weight = 1
    average = vec_list[0]
    for i in range(len(vec_list) - 1):
        angle = vecAngle(average, vec_list[i + 1])
        angle *= weight / (weight + 1)
        average = rotate_around_axis(vec_list[i + 1], m, ax)
        # XXX m?  (undefined — see NOTE above)
        # XXX ax? (undefined — see NOTE above)
        weight += 1
    return average
def averageVector(vectorList, norm=True):
    """Return the mean of a list of 3-vectors, normalized by default."""
    # Accumulate in float32, matching the 'f' dtype used module-wide.
    # (Local renamed from `vector`, which shadowed the helper of that name.)
    total = np.array([0., 0., 0.], 'f')
    for vec in vectorList:
        total += vec
    mean = total / len(vectorList)
    return normalize(mean) if norm else mean
def coplanar(plane, coord_list=[], reference=[0., 0., 0.], tolerance=0.2):
    """Return the coordinates lying within <tolerance> of the plane.

    Position vectors are taken with <reference> as the origin.
    NOTE(review): the test uses the *signed* projection (dot <= tolerance),
    so points far on the negative side of the plane also pass — confirm
    whether abs(dot(...)) was intended.
    """
    return [c for c in coord_list
            if dot(plane, vector(reference, c)) <= tolerance]
def calcRingCentroidNormal(atomCoords):
    """Extract aromatic-ring geometry from an array of atom coordinates.

    Returns a dict with the ring centroid, its plane normal (from the
    first three atoms), the two opposite-facing ring normals and the
    normalized centroid->normal vectors.
    """
    a1, a2, a3 = atomCoords[0], atomCoords[1], atomCoords[2]
    centroid = averageCoords(atomCoords)
    plane = calcPlane(a1, a2, a3)
    # Two centroid->atom vectors span the ring plane; crossing them in
    # both orders yields the two opposite-facing normals.
    v1 = vector(centroid, a1)
    v2 = vector(centroid, a2)
    normal1 = normalize(np.cross(v1, v2))
    normal2 = normalize(np.cross(v2, v1))
    return {
        'centroid': centroid,
        'plane': plane,
        'normals': [normal1, normal2],
        'centroid_normals': [normalize(vector(centroid, normal1)),
                             normalize(vector(centroid, normal2))],
    }
def gaussian(x, ymax=1., center=0., spread=0.7, invert=False):
    """Simple Gaussian of x with peak ymax, optionally sign-inverted.

    Fix: uses math.exp explicitly — the bare `e ** (...)` relied on an
    `e` constant coming from a star import.
    """
    sign = -1 if invert else 1
    return ymax * math.exp(-((x - center) ** 2 / (2 * spread ** 2))) * sign
def ellipticGaussian(coord, pseudo, planeVec, centroid=None, dist=None, ellipticity = 1.0,
        g_ymax=1.0, g_center=0.0, g_spread=1.2, g_invert=True):
    """Elliptical gaussian potential based on the angle with the plane.
        ____
     .--    __.M
    <    P--'
     `''----|
            |
     ------O----- plane
    """
    # `centroid` is accepted but unused here (kept for interface compatibility).
    theta = vecAngle(vector(coord, pseudo), planeVec)
    d = quickdist(coord, pseudo, sq=1) if dist is None else dist
    # Stretch the distance by |cos| of the angle with the plane; negating
    # this term instead would create a concave cone-shaped ring.
    d += d * ellipticity * math.fabs(math.cos(theta))
    return gaussian(d, ymax=g_ymax, center=g_center,
                    spread=g_spread, invert=g_invert)
| [
"math.sqrt",
"math.radians",
"numpy.asarray",
"numpy.zeros",
"numpy.cross",
"math.sin",
"math.acos",
"numpy.sin",
"numpy.array",
"math.cos",
"numpy.cos",
"numpy.mean",
"numpy.array_equal",
"numpy.dot",
"sys.exc_info",
"numpy.atleast_2d"
] | [((261, 350), 'numpy.array', 'np.array', (['[coord2[0] - coord1[0], coord2[1] - coord1[1], coord2[2] - coord1[2]]', '"""f"""'], {}), "([coord2[0] - coord1[0], coord2[1] - coord1[1], coord2[2] - coord1[\n 2]], 'f')\n", (269, 350), True, 'import numpy as np\n'), ((811, 852), 'numpy.array', 'np.array', (['vector[1]', 'vector[0]', 'vector[2]'], {}), '(vector[1], vector[0], vector[2])\n', (819, 852), True, 'import numpy as np\n'), ((1358, 1369), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1366, 1369), True, 'import numpy as np\n'), ((1579, 1595), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (1587, 1595), True, 'import numpy as np\n'), ((1917, 1933), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (1927, 1933), True, 'import numpy as np\n'), ((1946, 1963), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (1956, 1963), True, 'import numpy as np\n'), ((2018, 2037), 'math.cos', 'math.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (2026, 2037), False, 'import math\n'), ((2183, 2354), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (2191, 2354), True, 'import numpy as np\n'), ((3013, 3034), 'math.sin', 'math.sin', (['rot_axis[3]'], {}), '(rot_axis[3])\n', (3021, 3034), False, 'import math\n'), ((3044, 3065), 'math.cos', 'math.cos', (['rot_axis[3]'], {}), '(rot_axis[3])\n', (3052, 3065), False, 'import math\n'), ((3602, 3624), 'numpy.array', 'np.array', (['[p0, p1, p2]'], {}), '([p0, p1, p2])\n', (3610, 3624), True, 'import numpy as np\n'), ((4060, 4076), 'numpy.atleast_2d', 'np.atleast_2d', (['p'], {}), '(p)\n', (4073, 4076), True, 'import numpy as np\n'), ((4493, 4506), 'numpy.cos', 'np.cos', (['angle'], {}), 
'(angle)\n', (4499, 4506), True, 'import numpy as np\n'), ((4542, 4555), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4548, 4555), True, 'import numpy as np\n'), ((4596, 4782), 'numpy.array', 'np.array', (['[[t * x ** 2 + c, t * x * y - s * z, t * x * z + s * y], [t * x * y + s * z,\n t * y ** 2 + c, t * y * z - s * x], [t * x * z - s * y, t * y * z + s *\n x, t * z ** 2 + c]]'], {}), '([[t * x ** 2 + c, t * x * y - s * z, t * x * z + s * y], [t * x *\n y + s * z, t * y ** 2 + c, t * y * z - s * x], [t * x * z - s * y, t *\n y * z + s * x, t * z ** 2 + c]])\n', (4604, 4782), True, 'import numpy as np\n'), ((4795, 4808), 'numpy.dot', 'np.dot', (['pn', 'R'], {}), '(pn, R)\n', (4801, 4808), True, 'import numpy as np\n'), ((6463, 6479), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (6471, 6479), True, 'import numpy as np\n'), ((6489, 6505), 'numpy.cross', 'np.cross', (['v2', 'v4'], {}), '(v2, v4)\n', (6497, 6505), True, 'import numpy as np\n'), ((9743, 9765), 'numpy.array_equal', 'np.array_equal', (['v1', 'v2'], {}), '(v1, v2)\n', (9757, 9765), True, 'import numpy as np\n'), ((11812, 11842), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]', '"""f"""'], {}), "([0.0, 0.0, 0.0], 'f')\n", (11820, 11842), True, 'import numpy as np\n'), ((2056, 2075), 'math.sin', 'math.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (2064, 2075), False, 'import math\n'), ((4520, 4533), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4526, 4533), True, 'import numpy as np\n'), ((6141, 6171), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]', '"""f"""'], {}), "([1.0, 0.0, 0.0], 'f')\n", (6149, 6171), True, 'import numpy as np\n'), ((6189, 6219), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]', '"""f"""'], {}), "([0.0, 1.0, 0.0], 'f')\n", (6197, 6219), True, 'import numpy as np\n'), ((8912, 8972), 'numpy.array', 'np.array', (['[p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]', '"""f"""'], {}), "([p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]], 'f')\n", (8920, 8972), True, 
'import numpy as np\n'), ((8988, 9024), 'numpy.array', 'np.array', (['[p1[0], p1[1], p1[2]]', '"""f"""'], {}), "([p1[0], p1[1], p1[2]], 'f')\n", (8996, 9024), True, 'import numpy as np\n'), ((9422, 9440), 'numpy.cross', 'np.cross', (['v12', 'v13'], {}), '(v12, v13)\n', (9430, 9440), True, 'import numpy as np\n'), ((10231, 10248), 'math.radians', 'math.radians', (['(180)'], {}), '(180)\n', (10243, 10248), False, 'import math\n'), ((12778, 12794), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (12786, 12794), True, 'import numpy as np\n'), ((12820, 12836), 'numpy.cross', 'np.cross', (['v2', 'v1'], {}), '(v2, v1)\n', (12828, 12836), True, 'import numpy as np\n'), ((923, 935), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (929, 935), True, 'import numpy as np\n'), ((1990, 2008), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (1996, 2008), True, 'import numpy as np\n'), ((6541, 6555), 'numpy.dot', 'np.dot', (['v3', 'v4'], {}), '(v3, v4)\n', (6547, 6555), True, 'import numpy as np\n'), ((8463, 8475), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (8472, 8475), False, 'import math\n'), ((9828, 9844), 'math.acos', 'math.acos', (['angle'], {}), '(angle)\n', (9837, 9844), False, 'import math\n'), ((13891, 13906), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (13899, 13906), False, 'import math\n'), ((6556, 6570), 'numpy.dot', 'np.dot', (['v3', 'v5'], {}), '(v3, v5)\n', (6562, 6570), True, 'import numpy as np\n'), ((9891, 9907), 'math.acos', 'math.acos', (['angle'], {}), '(angle)\n', (9900, 9907), False, 'import math\n'), ((4118, 4136), 'numpy.mean', 'np.mean', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (4125, 4136), True, 'import numpy as np\n'), ((6582, 6596), 'numpy.dot', 'np.dot', (['v2', 'v2'], {}), '(v2, v2)\n', (6588, 6596), True, 'import numpy as np\n'), ((8611, 8625), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8623, 8625), False, 'import sys\n'), ((9993, 10007), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', 
(10005, 10007), False, 'import sys\n')] |
"""
Getting the data
================
In this section, we will discuss how to get functional data to
use in scikit-fda. We will briefly describe the
:class:`~skfda.representation.grid.FDataGrid` class, which is the type that
scikit-fda uses for storing and working with functional data in discretized
form. We will discuss also how to import functional data from several sources
and show how to fetch and load existing datasets popular in the :term:`FDA`
literature.
.. Disable isort
isort:skip_file
"""
# Author: <NAME>
# License: MIT
#
# sphinx_gallery_thumbnail_number = 6
##############################################################################
# The FDataGrid class
# -------------------
#
# In order to use scikit-fda, first we need functional data to analyze.
# A common case is to have each functional observation measured at the same
# points.
# This kind of functional data is easily representable in scikit-fda using
# the :class:`~skfda.representation.grid.FDataGrid` class.
# The :class:`~skfda.representation.grid.FDataGrid` has two important
# attributes: ``data_matrix`` and ``grid_points``.
#
# The attribute ``grid_points`` is a tuple with the same length as the
# number of domain dimensions (that is, one for curves, two for surfaces...).
# Each of its elements is a 1D numpy :class:`~numpy.ndarray` containing the
# grid points for that particular dimension,
# .. math::
# ((t_1, \ldots, t_{M_i}))_{i=1}^p,
# where :math:`M_i` is the number of measurement points for each "argument"
# or domain coordinate of the function :math:`i` and :math:`p` is the domain
# dimension.
#
# The attribute ``data_matrix`` is a
# numpy :class:`~numpy.ndarray` containing the measured values of the
# functions in the grid spanned by the grid points. For functions
# :math:`\{x_i: \mathbb{R}^p \to \mathbb{R}^q\}_{i=1}^N` this is a tensor
# with dimensions :math:`N \times M_1 \times \ldots \times M_p \times q`.
##############################################################################
# In order to create a :class:`~skfda.representation.grid.FDataGrid`, these
# attributes may be provided. The attributes are converted to
# :class:`~numpy.ndarray` when necessary.
#
# .. note::
#
# The grid points can be omitted,
# and in that case their number is inferred from the dimensions of
# ``data_matrix`` and they are automatically assigned as equispaced points
# in the unitary cube in the domain set.
#
# In the common case of functions with domain dimension of 1, the list of
# grid points can be passed directly as ``grid_points``.
#
# If the codomain dimension is 1, the last dimension of ``data_matrix``
# can be dropped.
##############################################################################
# The following example shows the creation of a
# :class:`~skfda.representation.grid.FDataGrid` with two functions (curves)
# :math:`\{x_i: \mathbb{R} \to \mathbb{R}\}, i=1,2` measured at the same
# (non-equispaced) points.
import skfda
import matplotlib.pyplot as plt
# Two scalar curves sampled at the same five (non-equispaced) points
# of the unit interval; each row of data_matrix is one observation.
grid_points = [0, 0.2, 0.5, 0.9, 1]  # Grid points of the curves
data_matrix = [
    [0, 0.2, 0.5, 0.9, 1],  # First observation
    [0, 0.04, 0.25, 0.81, 1],  # Second observation
]
fd = skfda.FDataGrid(
    data_matrix=data_matrix,
    grid_points=grid_points,
)
fd.plot()
plt.show()
##############################################################################
# Advanced example
# ^^^^^^^^^^^^^^^^
#
# In order to better understand the FDataGrid structure, you can consider the
# following example, in which a :class:`~skfda.representation.grid.FDataGrid`
# object is created, containing just one function (vector-valued surface)
# :math:`x: \mathbb{R}^2 \to \mathbb{R}^4`.
# A single vector-valued surface x: R^2 -> R^4 sampled on a 3x2 grid:
# data_matrix has shape (1 observation, 3, 2, 4 output components).
grid_points_surface = [
    [0.2, 0.5, 0.7],  # Measurement points in first domain dimension
    [0, 1.5],  # Measurement points in second domain dimension
]
data_matrix_surface = [
    # First observation
    [
        # 0.2
        [
            # Value at (0.2, 0)
            [1, 2, 3, 4.1],
            # Value at (0.2, 1.5)
            [0, 1, -1.3, 2],
        ],
        # 0.5
        [
            # Value at (0.5, 0)
            [-2, 0, 5.5, 7],
            # Value at (0.5, 1.5)
            [2, 1.1, -1, -2],
        ],
        # 0.7
        [
            # Value at (0.7, 0)
            [0, 0, 1.1, 1],
            # Value at (0.7, 1.5)
            [-3, 5, -0.5, -2],
        ],
    ],
    # This example has only one observation. Next observations would be
    # added here.
]
fd = skfda.FDataGrid(
    data_matrix=data_matrix_surface,
    grid_points=grid_points_surface,
)
fd.plot()
plt.show()
##############################################################################
# Importing data
# --------------
#
# Usually one does not construct manually the functions, but instead uses
# measurements already formatted in a common format, such as comma-separated
# values (CSV), attribute-relation file format (ARFF) or Matlab and R formats.
#
# If your data is in one of these formats, you can import it into a numpy
# array using the IO functions available in
# `Numpy <https://numpy.org/devdocs/reference/routines.io.html>`_ (for simple
# text-based or binary formats, such as CSV) or in
# `Scipy <https://docs.scipy.org/doc/scipy/reference/io.html>`_ (for Matlab,
# Fortran or ARFF files). For importing data in the R format one can also
# use the package `RData <https://rdata.readthedocs.io>`_ which is already a
# dependency of scikit-fda, as it is used to load the example datasets.
##############################################################################
# Once your data has been introduced as a :class:`~numpy.ndarray` instance,
# you will need to give it the proper dimensions and use it to instantiate
# a functional data object.
##############################################################################
# .. note::
#
# :class:`Pandas DataFrames <pandas.DataFrame>` are also popular as
# datasets containers in the Python scientific ecosystem. If you have
# data in a Pandas DataFrame, you can extract its content as a Numpy
# array using the method :meth:`~pandas.DataFrame.to_numpy` of the
# DataFrame.
##############################################################################
# As an example, we will load the
# :func:`digits dataset <sklearn.datasets.load_digits>` of scikit-learn, which
# is a preprocessed subset of the MNIST dataset, containing digit images. The
# data is already a numpy array. As the data has been flattened into a 1D
# vector of pixels, we need to reshape the arrays to their original 8x8 shape.
# Then this array can be used to construct the digits as surfaces.
from sklearn.datasets import load_digits

X, y = load_digits(return_X_y=True)
# Undo the flattening: each 64-pixel row becomes an 8x8 image (surface).
X = X.reshape(-1, 8, 8)
# Grid points are omitted, so they are inferred as equispaced points.
fd = skfda.FDataGrid(X)
# Plot the first 2 observations
fd[0].plot()
fd[1].plot()
plt.show()
##############################################################################
# Common datasets
# ---------------
#
# scikit-fda can download and import for you several of the most popular
# datasets in the :term:`FDA` literature, such as the Berkeley Growth
# dataset (function :func:`~skfda.datasets.fetch_growth`) or the Canadian
# Weather dataset (function :func:`~skfda.datasets.fetch_weather`). These
# datasets are often useful as benchmarks, in order to compare results
# between different algorithms, or simply as examples to use in teaching or
# research.
# Fetch the Berkeley Growth curves and color them by the class label y.
X, y = skfda.datasets.fetch_growth(return_X_y=True)
X.plot(group=y)
plt.show()
##############################################################################
# Datasets from CRAN
# ^^^^^^^^^^^^^^^^^^
#
# If you want to work with a dataset for which no fetching function exist, and
# you know that is available inside a R package in the CRAN repository, you
# can try using the function :func:`~skfda.datasets.fetch_cran`. This function
# will load the package, fetch the dataset and convert it to Python objects
# using the packages
# `scikit-datasets <https://github.com/daviddiazvico/scikit-datasets>`_ and
# `RData <https://rdata.readthedocs.io>`_. As datasets in CRAN follow no
# particular structure, you will need to know how it is structured internally
# in order to use it properly.
##############################################################################
# .. note::
#
# Functional data objects from some packages, such as
# `fda.usc <https://cran.r-project.org/web/packages/fda.usc/index.html>`_
# are automatically recognized as such and converted to
# :class:`~skfda.representation.grid.FDataGrid` instances. This
# behaviour can be disabled or customized to work with more packages.
# Fetch the MCO dataset from the CRAN package fda.usc and plot one of
# its functional members.
data = skfda.datasets.fetch_cran("MCO", "fda.usc")

data["MCO"]["intact"].plot()
plt.show()
##############################################################################
# Datasets from the UEA & UCR Time Series Classification Repository
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The `UEA & UCR Time Series Classification Repository
# <http://www.timeseriesclassification.com/>`_ is a popular repository
# for classification problems involving time series data. The datasets used
# can be considered also as functional observations, where the functions
# involved have domain dimension of 1, and the grid points are
# equispaced. Thus, they have also been used in the :term:`FDA` literature.
# The original UCR datasets are univariate time series, while the new UEA
# datasets incorporate also vector-valued data.
# In scikit-fda, the function :func:`~skfda.datasets.fetch_ucr` can be used
# to obtain both kinds of datasets as
# :class:`~skfda.representation.grid.FDataGrid` instances.
# Load ArrowHead dataset from UCR
# Univariate time-series dataset (original UCR collection).
dataset = skfda.datasets.fetch_ucr("ArrowHead")

dataset["data"].plot()
plt.show()

##############################################################################
# Load BasicMotions dataset from UEA
# Vector-valued time-series dataset (newer UEA collection).
dataset = skfda.datasets.fetch_ucr("BasicMotions")

dataset["data"].plot()
plt.show()
##############################################################################
# Synthetic data
# --------------
#
# Sometimes it is not enough to have real-world data at your disposal.
# Perhaps the messy nature of real-world data makes difficult to detect when
# a particular algorithm has a strange behaviour. Perhaps you want to see how
# it performs under a simplified model. Maybe you want to see what happens
# when your data has particular characteristics, for which no dataset is
# available. Or maybe you only want to illustrate a concept without having
# to introduce a particular set of data.
#
# In those cases, the ability to use generated data is desirable. To aid this
# use case, scikit-learn provides several functions that generate data
# according to some model. These functions are in the
# :doc:`datasets </modules/datasets>` module and have the prefix ``make_``.
# Maybe the most useful of those are the functions
# :func:`skfda.datasets.make_gaussian_process` and
# :func:`skfda.datasets.make_gaussian` which can be used to generate Gaussian
# processes and Gaussian fields with different covariance functions.
import numpy as np

# Generate 5 synthetic trajectories of a Gaussian process on [0, 4]
# with an exponential covariance kernel and mean t^2.
cov = skfda.misc.covariances.Exponential(length_scale=0.1)

fd = skfda.datasets.make_gaussian_process(
    start=0,
    stop=4,
    n_samples=5,
    n_features=100,
    mean=lambda t: np.power(t, 2),
    cov=cov,
)
fd.plot()
plt.show()
##############################################################################
# In order to know all the available functionalities to load existing and
# synthetic datasets it is recommended to look at the documentation of the
# :doc:`datasets </modules/datasets>` module.
| [
"sklearn.datasets.load_digits",
"skfda.datasets.fetch_cran",
"matplotlib.pyplot.show",
"numpy.power",
"skfda.datasets.fetch_ucr",
"skfda.FDataGrid",
"skfda.misc.covariances.Exponential",
"skfda.datasets.fetch_growth"
] | [((3229, 3294), 'skfda.FDataGrid', 'skfda.FDataGrid', ([], {'data_matrix': 'data_matrix', 'grid_points': 'grid_points'}), '(data_matrix=data_matrix, grid_points=grid_points)\n', (3244, 3294), False, 'import skfda\n'), ((3317, 3327), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3325, 3327), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4621), 'skfda.FDataGrid', 'skfda.FDataGrid', ([], {'data_matrix': 'data_matrix_surface', 'grid_points': 'grid_points_surface'}), '(data_matrix=data_matrix_surface, grid_points=\n grid_points_surface)\n', (4550, 4621), False, 'import skfda\n'), ((4639, 4649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4647, 4649), True, 'import matplotlib.pyplot as plt\n'), ((6745, 6773), 'sklearn.datasets.load_digits', 'load_digits', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (6756, 6773), False, 'from sklearn.datasets import load_digits\n'), ((6804, 6822), 'skfda.FDataGrid', 'skfda.FDataGrid', (['X'], {}), '(X)\n', (6819, 6822), False, 'import skfda\n'), ((6882, 6892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6890, 6892), True, 'import matplotlib.pyplot as plt\n'), ((7470, 7514), 'skfda.datasets.fetch_growth', 'skfda.datasets.fetch_growth', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (7497, 7514), False, 'import skfda\n'), ((7532, 7542), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7540, 7542), True, 'import matplotlib.pyplot as plt\n'), ((8696, 8739), 'skfda.datasets.fetch_cran', 'skfda.datasets.fetch_cran', (['"""MCO"""', '"""fda.usc"""'], {}), "('MCO', 'fda.usc')\n", (8721, 8739), False, 'import skfda\n'), ((8770, 8780), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8778, 8780), True, 'import matplotlib.pyplot as plt\n'), ((9753, 9790), 'skfda.datasets.fetch_ucr', 'skfda.datasets.fetch_ucr', (['"""ArrowHead"""'], {}), "('ArrowHead')\n", (9777, 9790), False, 'import skfda\n'), ((9814, 9824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(9822, 9824), True, 'import matplotlib.pyplot as plt\n'), ((9953, 9993), 'skfda.datasets.fetch_ucr', 'skfda.datasets.fetch_ucr', (['"""BasicMotions"""'], {}), "('BasicMotions')\n", (9977, 9993), False, 'import skfda\n'), ((10017, 10027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10025, 10027), True, 'import matplotlib.pyplot as plt\n'), ((11191, 11243), 'skfda.misc.covariances.Exponential', 'skfda.misc.covariances.Exponential', ([], {'length_scale': '(0.1)'}), '(length_scale=0.1)\n', (11225, 11243), False, 'import skfda\n'), ((11411, 11421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11419, 11421), True, 'import matplotlib.pyplot as plt\n'), ((11369, 11383), 'numpy.power', 'np.power', (['t', '(2)'], {}), '(t, 2)\n', (11377, 11383), True, 'import numpy as np\n')] |
# yolov4-tf2 wrapper
import tensorflow as tf
import pdb
##################
## Needed for my current setup
# Enable memory growth on every visible GPU so TensorFlow does not
# reserve all device memory up-front.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)
##################
# import keras
import keras
import sys
# Make the local keras-retinanet sources importable.
sys.path.insert(0, '../src/')
# import keras_retinanet
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
import cv2
import numpy as np
# Default classes:
# Mapping from model class id to human-readable label.
labels_to_names = {0: 'Pedestrian', 1: 'Car'}
class retinanet_inference():
    """Keras-retinanet wrapper that detects pedestrians and cars in frames."""

    def __init__(self, weight_path, threshold=0.5, classe_filter=['Pedestrian', 'Car']):
        """Create the detector and load its weights immediately.

        Args:
            weight_path: path to the retinanet weight (.pth) file.
            threshold: minimum detection score to keep a box.
            classe_filter: labels to keep. (The list default is safe here
                because it is never mutated.)
        """
        # Model
        self.weight_path = weight_path
        self.threshold = threshold
        self.model = None
        self.classe_filter = classe_filter
        self.classes_list = self.CLASSES = labels_to_names
        # Only class ids <= label_limit are considered (0=Pedestrian, 1=Car).
        self.label_limit = 1
        # Load model
        self.load_model()
        # self.model.summary()

    def load_model(self):
        """Load the retinanet weights; self.model stays None on failure."""
        print("Loading model: {}".format(self.weight_path))
        try:
            self.model = models.load_model(self.weight_path, backbone_name='resnet50')
        except Exception as e:
            print("Unable to load weight file: {}".format(e))
        else:
            # Bug fix: " ...Done!" used to be printed even after a load
            # failure; report success only on the successful path.
            print(" ...Done!")

    def filter_prediction(self, boxes, scores, classes_pred):
        """Format prediction for tracking.

        Keeps detections whose class id is within label_limit, score is
        above the threshold, box area exceeds 1024 px and label is in
        classe_filter.

        Returns:
            [clean_bboxs, clean_scores, clean_classes_pred] — parallel lists.
        """
        clean_bboxs = []
        clean_classes_pred = []
        clean_scores = []
        for bbox, score, cl in zip(boxes.tolist(), scores.tolist(), classes_pred.tolist()):
            if cl > self.label_limit:
                continue
            if score > self.threshold:
                label = self.CLASSES[cl]
                bbox = list(map(int, bbox))
                area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
                if area <= 1024:
                    # Tiny boxes are treated as noise and discarded.
                    print("area = {}".format(area))
                    continue
                if label in self.classe_filter:
                    clean_bboxs.append(bbox)
                    clean_classes_pred.append(cl)
                    clean_scores.append(score)
        return([clean_bboxs, clean_scores, clean_classes_pred])

    def detect(self, cv2_image):
        """Run predict on the image after preprocessing.

        Returns:
            [bbox, scores, classes_pred, signate_detection]
        """
        # Convert image for network
        # img_inf = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
        img_inf = preprocess_image(cv2_image)
        img_inf, scale = resize_image(img_inf)
        # Run inference
        boxes, scores, labels = self.model.predict_on_batch(np.expand_dims(img_inf, axis=0))
        # Convert boxes back into original-image coordinates
        boxes /= scale
        # Clean prediction output
        bbox, scores, classes_pred = self.filter_prediction(boxes[0], scores[0], labels[0])
        # Convert to Signate frame output here:
        signate_detection = self.convert_to_signate(bbox, scores, classes_pred)
        return([bbox, scores, classes_pred, signate_detection])

    def convert_to_signate(self, bbox, scores, classes_pred):
        """Convert model output into signate frame format:
        - input: bbox, scores, classes_pred
        - output: {'Car': [...], 'Pedestrian': [...]} (empty keys omitted)
        """
        person_list = []
        car_list = []
        # Fix: loop variable renamed to `box` so it no longer shadows the
        # `bbox` parameter.
        for box, score, cl in zip(bbox, scores, classes_pred):
            if score > 0:
                label = self.CLASSES[cl]
                if label == "Pedestrian":
                    person_list.append({"box2d": box})
                else:
                    car_list.append({"box2d": box})
        # add in the frame (if not empty)
        current_frame = {}
        if car_list:
            current_frame["Car"] = car_list
        if person_list:
            current_frame["Pedestrian"] = person_list
        return(current_frame)

    def display_on_frame(self, frame, boxes, scores, classes_pred):
        """Display all filtered bboxs and annotations on frame."""
        for bbox, score, cl in zip(boxes, scores, classes_pred):
            if score > 0:
                label = self.CLASSES[cl]
                # Blue for pedestrians, red for cars (BGR channel order).
                color = (255, 0, 0) if label == "Pedestrian" else (0, 0, 255)
                # Bbox processing
                xmin, ymin, xmax, ymax = list(map(int, bbox))
                # Filter classes
                if label in self.classe_filter:
                    text = f'{self.CLASSES[cl]}: {score:0.2f}'
                    cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, 4)
                    #cv2.putText(frame, text, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
        return(frame)
| [
"keras_retinanet.utils.image.preprocess_image",
"sys.path.insert",
"tensorflow.config.experimental.set_memory_growth",
"keras_retinanet.models.load_model",
"numpy.expand_dims",
"tensorflow.config.experimental.list_physical_devices",
"keras_retinanet.utils.image.resize_image"
] | [((120, 171), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (164, 171), True, 'import tensorflow as tf\n'), ((319, 348), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../src/"""'], {}), "(0, '../src/')\n", (334, 348), False, 'import sys\n'), ((203, 257), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (243, 257), True, 'import tensorflow as tf\n'), ((2497, 2524), 'keras_retinanet.utils.image.preprocess_image', 'preprocess_image', (['cv2_image'], {}), '(cv2_image)\n', (2513, 2524), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((2550, 2571), 'keras_retinanet.utils.image.resize_image', 'resize_image', (['img_inf'], {}), '(img_inf)\n', (2562, 2571), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((1165, 1226), 'keras_retinanet.models.load_model', 'models.load_model', (['self.weight_path'], {'backbone_name': '"""resnet50"""'}), "(self.weight_path, backbone_name='resnet50')\n", (1182, 1226), False, 'from keras_retinanet import models\n'), ((2657, 2688), 'numpy.expand_dims', 'np.expand_dims', (['img_inf'], {'axis': '(0)'}), '(img_inf, axis=0)\n', (2671, 2688), True, 'import numpy as np\n')] |
import multiprocessing as mp
import os
from threading import Thread
from human_tracker import camera_capture
from database import ImageDB
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import pandas as pd
import signal, sys
import datetime as dt
import shutil
import time
# --- Detector / model configuration ---
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
                    'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
# --- Input sources and database paths ---
flags.DEFINE_string('video', './data/video/', 'path to input video or set to 0 for webcam')
flags.DEFINE_string('rtsp_path', 'data/rtsp/rtsp_cam.xlsx', 'default rtsp camera path')
flags.DEFINE_string('reid_db_path', "../reid/archive", 'default reid database path, where all of the samples from cam are saved into the db with timestamp')
#flags.DEFINE_string('cam_db_path', '../reid/database', 'default cam database path')
#flags.DEFINE_string('merge_db_path', "../reid/database/merge", 'default merged reid database path, where all of the samples from cam are saved into the db with timestamp')
#flags.DEFINE_string('output', './outputs/', 'path to output video')
# --- Output and display options ---
flags.DEFINE_boolean('output', False, 'path to output video')
flags.DEFINE_string('output_format', 'MJPG', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.50, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')
# Typo fix in the help string below: 'trascked' -> 'tracked'.
flags.DEFINE_boolean('count', False, 'count objects being tracked on screen')
#flags.DEFINE_boolean('db', True, 'save information in database')
flags.DEFINE_boolean('trajectory', False, 'draw historical trajectories on every tracked human')
# --- Sampling / runtime behaviour ---
flags.DEFINE_integer('input_skip_frame', 8, 'number of frame to be skipped')
flags.DEFINE_integer('db_skip_frame', 8, 'number of frame to be skipped')
flags.DEFINE_boolean('saliant_sampling', True, 'select and store unique frame only into database')
flags.DEFINE_boolean('plot_graph', False, 'plot graph for soft threshold')
flags.DEFINE_integer('parallel_ps', 8, 'number of human tracker process to run')
flags.DEFINE_boolean('online', True, 'run online image extraction using rtsp')
flags.DEFINE_boolean('reid', True, 'set to True to run with REID, set to False if new labelled data are needed to be recorded')
# def db_process(*args):
# def signal_handler(sig, frame):
# name = mp.current_process().name
# print(str(name) + ': You pressed Ctrl+C!')
# db_list = []
# # args[0] is the camera list
# # args[1] is the length of camera list
# # args[2] is the cam path
# # args[3] is the db merge path
# # args[4] is the shared queue recording the database paths
# for i in range(args[1]):
# if args[0]:
# db_list.append(args[2] + "/Cam_" + str(args[0][i]) + ".db")
# # finish gathering the db_paths, run merge.
# print('Saving merge database..')
# now = dt.datetime.now()
# db_name = now.strftime("Reid_Interrputed_%Y%m%d.db")
# db_filepath = os.path.join(args[3], db_name)
# reid_db = ImageDB(db_name=db_filepath)
# reid_db.delete_dbfile()
# reid_db.create_table()
# reid_db.merge_data(db_list)
# sys.exit(0)
# signal.signal(signal.SIGINT, signal_handler)
# while True:
# db_list = []
# # args[0] is the length of camera list
# # args[1] is the shared queue recording the database paths
# while len(db_list) < args[1]:
# print("db_path_process: ", args[4].get())
# db_list.append(args[4].get())
# #time.sleep(1)
# # finish gathering the db_paths, run merge.
# print('Saving merge database..')
# now = dt.datetime.now()
# db_name = now.strftime("Reid_%Y%m%d.db")
# db_filepath = os.path.join(args[3], db_name)
# reid_db = ImageDB(db_name=db_filepath)
# reid_db.delete_dbfile()
# reid_db.create_table()
# reid_db.merge_data(db_list)
class MultiPs():
    """Owns the pool of human-tracker worker processes and the shared
    flags used to pause them while the REID database file is rotated.
    """

    def __init__(self):
        self.job = []      # mp.Process workers registered via new_job()
        self.thread = []   # Thread helpers registered via new_thread()
        self.cam = []      # camera ids currently served (set by run helpers)
        # shared resource
        #self.db_queue = mp.Queue()
        self.manager = mp.Manager()
        # 1 -> workers should pause (database is being rotated)
        self.stop_tracker = self.manager.Value('i', 0)
        # 1 -> keep the main save_db() loop alive
        self.stop_main_ps = self.manager.Value('i', 1)
        self.db_path = None  # current db file; set by create_new_db()
        self.unique_id = self.manager.list()

    def log_msg(self):
        """Route multiprocessing's internal logging to stderr at DEBUG level."""
        mp.log_to_stderr()
        logger = mp.get_logger()
        logger.setLevel(logging.DEBUG)

    def new_job(self, name, target, *args):
        """Register (but do not start) a daemon worker process.

        The two shared stop flags are appended to *args so every worker
        can observe pause/stop requests from the main process.
        """
        print("args: ", args)
        args = (*args, self.stop_main_ps, self.stop_tracker)
        print("new args: ", args)
        j = mp.Process(name=name, target=target, args=args)
        j.daemon = True
        self.job.append(j)

    def new_thread(self, name, target, *args):
        """Register (but do not start) a daemon helper thread."""
        t = Thread(name=name, target=target, args=args)
        t.daemon = True
        self.thread.append(t)

    def create_new_db(self):
        """Create a fresh, empty REID database stamped with today's date."""
        now = dt.datetime.now()
        db_name = now.strftime("Reid_%Y%m%d.db")
        db_path = os.path.join(FLAGS.reid_db_path, db_name).replace("\\", "/")
        reid_db = ImageDB(db_name=db_path)
        print("reid db_path: ", db_path)
        reid_db.delete_dbfile()
        reid_db.create_table()
        self.db_path = db_path

    # main process will run this database
    def save_db(self):
        """Main-process loop: rotate the database on Wednesdays/Saturdays.

        Workers are paused (stop_tracker=1) while the new db file is
        created, then resumed. Runs until stop_main_ps is cleared.
        """
        renew_db = True
        while self.stop_main_ps.value:
            now = dt.datetime.now()
            t = now.timetuple()
            # t[6] is the weekday: 0 = Monday, 2 = Wednesday, 5 = Saturday.
            if t[6] == 2 or t[6] == 5:
                if renew_db:
                    # stop human tracker processes
                    self.stop_tracker.value = 1
                    # renew the database only once per rotation day
                    self.create_new_db()
                    renew_db = False
                    print("New Database with timestamp [", now.strftime("%A, %d. %B %Y %I:%M%p"), ']')
                    # reset human tracker processes
                    self.stop_tracker.value = 0
                else:
                    time.sleep(1)
            else:
                renew_db = True
            time.sleep(1)
        print("save_db loop is ended..")

    def signal_handler(self, sig, frame):
        """SIGINT handler: archive the live db as 'Interrupted' and exit."""
        print('Main Program: You pressed Ctrl+C!')
        # when ctrl + c, rename the database as interrupted
        # fix: db_path is None until create_new_db() has run; calling
        # os.path.isfile(None) would raise TypeError inside the handler.
        if self.db_path is not None and os.path.isfile(self.db_path):
            now = dt.datetime.now()
            db_name = now.strftime("Reid_Interrupted_%Y%m%dT%H%M%S.db")
            interrupt_path = os.path.join(FLAGS.reid_db_path, "Interrupt", db_name).replace("\\", "/")
            #os.rename(self.db_path, interrupt_path)
            shutil.move(self.db_path, interrupt_path)
        for j in self.job:
            j.join()
        sys.exit(0)
def cam_stream(mps):
    """Run a single webcam capture worker (FLAGS.video holds the device index)
    and block until it finishes.
    """
    mps.job.clear()
    device_index = int(FLAGS.video)
    mps.new_job('camera_ch' + FLAGS.video, camera_capture, device_index)
    for worker in mps.job:
        worker.start()
    for worker in mps.job:
        worker.join()
def sequential_run(batch, cam, db_path, mps):
    """Run one batch of offline camera channels to completion.

    Spawns a camera_capture worker per channel id in *batch*, then blocks
    in mps.save_db() (which rotates the database on cutoff days) until the
    workers are joined.
    """
    mps.job.clear()
    mps.cam = cam
    print("batch:", batch)
    gpu_num = 0
    for channel in batch:
        mps.new_job('camera_ch' + channel, camera_capture, FLAGS.online, int(channel), gpu_num, db_path)
    for worker in mps.job:
        worker.start()
    # main process loops here until the trackers stop
    mps.save_db()
    for worker in mps.job:
        worker.join()
def online_run(rtsp, cam, gpu, loc, db_path, mps):
    """Spawn FLAGS.parallel_ps online (RTSP) tracker workers and supervise them.

    rtsp/cam/gpu/loc are parallel lookups keyed 0..parallel_ps-1, taken from
    the spreadsheet loaded by get_rtsp().
    """
    mps.job.clear()
    mps.cam = cam
    for idx in range(FLAGS.parallel_ps):
        # cam[idx] is an int channel id, rtsp[idx] a str stream URL
        mps.new_job('camera_ch' + str(cam[idx]), camera_capture, FLAGS.online, cam[idx], rtsp[idx], gpu[idx], loc[idx], db_path)
        print("New online process for cam " + str(cam[idx]))
    for worker in mps.job:
        worker.start()
    # block here, rotating the database, until the workers exit
    mps.save_db()
    for worker in mps.job:
        worker.join()
def get_rtsp(file):
    """Read the camera configuration spreadsheet into a DataFrame.

    The 'Camera RTSP Stream' and 'Channel' columns are coerced to
    str / int respectively.
    """
    frame = pd.read_excel(
        file,
        dtype={'Camera RTSP Stream': str, 'Channel': int},
        engine='openpyxl',
    )
    return frame
def create_ps_list(vfile, parallel_ps=None):
    """Group video file names like 'ch<N>.mp4' into batches of channel ids.

    Parameters
    ----------
    vfile : iterable of str
        File names found in the video folder.
    parallel_ps : int, optional
        Batch size (workers per batch). Defaults to FLAGS.parallel_ps, so
        existing callers keep their behavior.

    Returns
    -------
    list[list[str]] of channel-id batches (last batch may be shorter),
    or -1 when no file matches the 'ch<N>' naming convention.
    """
    if parallel_ps is None:
        # resolved lazily so the function can also run without parsed FLAGS
        parallel_ps = FLAGS.parallel_ps
    ch_list = []
    for f in vfile:
        filename = os.path.splitext(f)[0]
        # accept only names of the exact form 'ch<digits>'
        if filename.split('ch')[0] == '' and filename.split('ch')[-1].isdigit() == True:
            print(filename.split('ch')[-1])
            ch_list.append(filename.split('ch')[-1])
    if len(ch_list) == 0:
        print("No video file with 'ch' name. Please rename your input video with 'ch[channel number].mp4'.")
        return -1
    ch_list.sort(key=int)
    ps_list = None
    last_ps_num = len(ch_list) % parallel_ps
    if last_ps_num != 0:
        # split off the incomplete trailing batch
        last_ps = ch_list[-last_ps_num:]
        print("last_ps:", last_ps)
        first_ps = ch_list[:-last_ps_num]
        print("first_ps:", first_ps)
        ps_list = np.asarray(first_ps).reshape(-1, parallel_ps).tolist()
        ps_list.append(last_ps)
        print(ps_list)
    else:
        ps_list = np.asarray(ch_list).reshape(-1, parallel_ps).tolist()
        print(ps_list)
    return ps_list
def main_single(_argv):
    """Single-process entry point: prepare the image database, then run one
    capture on channel 2.
    """
    # initialize database
    db = ImageDB()
    db.create_table()
    camera_capture(2)
def main(_argv):
    """Entry point: start tracker workers in online (RTSP) or offline
    (video-folder / webcam) mode and supervise the shared REID database.
    """
    mps = MultiPs()
    signal.signal(signal.SIGINT, mps.signal_handler)
    # mps.log_msg()
    print("Parent Process PID: " + str(os.getpid()))
    print("Initialize database..")
    # create new database with timestamp
    mps.create_new_db()
    # initialize backup database
    #db_path = None
    # if FLAGS.db:
    #     db_path = "./database/Image_" + str(dt.datetime.now().strftime("%Y%m%dT%H%M%S")) + ".db"
    #     print("db_path main: ", db_path)
    #     img_db = ImageDB(db_path)
    #img_db.delete_dbfile()
    #img_db.create_table()
    # online mode: one worker per row of the RTSP spreadsheet
    if FLAGS.online:
        table = get_rtsp(FLAGS.rtsp_path)
        online_run(table.to_dict('dict')['rtsp'], table.to_dict('dict')['cam'], table.to_dict('dict')['gpu'], table.to_dict('dict')['loc'], mps.db_path, mps)
    # offline mode
    else:
        # a non-digit video flag names a folder of recorded video files
        if not FLAGS.video.isdigit():
            # get video file info from video folder
            vfile = os.listdir(FLAGS.video)
            if len(vfile) == 0:
                print("No files in the " + FLAGS.video)
                return -1
            ps_list = create_ps_list(vfile)
            print("Start Multiprocessing..")
            table = get_rtsp(FLAGS.rtsp_path)
            # run new camera processes batch by batch, sequentially
            for batch in ps_list:
                sequential_run(batch, table.to_dict('dict')['cam'], mps.db_path, mps)
        else:
            # a digit video flag means a webcam index -> single stream
            cam_stream(mps)
    # for j in mps.job:
    #     j.start()
    # for j in mps.job:
    #     j.join()
    print("End of program.")
if __name__ == '__main__':
    try:
        app.run(main)
    # app.run raises SystemExit on completion; swallow it so the process
    # exits quietly instead of printing a traceback.
    except SystemExit:
        pass
| [
"multiprocessing.log_to_stderr",
"os.path.isfile",
"database.ImageDB",
"absl.flags.DEFINE_boolean",
"os.path.join",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"datetime.datetime.now",
"threading.Thread",
"absl.flags.FLAGS.video.isdigit",
"numpy.asarray",
"time.sleep",
"pandas.re... | [((311, 370), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""framework"""', '"""tf"""', '"""(tf, tflite, trt)"""'], {}), "('framework', 'tf', '(tf, tflite, trt)')\n", (330, 370), False, 'from absl import app, flags, logging\n'), ((371, 457), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""weights"""', '"""./checkpoints/yolov4-416"""', '"""path to weights file"""'], {}), "('weights', './checkpoints/yolov4-416',\n 'path to weights file')\n", (390, 457), False, 'from absl import app, flags, logging\n'), ((474, 527), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""size"""', '(416)', '"""resize images to"""'], {}), "('size', 416, 'resize images to')\n", (494, 527), False, 'from absl import app, flags, logging\n'), ((528, 584), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""tiny"""', '(False)', '"""yolo or yolo-tiny"""'], {}), "('tiny', False, 'yolo or yolo-tiny')\n", (548, 584), False, 'from absl import app, flags, logging\n'), ((585, 643), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""', '"""yolov4"""', '"""yolov3 or yolov4"""'], {}), "('model', 'yolov4', 'yolov3 or yolov4')\n", (604, 643), False, 'from absl import app, flags, logging\n'), ((644, 739), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""video"""', '"""./data/video/"""', '"""path to input video or set to 0 for webcam"""'], {}), "('video', './data/video/',\n 'path to input video or set to 0 for webcam')\n", (663, 739), False, 'from absl import app, flags, logging\n'), ((736, 827), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""rtsp_path"""', '"""data/rtsp/rtsp_cam.xlsx"""', '"""default rtsp camera path"""'], {}), "('rtsp_path', 'data/rtsp/rtsp_cam.xlsx',\n 'default rtsp camera path')\n", (755, 827), False, 'from absl import app, flags, logging\n'), ((824, 989), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""reid_db_path"""', '"""../reid/archive"""', '"""default reid database path, where all of the 
samples from cam are saved into the db with timestamp"""'], {}), "('reid_db_path', '../reid/archive',\n 'default reid database path, where all of the samples from cam are saved into the db with timestamp'\n )\n", (843, 989), False, 'from absl import app, flags, logging\n'), ((1308, 1369), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""output"""', '(False)', '"""path to output video"""'], {}), "('output', False, 'path to output video')\n", (1328, 1369), False, 'from absl import app, flags, logging\n'), ((1370, 1473), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_format"""', '"""MJPG"""', '"""codec used in VideoWriter when saving video to file"""'], {}), "('output_format', 'MJPG',\n 'codec used in VideoWriter when saving video to file')\n", (1389, 1473), False, 'from absl import app, flags, logging\n'), ((1470, 1518), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""iou"""', '(0.45)', '"""iou threshold"""'], {}), "('iou', 0.45, 'iou threshold')\n", (1488, 1518), False, 'from absl import app, flags, logging\n'), ((1519, 1570), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""score"""', '(0.5)', '"""score threshold"""'], {}), "('score', 0.5, 'score threshold')\n", (1537, 1570), False, 'from absl import app, flags, logging\n'), ((1572, 1638), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""dont_show"""', '(False)', '"""dont show video output"""'], {}), "('dont_show', False, 'dont show video output')\n", (1592, 1638), False, 'from absl import app, flags, logging\n'), ((1639, 1715), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""info"""', '(False)', '"""show detailed info of tracked objects"""'], {}), "('info', False, 'show detailed info of tracked objects')\n", (1659, 1715), False, 'from absl import app, flags, logging\n'), ((1716, 1794), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""count"""', '(False)', '"""count objects being trascked on screen"""'], {}), "('count', False, 'count objects 
being trascked on screen')\n", (1736, 1794), False, 'from absl import app, flags, logging\n'), ((1861, 1961), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""trajectory"""', '(False)', '"""draw historical trajectories on every tracked human"""'], {}), "('trajectory', False,\n 'draw historical trajectories on every tracked human')\n", (1881, 1961), False, 'from absl import app, flags, logging\n'), ((1958, 2034), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""input_skip_frame"""', '(8)', '"""number of frame to be skipped"""'], {}), "('input_skip_frame', 8, 'number of frame to be skipped')\n", (1978, 2034), False, 'from absl import app, flags, logging\n'), ((2035, 2108), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""db_skip_frame"""', '(8)', '"""number of frame to be skipped"""'], {}), "('db_skip_frame', 8, 'number of frame to be skipped')\n", (2055, 2108), False, 'from absl import app, flags, logging\n'), ((2109, 2211), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""saliant_sampling"""', '(True)', '"""select and store unique frame only into database"""'], {}), "('saliant_sampling', True,\n 'select and store unique frame only into database')\n", (2129, 2211), False, 'from absl import app, flags, logging\n'), ((2208, 2282), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""plot_graph"""', '(False)', '"""plot graph for soft threshold"""'], {}), "('plot_graph', False, 'plot graph for soft threshold')\n", (2228, 2282), False, 'from absl import app, flags, logging\n'), ((2283, 2368), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""parallel_ps"""', '(8)', '"""number of human tracker process to run"""'], {}), "('parallel_ps', 8, 'number of human tracker process to run'\n )\n", (2303, 2368), False, 'from absl import app, flags, logging\n'), ((2364, 2442), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""online"""', '(True)', '"""run online image extraction using rtsp"""'], {}), "('online', True, 
'run online image extraction using rtsp')\n", (2384, 2442), False, 'from absl import app, flags, logging\n'), ((2443, 2579), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""reid"""', '(True)', '"""set to True to run with REID, set to False if new labelled data are needed to be recorded"""'], {}), "('reid', True,\n 'set to True to run with REID, set to False if new labelled data are needed to be recorded'\n )\n", (2463, 2579), False, 'from absl import app, flags, logging\n'), ((8909, 9002), 'pandas.read_excel', 'pd.read_excel', (['file'], {'dtype': "{'Camera RTSP Stream': str, 'Channel': int}", 'engine': '"""openpyxl"""'}), "(file, dtype={'Camera RTSP Stream': str, 'Channel': int},\n engine='openpyxl')\n", (8922, 9002), True, 'import pandas as pd\n'), ((10069, 10078), 'database.ImageDB', 'ImageDB', ([], {}), '()\n', (10076, 10078), False, 'from database import ImageDB\n'), ((10109, 10126), 'human_tracker.camera_capture', 'camera_capture', (['(2)'], {}), '(2)\n', (10123, 10126), False, 'from human_tracker import camera_capture\n'), ((10170, 10218), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'mps.signal_handler'], {}), '(signal.SIGINT, mps.signal_handler)\n', (10183, 10218), False, 'import signal, sys\n'), ((4550, 4562), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (4560, 4562), True, 'import multiprocessing as mp\n'), ((4776, 4794), 'multiprocessing.log_to_stderr', 'mp.log_to_stderr', ([], {}), '()\n', (4792, 4794), True, 'import multiprocessing as mp\n'), ((4812, 4827), 'multiprocessing.get_logger', 'mp.get_logger', ([], {}), '()\n', (4825, 4827), True, 'import multiprocessing as mp\n'), ((5049, 5096), 'multiprocessing.Process', 'mp.Process', ([], {'name': 'name', 'target': 'target', 'args': 'args'}), '(name=name, target=target, args=args)\n', (5059, 5096), True, 'import multiprocessing as mp\n'), ((5208, 5251), 'threading.Thread', 'Thread', ([], {'name': 'name', 'target': 'target', 'args': 'args'}), '(name=name, target=target, 
args=args)\n', (5214, 5251), False, 'from threading import Thread\n'), ((5395, 5412), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5410, 5412), True, 'import datetime as dt\n'), ((5558, 5582), 'database.ImageDB', 'ImageDB', ([], {'db_name': 'db_path'}), '(db_name=db_path)\n', (5565, 5582), False, 'from database import ImageDB\n'), ((7077, 7105), 'os.path.isfile', 'os.path.isfile', (['self.db_path'], {}), '(self.db_path)\n', (7091, 7105), False, 'import os\n'), ((7482, 7493), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7490, 7493), False, 'import signal, sys\n'), ((11728, 11741), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (11735, 11741), False, 'from absl import app, flags, logging\n'), ((5895, 5912), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5910, 5912), True, 'import datetime as dt\n'), ((7125, 7142), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (7140, 7142), True, 'import datetime as dt\n'), ((7382, 7423), 'shutil.move', 'shutil.move', (['self.db_path', 'interrupt_path'], {}), '(self.db_path, interrupt_path)\n', (7393, 7423), False, 'import shutil\n'), ((9101, 9120), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (9117, 9120), False, 'import os\n'), ((10992, 11013), 'absl.flags.FLAGS.video.isdigit', 'FLAGS.video.isdigit', ([], {}), '()\n', (11011, 11013), False, 'from absl.flags import FLAGS\n'), ((11093, 11116), 'os.listdir', 'os.listdir', (['FLAGS.video'], {}), '(FLAGS.video)\n', (11103, 11116), False, 'import os\n'), ((5480, 5521), 'os.path.join', 'os.path.join', (['FLAGS.reid_db_path', 'db_name'], {}), '(FLAGS.reid_db_path, db_name)\n', (5492, 5521), False, 'import os\n'), ((6855, 6868), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6865, 6868), False, 'import time\n'), ((10279, 10290), 'os.getpid', 'os.getpid', ([], {}), '()\n', (10288, 10290), False, 'import os\n'), ((6657, 6670), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6667, 6670), False, 'import 
time\n'), ((7244, 7298), 'os.path.join', 'os.path.join', (['FLAGS.reid_db_path', '"""Interrupt"""', 'db_name'], {}), "(FLAGS.reid_db_path, 'Interrupt', db_name)\n", (7256, 7298), False, 'import os\n'), ((9757, 9777), 'numpy.asarray', 'np.asarray', (['first_ps'], {}), '(first_ps)\n', (9767, 9777), True, 'import numpy as np\n'), ((9901, 9920), 'numpy.asarray', 'np.asarray', (['ch_list'], {}), '(ch_list)\n', (9911, 9920), True, 'import numpy as np\n')] |
import gzip
import json
import os
import pickle
import random
import re
import zipfile
import h5py
import jieba
import numpy as np
import requests
from bs4 import BeautifulSoup
from BUG.function.zhtools.langconv import Converter
from BUG.load_package import p
def load_data_cat(path):
    """Load the cat/non-cat HDF5 dataset.

    Parameters
    ----------
    path : sequence of two file paths, (train_h5, test_h5).

    Returns (train_x, train_y, test_x, test_y, classes). Arrays are moved
    to the `p` backend when possible (presumably a GPU/numpy switch from
    BUG.load_package — TODO confirm), falling back to plain numpy.
    """
    train_dataset = h5py.File(path[0], "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels
    test_dataset = h5py.File(path[1], "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels
    classes = np.array(test_dataset["list_classes"][:])  # the list of classes
    # flatten label arrays to shape (m,)
    train_set_y_orig = train_set_y_orig.reshape(train_set_y_orig.shape[0])
    test_set_y_orig = test_set_y_orig.reshape(test_set_y_orig.shape[0])
    try:
        return p.asarray(train_set_x_orig), p.asarray(train_set_y_orig), p.asarray(test_set_x_orig), p.asarray(
            test_set_y_orig), classes
    except:
        # numpy fallback when the `p` backend cannot hold the arrays
        return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def load_CIFAR_batch(filename):
    """Load one pickled CIFAR-10 batch file.

    Returns (images, labels) where images is a float array of shape
    (10000, 32, 32, 3) and labels is a 1-D int array.
    """
    with open(filename, 'rb') as fh:
        batch = pickle.load(fh, encoding='iso-8859-1')
    images = batch['mnist_dnn_parameters']
    # stored as (N, 3, 32, 32); convert to channels-last floats
    images = images.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
    labels = np.array(batch['labels'])
    return images, labels
def load_CIFAR10(ROOT):
    """Load all five CIFAR-10 training batches plus the test batch.

    Returns (Xtr, Ytr, Xte, Yte). Arrays are converted with p.asarray when
    the `p` backend accepts them (presumably GPU arrays — TODO confirm),
    otherwise returned as plain numpy arrays.
    """
    xs = []
    ys = []
    for b in range(1, 6):
        f = os.path.join(ROOT, 'data_batch_%d' % (b,))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)
        ys.append(Y)
    Xtr = np.concatenate(xs)
    Ytr = np.concatenate(ys)
    # drop references to the last per-batch arrays before loading the test set
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    try:
        return p.asarray(Xtr), p.asarray(Ytr), p.asarray(Xte), p.asarray(Yte)
    except:
        # numpy fallback when backend conversion fails
        return Xtr, Ytr, Xte, Yte
def one_hot(y, num_classes=None, dtype='float32'):
    """Convert integer class labels to a one-hot encoded array.

    A trailing singleton dimension on the input is squeezed away; the
    output has the input's shape with a final axis of size num_classes.
    When num_classes is falsy it is inferred as max(y) + 1.
    """
    labels = np.array(y, dtype='int')
    shape = labels.shape
    if shape and shape[-1] == 1 and len(shape) > 1:
        shape = tuple(shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    count = flat.shape[0]
    encoded = np.zeros((count, num_classes), dtype=dtype)
    encoded[np.arange(count), flat] = 1
    return np.reshape(encoded, shape + (num_classes,))
def words_between_idx(doc):
    """Build char->index and index->char lookup tables from the distinct,
    sorted characters of *doc*.
    """
    vocab = sorted(set(doc))
    char_to_idx = {ch: pos for pos, ch in enumerate(vocab)}
    idx_to_char = {pos: ch for pos, ch in enumerate(vocab)}
    return char_to_idx, idx_to_char
def lyric_download():
    '''
    Download all lyrics of a NetEase Cloud Music artist (id entered
    interactively) and append them to <id>.txt.
    :return:
    '''
    def download_by_music_id(music_id):
        # download one lyric by its song id
        url = 'http://music.163.com/api/song/lyric?' + 'id=' + str(music_id) + '&lv=1&kv=1&tv=-1'
        r = requests.get(url)
        json_obj = r.text
        j = json.loads(json_obj)
        lrc = j['lrc']['lyric']
        pat1 = re.compile(r'\[.*\]')  # the next few lines strip timestamps and credit lines from the lyric text
        lrc = re.sub(pat1, '', lrc)
        pat2 = re.compile(r'.*\:.*')
        lrc = re.sub(pat2, '', lrc)
        pat3 = re.compile(r'.*\/.*')
        lrc = re.sub(pat3, '', lrc)
        lrc = lrc.strip()
        return lrc
    def get_music_ids_by_musican_id(singer_id):  # collect every song id of one artist
        singer_url = 'http://music.163.com/artist?' + 'id=' + str(singer_id)
        r = requests.get(singer_url).text
        soupObj = BeautifulSoup(r, 'lxml')
        song_ids = soupObj.find('textarea').text
        jobj = json.loads(song_ids)
        ids = {}
        for item in jobj:
            ids[item['name']] = item['id']
        return ids
    def download_lyric(uid):
        # fetch each song's lyric and append it to <singer_id>.txt,
        # silently skipping songs whose lyric cannot be fetched
        music_ids = get_music_ids_by_musican_id(uid)
        for key in music_ids:
            try:
                text = download_by_music_id(music_ids[key])
                with open('%s.txt' % singer_id, 'a', encoding='utf-8') as f:
                    f.write('\n')
                    for t in text:
                        f.write(t)
            except:
                print('')
    print("请输入歌手的id:")
    singer_id = input()
    download_lyric(singer_id)
def load_coco_data(base_dir='/content/sample_data/coco_captioning/',
                   max_train=None,
                   pca_features=True):
    """Load the preprocessed COCO-2014 captioning dataset from *base_dir*.

    Parameters
    ----------
    base_dir : folder with the caption/feature h5 files, vocab json and url lists.
    max_train : when given, randomly subsample that many training captions.
    pca_features : use the PCA-reduced VGG16 fc7 features instead of the full ones.

    Returns a dict with caption arrays, image features, vocab mappings and urls.
    """
    data = {}
    caption_file = os.path.join(base_dir, 'coco2014_captions.h5')
    with h5py.File(caption_file, 'r') as f:
        for k, v in f.items():
            data[k] = np.asarray(v)
    if pca_features:
        train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7_pca.h5')
    else:
        train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7.h5')
    with h5py.File(train_feat_file, 'r') as f:
        data['train_features'] = np.asarray(f['features'])
    if pca_features:
        val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7_pca.h5')
    else:
        val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7.h5')
    with h5py.File(val_feat_file, 'r') as f:
        data['val_features'] = np.asarray(f['features'])
    dict_file = os.path.join(base_dir, 'coco2014_vocab.json')
    with open(dict_file, 'r') as f:
        dict_data = json.load(f)
        for k, v in dict_data.items():
            data[k] = v
    train_url_file = os.path.join(base_dir, 'train2014_urls.txt')
    with open(train_url_file, 'r') as f:
        train_urls = np.asarray([line.strip() for line in f])
    data['train_urls'] = train_urls
    val_url_file = os.path.join(base_dir, 'val2014_urls.txt')
    with open(val_url_file, 'r') as f:
        val_urls = np.asarray([line.strip() for line in f])
    data['val_urls'] = val_urls
    # optionally subsample the training captions and their image indices
    if max_train is not None:
        num_train = data['train_captions'].shape[0]
        mask = np.random.randint(num_train, size=max_train)
        data['train_captions'] = data['train_captions'][mask]
        data['train_image_idxs'] = data['train_image_idxs'][mask]
    return data
def decode_captions(captions, idx_to_word):
    """Map caption index arrays back to space-joined word strings.

    '<NULL>' tokens are dropped; decoding of a row stops after the first
    '<END>' token (which is kept). A 1-D input yields a single string,
    a 2-D input a list of strings.
    """
    single = captions.ndim == 1
    if single:
        captions = captions[None]
    sentences = []
    for row in captions:
        tokens = []
        for idx in row:
            token = idx_to_word[int(idx)]
            if token != '<NULL>':
                tokens.append(token)
            if token == '<END>':
                break
        sentences.append(' '.join(tokens))
    return sentences[0] if single else sentences
def minibatch(data, batch_size=100, split='train'):
    """Slice the COCO captioning data dict into minibatches.

    Each batch is (input_captions, target_captions, image_features, urls)
    where targets are the caption tokens shifted left by one. Arrays are
    converted with p.asarray (presumably the GPU/numpy backend from
    BUG.load_package — TODO confirm). A trailing partial batch is dropped.
    """
    split_size = data['%s_captions' % split].shape[0]
    batch = []
    for i in range(split_size // batch_size):
        captions = data['%s_captions' % split][i * batch_size:(i + 1) * batch_size]
        image_idxs = data['%s_image_idxs' % split][i * batch_size:(i + 1) * batch_size]
        image_features = data['%s_features' % split][image_idxs]
        urls = data['%s_urls' % split][image_idxs]
        batch.append((p.asarray(captions[:, :-1]), p.asarray(captions[:, 1:]), p.asarray(image_features), urls))
    return batch
def load_mnist(path):
    """Load the gzipped MNIST/idx files found in *path*.

    Returns (x_train, y_train, x_test, y_test, num_classes) with images as
    uint8 arrays of shape (n, 28, 28) and num_classes fixed at 10.
    """
    names = (
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz',
    )

    def _labels(fname):
        # idx1 files carry an 8-byte header before the label bytes
        with gzip.open(os.path.join(path, fname), 'rb') as fh:
            return np.frombuffer(fh.read(), np.uint8, offset=8)

    def _images(fname, count):
        # idx3 files carry a 16-byte header before the pixel bytes
        with gzip.open(os.path.join(path, fname), 'rb') as fh:
            raw = np.frombuffer(fh.read(), np.uint8, offset=16)
        return raw.reshape(count, 28, 28)

    y_train = _labels(names[0])
    x_train = _images(names[1], len(y_train))
    y_test = _labels(names[2])
    x_test = _images(names[3], len(y_test))
    return x_train, y_train, x_test, y_test, 10
def load_poetry():
    """Load a Chinese poetry corpus at character level.

    Reads the hard-coded poetry.txt, keeps the text after each line's ':'
    (dropping titles), normalises punctuation to spaces, converts to
    simplified Chinese and keeps only the first 10000 characters.
    Returns (corpus_indices, char_to_idx, idx_to_char, vocab_size).
    """
    with open('/Users/oswin/Documents/BS/BUG/datasets/poetry.txt', "r", encoding='utf-8') as f:
        data = f.readlines()
    content = ''.join([line.split(':')[1] for line in data])
    # replace newlines and Chinese/ASCII punctuation with spaces
    corpus_chars = content.replace('\n', ' ').replace('\r', ' ').replace(',', ' ').replace('。', ' ').replace('_', '')
    corpus_chars = Converter('zh-hans').convert(corpus_chars[:10000])
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size
def load_data_jay_lyrics(path):
    """Load the Jay Chou lyrics corpus from <path>/jaychou_lyrics.zip.

    The text is converted to simplified Chinese, whitespace-normalised and
    segmented into tokens with jieba (cached via get_jieba_list).
    Returns (corpus_ix, token_to_idx, idx_to_token, vocab_size).
    """
    with zipfile.ZipFile(os.path.join(path, 'jaychou_lyrics.zip')) as zin:
        with zin.open('jaychou_lyrics.txt') as f:
            corpus_chars = Converter('zh-hans').convert(f.read().decode('utf-8'))
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    # segmentation turns the string into a list of tokens
    corpus_chars = get_jieba_list(path, corpus_chars)
    idx_to_char = list(set(corpus_chars))
    idx_to_char.sort()
    char_to_idx = {ch: i for i, ch in enumerate(idx_to_char)}
    vocab_size = len(char_to_idx)
    corpus_ix = [char_to_idx[ch] for ch in corpus_chars]
    return corpus_ix, char_to_idx, idx_to_char, vocab_size
def load_data_ana():
    """Character-encode the Anna Karenina corpus from its hard-coded path.

    Returns (encoded, vocab_to_int, int_to_vocab, vocab_size).
    """
    with open('/Users/oswin/Documents/BS/BUG/datasets/anna.txt', 'r') as f:
        text = f.read()
    distinct = set(text)
    vocab_to_int = {ch: idx for idx, ch in enumerate(distinct)}
    int_to_vocab = {idx: ch for idx, ch in enumerate(distinct)}
    encoded = np.array([vocab_to_int[ch] for ch in text], dtype=np.int32)
    return encoded, vocab_to_int, int_to_vocab, len(distinct)
def load_data_gem_lyrics(path):
    """Load the G.E.M. lyrics corpus from <path>/gem_lyrics.zip.

    Mirrors load_data_jay_lyrics: simplified-Chinese conversion, whitespace
    normalisation, jieba segmentation (cached via get_jieba_list).
    Returns (corpus_ix, token_to_idx, idx_to_token, vocab_size).
    """
    with zipfile.ZipFile(os.path.join(path, 'gem_lyrics.zip')) as zin:
        with zin.open('gem_lyrics.txt') as f:
            corpus_chars = Converter('zh-hans').convert(f.read().decode('utf-8'))
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    # segmentation turns the string into a list of tokens
    corpus_chars = get_jieba_list(path, corpus_chars)
    idx_to_char = list(set(corpus_chars))
    idx_to_char.sort()
    char_to_idx = {ch: i for i, ch in enumerate(idx_to_char)}
    vocab_size = len(char_to_idx)
    corpus_ix = [char_to_idx[ch] for ch in corpus_chars]
    return corpus_ix, char_to_idx, idx_to_char, vocab_size
def data_iter_random(corpus_indices, batch_size, num_steps):
    """Yield (X, Y) minibatches of randomly-ordered subsequences.

    X is a num_steps-long input window and Y is the same window shifted
    right by one index (language-model targets).
    NOTE(review): X is wrapped with p.array but Y stays a plain numpy
    array — looks inconsistent; confirm downstream expectations before
    changing.
    """
    # minus 1 because each target index is the corresponding input index + 1
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)
    # return the num_steps-long slice starting at pos
    def _data(pos):
        return corpus_indices[pos: pos + num_steps]
    for i in range(epoch_size):
        # draw batch_size random, non-overlapping sample windows
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = [_data(j * num_steps) for j in batch_indices]
        Y = [_data(j * num_steps + 1) for j in batch_indices]
        yield p.array(np.array(X)), np.array(Y)
def get_jieba_list(path, text):
    """Segment *text* with jieba, caching the token list as a pickle.

    The cache file '<path>/jieba.cache' is reused on later calls so the
    slow segmentation only runs once per corpus. Note the cache ignores
    *text* — delete the file when the corpus changes.
    """
    file = os.path.join(path, 'jieba.cache')
    if os.path.exists(file):
        with open(file, 'rb+') as f:
            return pickle.load(f)
    with open(file, 'wb+') as f:
        l = list(jieba.cut(text, cut_all=False))
        pickle.dump(l, f)
        return l
if __name__ == '__main__':
    # Smoke test: load the Jay Chou lyrics corpus and show the encoded indices.
    a = load_data_jay_lyrics('/Users/oswin/Documents/BS/BUG/datasets')[0]
    # fix: was 'pwrint(a)', a NameError typo for print
    print(a)
| [
"pickle.dump",
"BUG.load_package.p.asarray",
"random.shuffle",
"pickle.load",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"json.loads",
"os.path.exists",
"numpy.max",
"numpy.reshape",
"requests.get",
"re.sub",
"h5py.File",
"jieba.cut",
"numpy.asarray",
"bs4.BeautifulSoup"... | [((309, 332), 'h5py.File', 'h5py.File', (['path[0]', '"""r"""'], {}), "(path[0], 'r')\n", (318, 332), False, 'import h5py\n'), ((356, 397), 'numpy.array', 'np.array', (["train_dataset['train_set_x'][:]"], {}), "(train_dataset['train_set_x'][:])\n", (364, 397), True, 'import numpy as np\n'), ((448, 489), 'numpy.array', 'np.array', (["train_dataset['train_set_y'][:]"], {}), "(train_dataset['train_set_y'][:])\n", (456, 489), True, 'import numpy as np\n'), ((535, 558), 'h5py.File', 'h5py.File', (['path[1]', '"""r"""'], {}), "(path[1], 'r')\n", (544, 558), False, 'import h5py\n'), ((581, 620), 'numpy.array', 'np.array', (["test_dataset['test_set_x'][:]"], {}), "(test_dataset['test_set_x'][:])\n", (589, 620), True, 'import numpy as np\n'), ((669, 708), 'numpy.array', 'np.array', (["test_dataset['test_set_y'][:]"], {}), "(test_dataset['test_set_y'][:])\n", (677, 708), True, 'import numpy as np\n'), ((748, 789), 'numpy.array', 'np.array', (["test_dataset['list_classes'][:]"], {}), "(test_dataset['list_classes'][:])\n", (756, 789), True, 'import numpy as np\n'), ((1837, 1855), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (1851, 1855), True, 'import numpy as np\n'), ((1866, 1884), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (1880, 1884), True, 'import numpy as np\n'), ((2158, 2182), 'numpy.array', 'np.array', (['y'], {'dtype': '"""int"""'}), "(y, dtype='int')\n", (2166, 2182), True, 'import numpy as np\n'), ((2440, 2479), 'numpy.zeros', 'np.zeros', (['(n, num_classes)'], {'dtype': 'dtype'}), '((n, num_classes), dtype=dtype)\n', (2448, 2479), True, 'import numpy as np\n'), ((2583, 2620), 'numpy.reshape', 'np.reshape', (['categorical', 'output_shape'], {}), '(categorical, output_shape)\n', (2593, 2620), True, 'import numpy as np\n'), ((4542, 4588), 'os.path.join', 'os.path.join', (['base_dir', '"""coco2014_captions.h5"""'], {}), "(base_dir, 'coco2014_captions.h5')\n", (4554, 4588), False, 'import os\n'), ((5288, 
5333), 'os.path.join', 'os.path.join', (['base_dir', '"""coco2014_vocab.json"""'], {}), "(base_dir, 'coco2014_vocab.json')\n", (5300, 5333), False, 'import os\n'), ((5488, 5532), 'os.path.join', 'os.path.join', (['base_dir', '"""train2014_urls.txt"""'], {}), "(base_dir, 'train2014_urls.txt')\n", (5500, 5532), False, 'import os\n'), ((5692, 5734), 'os.path.join', 'os.path.join', (['base_dir', '"""val2014_urls.txt"""'], {}), "(base_dir, 'val2014_urls.txt')\n", (5704, 5734), False, 'import os\n'), ((9781, 9838), 'numpy.array', 'np.array', (['[vocab_to_int[c] for c in text]'], {'dtype': 'np.int32'}), '([vocab_to_int[c] for c in text], dtype=np.int32)\n', (9789, 9838), True, 'import numpy as np\n'), ((10779, 10810), 'random.shuffle', 'random.shuffle', (['example_indices'], {}), '(example_indices)\n', (10793, 10810), False, 'import random\n'), ((11276, 11309), 'os.path.join', 'os.path.join', (['path', '"""jieba.cache"""'], {}), "(path, 'jieba.cache')\n", (11288, 11309), False, 'import os\n'), ((11317, 11337), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (11331, 11337), False, 'import os\n'), ((1353, 1390), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""iso-8859-1"""'}), "(f, encoding='iso-8859-1')\n", (1364, 1390), False, 'import pickle\n'), ((1557, 1568), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1565, 1568), True, 'import numpy as np\n'), ((1707, 1749), 'os.path.join', 'os.path.join', (['ROOT', "('data_batch_%d' % (b,))"], {}), "(ROOT, 'data_batch_%d' % (b,))\n", (1719, 1749), False, 'import os\n'), ((1930, 1962), 'os.path.join', 'os.path.join', (['ROOT', '"""test_batch"""'], {}), "(ROOT, 'test_batch')\n", (1942, 1962), False, 'import os\n'), ((3044, 3061), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3056, 3061), False, 'import requests\n'), ((3101, 3121), 'json.loads', 'json.loads', (['json_obj'], {}), '(json_obj)\n', (3111, 3121), False, 'import json\n'), ((3169, 3191), 're.compile', 're.compile', 
(['"""\\\\[.*\\\\]"""'], {}), "('\\\\[.*\\\\]')\n", (3179, 3191), False, 'import re\n'), ((3231, 3252), 're.sub', 're.sub', (['pat1', '""""""', 'lrc'], {}), "(pat1, '', lrc)\n", (3237, 3252), False, 'import re\n'), ((3268, 3289), 're.compile', 're.compile', (['""".*\\\\:.*"""'], {}), "('.*\\\\:.*')\n", (3278, 3289), False, 'import re\n'), ((3304, 3325), 're.sub', 're.sub', (['pat2', '""""""', 'lrc'], {}), "(pat2, '', lrc)\n", (3310, 3325), False, 'import re\n'), ((3341, 3362), 're.compile', 're.compile', (['""".*\\\\/.*"""'], {}), "('.*\\\\/.*')\n", (3351, 3362), False, 'import re\n'), ((3377, 3398), 're.sub', 're.sub', (['pat3', '""""""', 'lrc'], {}), "(pat3, '', lrc)\n", (3383, 3398), False, 'import re\n'), ((3653, 3677), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r', '"""lxml"""'], {}), "(r, 'lxml')\n", (3666, 3677), False, 'from bs4 import BeautifulSoup\n'), ((3742, 3762), 'json.loads', 'json.loads', (['song_ids'], {}), '(song_ids)\n', (3752, 3762), False, 'import json\n'), ((4598, 4626), 'h5py.File', 'h5py.File', (['caption_file', '"""r"""'], {}), "(caption_file, 'r')\n", (4607, 4626), False, 'import h5py\n'), ((4748, 4800), 'os.path.join', 'os.path.join', (['base_dir', '"""train2014_vgg16_fc7_pca.h5"""'], {}), "(base_dir, 'train2014_vgg16_fc7_pca.h5')\n", (4760, 4800), False, 'import os\n'), ((4837, 4885), 'os.path.join', 'os.path.join', (['base_dir', '"""train2014_vgg16_fc7.h5"""'], {}), "(base_dir, 'train2014_vgg16_fc7.h5')\n", (4849, 4885), False, 'import os\n'), ((4895, 4926), 'h5py.File', 'h5py.File', (['train_feat_file', '"""r"""'], {}), "(train_feat_file, 'r')\n", (4904, 4926), False, 'import h5py\n'), ((4966, 4991), 'numpy.asarray', 'np.asarray', (["f['features']"], {}), "(f['features'])\n", (4976, 4991), True, 'import numpy as np\n'), ((5037, 5087), 'os.path.join', 'os.path.join', (['base_dir', '"""val2014_vgg16_fc7_pca.h5"""'], {}), "(base_dir, 'val2014_vgg16_fc7_pca.h5')\n", (5049, 5087), False, 'import os\n'), ((5122, 5168), 'os.path.join', 
'os.path.join', (['base_dir', '"""val2014_vgg16_fc7.h5"""'], {}), "(base_dir, 'val2014_vgg16_fc7.h5')\n", (5134, 5168), False, 'import os\n'), ((5178, 5207), 'h5py.File', 'h5py.File', (['val_feat_file', '"""r"""'], {}), "(val_feat_file, 'r')\n", (5187, 5207), False, 'import h5py\n'), ((5245, 5270), 'numpy.asarray', 'np.asarray', (["f['features']"], {}), "(f['features'])\n", (5255, 5270), True, 'import numpy as np\n'), ((5390, 5402), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5399, 5402), False, 'import json\n'), ((6020, 6064), 'numpy.random.randint', 'np.random.randint', (['num_train'], {'size': 'max_train'}), '(num_train, size=max_train)\n', (6037, 6064), True, 'import numpy as np\n'), ((7619, 7644), 'gzip.open', 'gzip.open', (['paths[0]', '"""rb"""'], {}), "(paths[0], 'rb')\n", (7628, 7644), False, 'import gzip\n'), ((7733, 7758), 'gzip.open', 'gzip.open', (['paths[1]', '"""rb"""'], {}), "(paths[1], 'rb')\n", (7742, 7758), False, 'import gzip\n'), ((7893, 7918), 'gzip.open', 'gzip.open', (['paths[2]', '"""rb"""'], {}), "(paths[2], 'rb')\n", (7902, 7918), False, 'import gzip\n'), ((8006, 8031), 'gzip.open', 'gzip.open', (['paths[3]', '"""rb"""'], {}), "(paths[3], 'rb')\n", (8015, 8031), False, 'import gzip\n'), ((11501, 11518), 'pickle.dump', 'pickle.dump', (['l', 'f'], {}), '(l, f)\n', (11512, 11518), False, 'import pickle\n'), ((985, 1012), 'BUG.load_package.p.asarray', 'p.asarray', (['train_set_x_orig'], {}), '(train_set_x_orig)\n', (994, 1012), False, 'from BUG.load_package import p\n'), ((1014, 1041), 'BUG.load_package.p.asarray', 'p.asarray', (['train_set_y_orig'], {}), '(train_set_y_orig)\n', (1023, 1041), False, 'from BUG.load_package import p\n'), ((1043, 1069), 'BUG.load_package.p.asarray', 'p.asarray', (['test_set_x_orig'], {}), '(test_set_x_orig)\n', (1052, 1069), False, 'from BUG.load_package import p\n'), ((1071, 1097), 'BUG.load_package.p.asarray', 'p.asarray', (['test_set_y_orig'], {}), '(test_set_y_orig)\n', (1080, 1097), False, 'from 
BUG.load_package import p\n'), ((1988, 2002), 'BUG.load_package.p.asarray', 'p.asarray', (['Xtr'], {}), '(Xtr)\n', (1997, 2002), False, 'from BUG.load_package import p\n'), ((2004, 2018), 'BUG.load_package.p.asarray', 'p.asarray', (['Ytr'], {}), '(Ytr)\n', (2013, 2018), False, 'from BUG.load_package import p\n'), ((2020, 2034), 'BUG.load_package.p.asarray', 'p.asarray', (['Xte'], {}), '(Xte)\n', (2029, 2034), False, 'from BUG.load_package import p\n'), ((2036, 2050), 'BUG.load_package.p.asarray', 'p.asarray', (['Yte'], {}), '(Yte)\n', (2045, 2050), False, 'from BUG.load_package import p\n'), ((2389, 2398), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (2395, 2398), True, 'import numpy as np\n'), ((2496, 2508), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2505, 2508), True, 'import numpy as np\n'), ((3605, 3629), 'requests.get', 'requests.get', (['singer_url'], {}), '(singer_url)\n', (3617, 3629), False, 'import requests\n'), ((4686, 4699), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (4696, 4699), True, 'import numpy as np\n'), ((7582, 7607), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (7594, 7607), False, 'import os\n'), ((8550, 8570), 'BUG.function.zhtools.langconv.Converter', 'Converter', (['"""zh-hans"""'], {}), "('zh-hans')\n", (8559, 8570), False, 'from BUG.function.zhtools.langconv import Converter\n'), ((8941, 8981), 'os.path.join', 'os.path.join', (['path', '"""jaychou_lyrics.zip"""'], {}), "(path, 'jaychou_lyrics.zip')\n", (8953, 8981), False, 'import os\n'), ((9957, 9993), 'os.path.join', 'os.path.join', (['path', '"""gem_lyrics.zip"""'], {}), "(path, 'gem_lyrics.zip')\n", (9969, 9993), False, 'import os\n'), ((11395, 11409), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11406, 11409), False, 'import pickle\n'), ((11461, 11491), 'jieba.cut', 'jieba.cut', (['text'], {'cut_all': '(False)'}), '(text, cut_all=False)\n', (11470, 11491), False, 'import jieba\n'), ((7236, 7263), 'BUG.load_package.p.asarray', 
'p.asarray', (['captions[:, :-1]'], {}), '(captions[:, :-1])\n', (7245, 7263), False, 'from BUG.load_package import p\n'), ((7265, 7291), 'BUG.load_package.p.asarray', 'p.asarray', (['captions[:, 1:]'], {}), '(captions[:, 1:])\n', (7274, 7291), False, 'from BUG.load_package import p\n'), ((7293, 7318), 'BUG.load_package.p.asarray', 'p.asarray', (['image_features'], {}), '(image_features)\n', (7302, 7318), False, 'from BUG.load_package import p\n'), ((11219, 11230), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (11227, 11230), True, 'import numpy as np\n'), ((9068, 9088), 'BUG.function.zhtools.langconv.Converter', 'Converter', (['"""zh-hans"""'], {}), "('zh-hans')\n", (9077, 9088), False, 'from BUG.function.zhtools.langconv import Converter\n'), ((10076, 10096), 'BUG.function.zhtools.langconv.Converter', 'Converter', (['"""zh-hans"""'], {}), "('zh-hans')\n", (10085, 10096), False, 'from BUG.function.zhtools.langconv import Converter\n'), ((11205, 11216), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (11213, 11216), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcls.core.visualization import imshow_infos
from mmcls.datasets.pipelines import Compose
from mmcls.models import build_classifier
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmfewshot.classification.models import BaseMetricClassifier
def init_classifier(config: Union[str, mmcv.Config],
                    checkpoint: Optional[str] = None,
                    device: str = 'cuda:0',
                    options: Optional[Dict] = None) -> nn.Module:
    """Build a few shot classifier from a config, optionally loading weights.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str | None): Checkpoint path. If left as None, the model
            will not load any weights. Default: None.
        device (str): Runtime device. Default: 'cuda:0'.
        options (dict | None): Options to override some settings in the
            used config. Default: None.

    Returns:
        nn.Module: The constructed classifier.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if options is not None:
        config.merge_from_dict(options)
    classifier = build_classifier(config.model)
    if checkpoint is not None:
        # keep tensors on CPU while loading when the target device is CPU
        map_location = 'cpu' if device == 'cpu' else None
        load_checkpoint(classifier, checkpoint, map_location=map_location)
    # save the config in the model for convenience in later use
    classifier.cfg = config
    classifier.to(device)
    classifier.eval()
    return classifier
def process_support_images(model: nn.Module, support_imgs: List[str],
                           support_labels: List[str]) -> None:
    """Run the support set through the model to build its class prototypes.

    Args:
        model (nn.Module): Classifier model.
        support_imgs (list[str]): The image filenames.
        support_labels (list[str]): The class names of support images.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline, forcing on-disk image loading
    pipeline = cfg.data.test.dataset.pipeline
    if pipeline[0]['type'] != 'LoadImageFromFile':
        pipeline[0]['type'] = 'LoadImageFromFile'
    transform = Compose(pipeline)
    model.CLASSES = list(set(support_labels))
    label_to_index = {name: idx for idx, name in enumerate(model.CLASSES)}
    model.before_forward_support()
    # forward every support image through the model
    with torch.no_grad():
        for filename, label in zip(support_imgs, support_labels):
            sample = dict(
                img_info=dict(filename=filename),
                gt_label=np.array(label_to_index[label], dtype=np.int64),
                img_prefix=None)
            sample = transform(sample)
            sample = collate([sample], samples_per_gpu=1)
            if next(model.parameters()).is_cuda:
                # scatter to specified GPU
                sample = scatter(sample, [device])[0]
            model(mode='support', **sample)
    model.before_forward_query()
def inference_classifier(model: nn.Module, query_img: str) -> Dict:
    """Inference single image with the classifier.

    Args:
        model (nn.Module): The loaded classifier.
        query_img (str): The image filename.

    Returns:
        dict: The classification results that contains
            `pred_score` of each class.
    """
    # only support methods without fine-tuning
    if not isinstance(model, BaseMetricClassifier):
        raise TypeError(
            'currently, inference only support metric based methods')
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline, forcing on-disk image loading
    pipeline = cfg.data.test.dataset.pipeline
    if pipeline[0]['type'] != 'LoadImageFromFile':
        pipeline[0]['type'] = 'LoadImageFromFile'
    transform = Compose(pipeline)
    sample = dict(
        img_info=dict(filename=query_img),
        gt_label=np.array(-1, dtype=np.int64),  # dummy label for the query
        img_prefix=None)
    sample = transform(sample)
    sample = collate([sample], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        sample = scatter(sample, [device])[0]
    # inference image
    with torch.no_grad():
        scores = model(mode='query', img=sample['img'])[0]
    return {
        model.CLASSES[idx]: float(scores[idx])
        for idx in range(scores.shape[0])
    }
def show_result_pyplot(img: str,
                       result: Dict,
                       fig_size: Tuple[int] = (15, 10),
                       wait_time: int = 0,
                       out_file: Optional[str] = None) -> np.ndarray:
    """Visualize the classification results on the image.

    Args:
        img (str): Image filename.
        result (dict): The classification result.
        fig_size (tuple): Figure size of the pyplot figure. Default: (15, 10).
        wait_time (int): How many seconds to display the image. Default: 0.
        out_file (str | None): Default: None

    Returns:
        np.ndarray: pyplot figure.
    """
    # work on a copy so the cached image is not mutated by drawing
    canvas = mmcv.imread(img).copy()
    canvas = imshow_infos(
        canvas,
        result,
        text_color='white',
        font_size=25,
        row_width=20,
        win_name='',
        show=True,
        fig_size=fig_size,
        wait_time=wait_time,
        out_file=out_file)
    return canvas
| [
"mmcv.parallel.collate",
"mmcv.parallel.scatter",
"mmcls.datasets.pipelines.Compose",
"mmcv.Config.fromfile",
"numpy.array",
"mmcv.runner.load_checkpoint",
"mmcls.core.visualization.imshow_infos",
"torch.no_grad",
"mmcv.imread",
"mmcls.models.build_classifier"
] | [((1530, 1560), 'mmcls.models.build_classifier', 'build_classifier', (['config.model'], {}), '(config.model)\n', (1546, 1560), False, 'from mmcls.models import build_classifier\n'), ((2486, 2503), 'mmcls.datasets.pipelines.Compose', 'Compose', (['pipeline'], {}), '(pipeline)\n', (2493, 2503), False, 'from mmcls.datasets.pipelines import Compose\n'), ((5416, 5432), 'mmcv.imread', 'mmcv.imread', (['img'], {}), '(img)\n', (5427, 5432), False, 'import mmcv\n'), ((5464, 5628), 'mmcls.core.visualization.imshow_infos', 'imshow_infos', (['img', 'result'], {'text_color': '"""white"""', 'font_size': '(25)', 'row_width': '(20)', 'win_name': '""""""', 'show': '(True)', 'fig_size': 'fig_size', 'wait_time': 'wait_time', 'out_file': 'out_file'}), "(img, result, text_color='white', font_size=25, row_width=20,\n win_name='', show=True, fig_size=fig_size, wait_time=wait_time,\n out_file=out_file)\n", (5476, 5628), False, 'from mmcls.core.visualization import imshow_infos\n'), ((1253, 1281), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['config'], {}), '(config)\n', (1273, 1281), False, 'import mmcv\n'), ((1653, 1709), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['model', 'checkpoint'], {'map_location': 'map_loc'}), '(model, checkpoint, map_location=map_loc)\n', (1668, 1709), False, 'from mmcv.runner import load_checkpoint\n'), ((2687, 2702), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2700, 2702), False, 'import torch\n'), ((3992, 4009), 'mmcls.datasets.pipelines.Compose', 'Compose', (['pipeline'], {}), '(pipeline)\n', (3999, 4009), False, 'from mmcls.datasets.pipelines import Compose\n'), ((4208, 4242), 'mmcv.parallel.collate', 'collate', (['[data]'], {'samples_per_gpu': '(1)'}), '([data], samples_per_gpu=1)\n', (4215, 4242), False, 'from mmcv.parallel import collate, scatter\n'), ((2995, 3029), 'mmcv.parallel.collate', 'collate', (['[data]'], {'samples_per_gpu': '(1)'}), '([data], samples_per_gpu=1)\n', (3002, 3029), False, 'from mmcv.parallel import 
collate, scatter\n'), ((4413, 4428), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4426, 4428), False, 'import torch\n'), ((4099, 4127), 'numpy.array', 'np.array', (['(-1)'], {'dtype': 'np.int64'}), '(-1, dtype=np.int64)\n', (4107, 4127), True, 'import numpy as np\n'), ((4346, 4369), 'mmcv.parallel.scatter', 'scatter', (['data', '[device]'], {}), '(data, [device])\n', (4353, 4369), False, 'from mmcv.parallel import collate, scatter\n'), ((2860, 2902), 'numpy.array', 'np.array', (['cat_to_id[label]'], {'dtype': 'np.int64'}), '(cat_to_id[label], dtype=np.int64)\n', (2868, 2902), True, 'import numpy as np\n'), ((3145, 3168), 'mmcv.parallel.scatter', 'scatter', (['data', '[device]'], {}), '(data, [device])\n', (3152, 3168), False, 'from mmcv.parallel import collate, scatter\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import ast
import argparse
import glob
import yaml
import copy
import numpy as np
from python.keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop
def argsparser():
    """Build the command line argument parser for the pipeline entry point.

    Returns:
        argparse.ArgumentParser: Parser with every pipeline option
        registered (config path, input sources, switches for attribute /
        action recognition, inference backend options, TensorRT shapes and
        entrance-counting settings).
    """
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument(
        "--config",
        type=str,
        default=None,
        help=("Path of configure"),
        required=True)
    p.add_argument(
        "--image_file", type=str, default=None, help="Path of image file.")
    p.add_argument(
        "--image_dir",
        type=str,
        default=None,
        help="Dir of image file, `image_file` has a higher priority.")
    p.add_argument(
        "--video_file",
        type=str,
        default=None,
        help="Path of video file, `video_file` or `camera_id` has a highest priority."
    )
    p.add_argument(
        "--video_dir",
        type=str,
        default=None,
        help="Dir of video file, `video_file` has a higher priority.")
    p.add_argument(
        "--model_dir", nargs='*', help="set model dir in pipeline")
    p.add_argument(
        "--camera_id",
        type=int,
        default=-1,
        help="device id of camera to predict.")
    p.add_argument(
        "--enable_attr",
        type=ast.literal_eval,
        default=False,
        help="Whether use attribute recognition.")
    p.add_argument(
        "--enable_action",
        type=ast.literal_eval,
        default=False,
        help="Whether use action recognition.")
    p.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory of output visualization files.")
    p.add_argument(
        "--run_mode",
        type=str,
        default='paddle',
        help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)")
    p.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
    )
    p.add_argument(
        "--enable_mkldnn",
        type=ast.literal_eval,
        default=False,
        help="Whether use mkldnn with CPU.")
    p.add_argument(
        "--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
    p.add_argument(
        "--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
    p.add_argument(
        "--trt_max_shape",
        type=int,
        default=1280,
        help="max_shape for TensorRT.")
    p.add_argument(
        "--trt_opt_shape",
        type=int,
        default=640,
        help="opt_shape for TensorRT.")
    p.add_argument(
        "--trt_calib_mode",
        type=bool,
        default=False,
        help="If the model is produced by TRT offline quantitative "
        "calibration, trt_calib_mode need to set True.")
    p.add_argument(
        "--do_entrance_counting",
        action='store_true',
        help="Whether counting the numbers of identifiers entering "
        "or getting out from the entrance. Note that only support one-class"
        "counting, multi-class counting is coming soon.")
    p.add_argument(
        "--secs_interval",
        type=int,
        default=2,
        help="The seconds interval to count after tracking")
    p.add_argument(
        "--draw_center_traj",
        action='store_true',
        help="Whether drawing the trajectory of center")
    return p
class Times(object):
    """Accumulating stopwatch used to profile pipeline stages."""

    def __init__(self):
        # accumulated (or last) elapsed time in seconds
        self.time = 0.
        # start time
        self.st = 0.
        # end time
        self.et = 0.

    def start(self):
        """Record the start timestamp."""
        self.st = time.time()

    def end(self, repeats=1, accumulative=True):
        """Record the end timestamp and update the elapsed time.

        Args:
            repeats (int): How many repetitions the measured span covered;
                the elapsed time is divided by it.
            accumulative (bool): Add to the running total when True,
                otherwise overwrite it.
        """
        self.et = time.time()
        elapsed = (self.et - self.st) / repeats
        if accumulative:
            self.time += elapsed
        else:
            self.time = elapsed

    def reset(self):
        """Clear the accumulated time and both timestamps."""
        self.time = 0.
        self.st = 0.
        self.et = 0.

    def value(self):
        """Return the elapsed time rounded to 4 decimal places."""
        return round(self.time, 4)
class PipeTimer(Times):
    """Timer collection for the whole pipeline.

    Tracks total wall time, per-module time and the number of processed
    images. Note that 'reid' is timed but intentionally left out of
    ``report()`` (matching the original output keys).
    """

    # module names included in `report()`, in output order
    _REPORT_MODULES = ('det', 'mot', 'attr', 'kpt', 'action')

    def __init__(self):
        super(PipeTimer, self).__init__()
        self.total_time = Times()
        self.module_time = {
            'det': Times(),
            'mot': Times(),
            'attr': Times(),
            'kpt': Times(),
            'action': Times(),
            'reid': Times()
        }
        self.img_num = 0

    def get_total_time(self):
        """Return ``(total_time, average_latency, qps)``.

        ``average_latency`` is seconds per image (guarded against a zero
        image count); ``qps`` is images per second, 0 when no time was
        recorded.
        """
        total_time = self.total_time.value()
        total_time = round(total_time, 4)
        average_latency = total_time / max(1, self.img_num)
        qps = 0
        if total_time > 0:
            qps = 1 / average_latency
        return total_time, average_latency, qps

    def info(self):
        """Print a human-readable timing summary and return the qps."""
        total_time, average_latency, qps = self.get_total_time()
        print("------------------ Inference Time Info ----------------------")
        print("total_time(ms): {}, img_num: {}".format(total_time * 1000,
                                                       self.img_num))
        for k, v in self.module_time.items():
            v_time = round(v.value(), 4)
            if v_time > 0:
                print("{} time(ms): {}".format(k, v_time * 1000))
        print("average latency time(ms): {:.2f}, QPS: {:2f}".format(
            average_latency * 1000, qps))
        return qps

    def report(self, average=False):
        """Return a dict of timings keyed by module name plus 'total' and
        'img_num'.

        Args:
            average (bool): When True, report per-image averages rounded to
                4 decimals instead of accumulated totals.

        Returns:
            dict: keys 'total', 'det', 'mot', 'attr', 'kpt', 'action',
            'img_num'.
        """
        denom = max(1, self.img_num)

        def _timing(timer):
            # per-image average (rounded) or raw accumulated value
            return round(timer.value() / denom, 4) if average \
                else timer.value()

        dic = {'total': _timing(self.total_time)}
        for name in self._REPORT_MODULES:
            dic[name] = _timing(self.module_time[name])
        dic['img_num'] = self.img_num
        return dic
def merge_model_dir(args, model_dir):
    """Override per-task `model_dir` entries in the parsed config.

    Each entry is of the form ``TASK=path``, e.g. ``--model_dir DET=ppyoloe/``
    overwrites the model_dir of the DET task in the config dict.
    """
    # set --model_dir DET=ppyoloe/ to overwrite the model_dir in config file
    task_set = ['DET', 'ATTR', 'MOT', 'KPT', 'ACTION', 'REID']
    if not model_dir:
        return args
    for entry in model_dir:
        entry = entry.strip()
        key, value = entry.split('=', 1)
        key_upper = key.upper()
        assert key_upper in task_set, 'Illegal type of task, expect task are: {}, but received {}'.format(
            task_set, key)
        args[key_upper].update({'model_dir': value})
    return args
def merge_cfg(args):
    """Load the yaml config file and override it with command line values.

    Command-line `--model_dir TASK=path` entries are applied first, then any
    top-level or nested key present in `args` replaces the yaml value.
    """
    with open(args.config) as f:
        pred_config = yaml.safe_load(f)

    def merge(cfg, arg):
        # recursively replace cfg entries that also appear in arg
        merged = copy.deepcopy(cfg)
        for key, value in cfg.items():
            if key in arg:
                merged[key] = arg[key]
            elif isinstance(value, dict):
                merged[key] = merge(value, arg)
        return merged

    args_dict = vars(args)
    model_dir = args_dict.pop('model_dir')
    pred_config = merge_model_dir(pred_config, model_dir)
    pred_config = merge(pred_config, args_dict)
    return pred_config
def print_arguments(cfg):
    """Dump the merged config to stdout in yaml format."""
    print('----------- Running Arguments -----------')
    print(yaml.dump(cfg))
    print('------------------------------------------')
def get_test_images(infer_dir, infer_img):
    """
    Get image path list in TEST mode
    """
    assert infer_img is not None or infer_dir is not None, \
        "--infer_img or --infer_dir should be set"
    assert infer_img is None or os.path.isfile(infer_img), \
        "{} is not a file".format(infer_img)
    assert infer_dir is None or os.path.isdir(infer_dir), \
        "{} is not a directory".format(infer_dir)

    # infer_img has a higher priority
    if infer_img and os.path.isfile(infer_img):
        return [infer_img]

    infer_dir = os.path.abspath(infer_dir)
    assert os.path.isdir(infer_dir), \
        "infer_dir {} is not a directory".format(infer_dir)
    # accept both lower- and upper-case extensions
    exts = ['jpg', 'jpeg', 'png', 'bmp']
    exts += [e.upper() for e in exts]
    found = set()
    for ext in exts:
        found.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
    images = list(found)
    assert len(images) > 0, "no image found in {}".format(infer_dir)
    print("Found {} inference images in total.".format(len(images)))
    return images
def crop_image_with_det(batch_input, det_res, thresh=0.3):
    """Crop detected regions with score above `thresh` out of each image.

    Args:
        batch_input (list): Batch of input images.
        det_res (dict): Detection result with a (N, 6) 'boxes' array whose
            second column holds the score, and per-image box counts in
            'boxes_num'.
        thresh (float): Score threshold below which boxes are skipped.

    Returns:
        list[list]: One list of cropped regions per input image.
    """
    boxes = det_res['boxes']
    score = det_res['boxes'][:, 1]
    boxes_num = det_res['boxes_num']
    start_idx = 0
    crop_res = []
    for b_id, input in enumerate(batch_input):
        boxes_num_i = boxes_num[b_id]
        boxes_i = boxes[start_idx:start_idx + boxes_num_i, :]
        score_i = score[start_idx:start_idx + boxes_num_i]
        # Advance the offset so the next image reads its own boxes.
        # Previously the offset was never advanced, so every image in a
        # batch re-used the first image's boxes.
        start_idx += boxes_num_i
        res = []
        for box, s in zip(boxes_i, score_i):
            if s > thresh:
                crop_image, new_box, ori_box = expand_crop(input, box)
                if crop_image is not None:
                    res.append(crop_image)
        crop_res.append(res)
    return crop_res
def normal_crop(image, rect):
    """Crop `rect` out of `image` without expansion, clipped to the image.

    Args:
        image: HxWxC array.
        rect: array-like of [label, conf, xmin, ymin, xmax, ymax].

    Returns:
        tuple: (crop, clipped_box, original_box), or (None, None, None)
        when label != 0.
    """
    height, width, _ = image.shape
    label, conf, xmin, ymin, xmax, ymax = [int(v) for v in rect.tolist()]
    org_rect = [xmin, ymin, xmax, ymax]
    if label != 0:
        # only label-0 (person) boxes are cropped
        return None, None, None
    xmin = max(0, xmin)
    ymin = max(0, ymin)
    xmax = min(width, xmax)
    ymax = min(height, ymax)
    return image[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect
def crop_image_with_mot(input, mot_res, expand=True):
    """Crop every tracked box out of the frame.

    Args:
        input: The frame image.
        mot_res (dict): Tracking result with a 'boxes' array whose columns
            after the first are the box coordinates passed to the cropper.
        expand (bool): Use `expand_crop` when True, `normal_crop` otherwise.

    Returns:
        tuple: (crops, clipped_boxes, original_boxes), aligned lists.
    """
    crop_res, new_bboxes, ori_bboxes = [], [], []
    cropper = expand_crop if expand else normal_crop
    for box in mot_res['boxes']:
        crop_image, new_bbox, ori_bbox = cropper(input, box[1:])
        if crop_image is not None:
            crop_res.append(crop_image)
            new_bboxes.append(new_bbox)
            ori_bboxes.append(ori_bbox)
    return crop_res, new_bboxes, ori_bboxes
def parse_mot_res(input):
mot_res = []
boxes, scores, ids = input[0]
for box, score, i in zip(boxes[0], scores[0], ids[0]):
xmin, ymin, w, h = box
res = [i, 0, score, xmin, ymin, xmin + w, ymin + h]
mot_res.append(res)
return {'boxes': np.array(mot_res)}
def refine_keypoint_coordinary(kpts, bbox, coord_size):
    """
    This function is used to adjust coordinate values to a fixed scale.
    """
    # top-left corners and box sizes reshaped to broadcast over (C, T, K, 1)
    top_left = np.expand_dims(np.transpose(bbox[:, 0:2], (1, 0)), (2, 3))
    box_wh = np.expand_dims(
        np.transpose(bbox[:, 2:] - bbox[:, 0:2], (1, 0)), (2, 3))
    target_w, target_h = coord_size
    scale = np.expand_dims(np.array([[target_w], [target_h]]), (2, 3))
    # normalize into the box, then rescale to the target coordinate size
    return (kpts - top_left) / box_wh * scale
def parse_mot_keypoint(input, coord_size):
    """Convert per-track keypoint sequences into normalized skeletons.

    Args:
        input: Iterable of (tracker_id, kpt_seq) pairs; each kpt_seq has
            `.kpts` (T, K, >=2) and `.bboxes` (T, 4) attributes.
        coord_size: (target_w, target_h) scale passed to
            `refine_keypoint_coordinary`.

    Returns:
        dict: {'mot_id': [ids...], 'skeleton': [arrays...]}.
    """
    result = {}
    ids = []
    skeleton = []
    for tracker_id, kpt_seq in input:
        ids.append(tracker_id)
        kpts = np.array(kpt_seq.kpts, dtype=np.float32)[:, :, :2]
        # T, K, C -> C, T, K, 1
        kpts = np.expand_dims(np.transpose(kpts, [2, 0, 1]), -1)
        bbox = np.array(kpt_seq.bboxes, dtype=np.float32)
        skeleton.append(refine_keypoint_coordinary(kpts, bbox, coord_size))
    result["mot_id"] = ids
    result["skeleton"] = skeleton
    return result
| [
"os.path.abspath",
"copy.deepcopy",
"argparse.ArgumentParser",
"os.path.isdir",
"yaml.dump",
"numpy.transpose",
"time.time",
"os.path.isfile",
"yaml.safe_load",
"numpy.array",
"python.keypoint_preprocess.expand_crop"
] | [((831, 875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (854, 875), False, 'import argparse\n'), ((8355, 8369), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (8364, 8369), False, 'import yaml\n'), ((9028, 9054), 'os.path.abspath', 'os.path.abspath', (['infer_dir'], {}), '(infer_dir)\n', (9043, 9054), False, 'import os\n'), ((9066, 9090), 'os.path.isdir', 'os.path.isdir', (['infer_dir'], {}), '(infer_dir)\n', (9079, 9090), False, 'import os\n'), ((4284, 4295), 'time.time', 'time.time', ([], {}), '()\n', (4293, 4295), False, 'import time\n'), ((4364, 4375), 'time.time', 'time.time', ([], {}), '()\n', (4373, 4375), False, 'import time\n'), ((7747, 7764), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (7761, 7764), False, 'import yaml\n'), ((7811, 7829), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (7824, 7829), False, 'import copy\n'), ((8686, 8711), 'os.path.isfile', 'os.path.isfile', (['infer_img'], {}), '(infer_img)\n', (8700, 8711), False, 'import os\n'), ((8796, 8820), 'os.path.isdir', 'os.path.isdir', (['infer_dir'], {}), '(infer_dir)\n', (8809, 8820), False, 'import os\n'), ((8938, 8963), 'os.path.isfile', 'os.path.isfile', (['infer_img'], {}), '(infer_img)\n', (8952, 8963), False, 'import os\n'), ((11434, 11451), 'numpy.array', 'np.array', (['mot_res'], {}), '(mot_res)\n', (11442, 11451), True, 'import numpy as np\n'), ((11675, 11699), 'numpy.transpose', 'np.transpose', (['tl', '(1, 0)'], {}), '(tl, (1, 0))\n', (11687, 11699), True, 'import numpy as np\n'), ((11733, 11757), 'numpy.transpose', 'np.transpose', (['wh', '(1, 0)'], {}), '(wh, (1, 0))\n', (11745, 11757), True, 'import numpy as np\n'), ((12294, 12336), 'numpy.array', 'np.array', (['kpt_seq.bboxes'], {'dtype': 'np.float32'}), '(kpt_seq.bboxes, dtype=np.float32)\n', (12302, 12336), True, 'import numpy as np\n'), ((10842, 10869), 'python.keypoint_preprocess.expand_crop', 'expand_crop', (['input', 
'box[1:]'], {}), '(input, box[1:])\n', (10853, 10869), False, 'from python.keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop\n'), ((11856, 11890), 'numpy.array', 'np.array', (['[[target_w], [target_h]]'], {}), '([[target_w], [target_h]])\n', (11864, 11890), True, 'import numpy as np\n'), ((12109, 12149), 'numpy.array', 'np.array', (['kpt_seq.kpts'], {'dtype': 'np.float32'}), '(kpt_seq.kpts, dtype=np.float32)\n', (12117, 12149), True, 'import numpy as np\n'), ((12190, 12219), 'numpy.transpose', 'np.transpose', (['kpts', '[2, 0, 1]'], {}), '(kpts, [2, 0, 1])\n', (12202, 12219), True, 'import numpy as np\n'), ((10049, 10072), 'python.keypoint_preprocess.expand_crop', 'expand_crop', (['input', 'box'], {}), '(input, box)\n', (10060, 10072), False, 'from python.keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop\n')] |
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError, OpenMCError
from . import _dll, Nuclide
from .core import _FortranObjectWithID
from .error import _error_handler
__all__ = ['Material', 'materials']
# Material functions
# Declare the argument/return types of the C API entry points so ctypes
# marshals values correctly, and attach `_error_handler` as errcheck so
# non-zero status codes are turned into Python exceptions.
_dll.openmc_extend_materials.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
_dll.openmc_extend_materials.restype = c_int
_dll.openmc_extend_materials.errcheck = _error_handler
_dll.openmc_get_material_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_material_index.restype = c_int
_dll.openmc_get_material_index.errcheck = _error_handler
_dll.openmc_material_add_nuclide.argtypes = [
    c_int32, c_char_p, c_double]
_dll.openmc_material_add_nuclide.restype = c_int
_dll.openmc_material_add_nuclide.errcheck = _error_handler
_dll.openmc_material_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_material_get_id.restype = c_int
_dll.openmc_material_get_id.errcheck = _error_handler
# Output arrays are returned via pointer-to-pointer arguments
_dll.openmc_material_get_densities.argtypes = [
    c_int32, POINTER(POINTER(c_int)), POINTER(POINTER(c_double)),
    POINTER(c_int)]
_dll.openmc_material_get_densities.restype = c_int
_dll.openmc_material_get_densities.errcheck = _error_handler
_dll.openmc_material_get_density.argtypes = [c_int32, POINTER(c_double)]
_dll.openmc_material_get_density.restype = c_int
_dll.openmc_material_get_density.errcheck = _error_handler
_dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]
_dll.openmc_material_get_volume.restype = c_int
_dll.openmc_material_get_volume.errcheck = _error_handler
_dll.openmc_material_set_density.argtypes = [c_int32, c_double, c_char_p]
_dll.openmc_material_set_density.restype = c_int
_dll.openmc_material_set_density.errcheck = _error_handler
_dll.openmc_material_set_densities.argtypes = [
    c_int32, c_int, POINTER(c_char_p), POINTER(c_double)]
_dll.openmc_material_set_densities.restype = c_int
_dll.openmc_material_set_densities.errcheck = _error_handler
_dll.openmc_material_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_material_set_id.restype = c_int
_dll.openmc_material_set_id.errcheck = _error_handler
_dll.openmc_material_set_volume.argtypes = [c_int32, c_double]
_dll.openmc_material_set_volume.restype = c_int
_dll.openmc_material_set_volume.errcheck = _error_handler
# n_materials returns a count directly (no status code), so no errcheck
_dll.n_materials.argtypes = []
_dll.n_materials.restype = c_size_t
class Material(_FortranObjectWithID):
"""Material stored internally.
This class exposes a material that is stored internally in the OpenMC
library. To obtain a view of a material with a given ID, use the
:data:`openmc.capi.materials` mapping.
Parameters
----------
uid : int or None
Unique ID of the tally
new : bool
When `index` is None, this argument controls whether a new object is
created or a view to an existing object is returned.
index : int or None
Index in the `materials` array.
Attributes
----------
id : int
ID of the material
nuclides : list of str
List of nuclides in the material
densities : numpy.ndarray
Array of densities in atom/b-cm
"""
__instances = WeakValueDictionary()
    def __new__(cls, uid=None, new=True, index=None):
        # `materials` is the module-level mapping of material ID -> view
        mapping = materials
        if index is None:
            if new:
                # Determine ID to assign
                if uid is None:
                    # next unused ID (mapping may be empty)
                    uid = max(mapping, default=0) + 1
                else:
                    if uid in mapping:
                        raise AllocationError('A material with ID={} has already '
                                              'been allocated.'.format(uid))
                # Grow the C-side materials array by one; the new slot's
                # index is written back into `index`
                index = c_int32()
                _dll.openmc_extend_materials(1, index, None)
                index = index.value
            else:
                # Re-use the index of the already-allocated material
                index = mapping[uid]._index
        elif index == -1:
            # Special value indicates void material
            return None
        if index not in cls.__instances:
            # Cache one Python view per C-side index (weakly referenced,
            # so views disappear when no longer used)
            instance = super(Material, cls).__new__(cls)
            instance._index = index
            if uid is not None:
                instance.id = uid
            cls.__instances[index] = instance
        return cls.__instances[index]
@property
def id(self):
mat_id = c_int32()
_dll.openmc_material_get_id(self._index, mat_id)
return mat_id.value
    @id.setter
    def id(self, mat_id):
        # Propagate the new ID to the C-side material
        _dll.openmc_material_set_id(self._index, mat_id)
@property
def volume(self):
volume = c_double()
try:
_dll.openmc_material_get_volume(self._index, volume)
except OpenMCError:
return None
return volume.value
@volume.setter
def volume(self, volume):
_dll.openmc_material_set_volume(self._index, volume)
@property
def nuclides(self):
return self._get_densities()[0]
return nuclides
@property
def density(self):
density = c_double()
try:
_dll.openmc_material_get_density(self._index, density)
except OpenMCError:
return None
return density.value
@property
def densities(self):
return self._get_densities()[1]
def _get_densities(self):
"""Get atom densities in a material.
Returns
-------
list of string
List of nuclide names
numpy.ndarray
Array of densities in atom/b-cm
"""
# Allocate memory for arguments that are written to
nuclides = POINTER(c_int)()
densities = POINTER(c_double)()
n = c_int()
# Get nuclide names and densities
_dll.openmc_material_get_densities(self._index, nuclides, densities, n)
# Convert to appropriate types and return
nuclide_list = [Nuclide(nuclides[i]).name for i in range(n.value)]
density_array = as_array(densities, (n.value,))
return nuclide_list, density_array
def add_nuclide(self, name, density):
"""Add a nuclide to a material.
Parameters
----------
name : str
Name of nuclide, e.g. 'U235'
density : float
Density in atom/b-cm
"""
_dll.openmc_material_add_nuclide(self._index, name.encode(), density)
def set_density(self, density, units='atom/b-cm'):
"""Set density of a material.
Parameters
----------
density : float
Density
units : {'atom/b-cm', 'g/cm3'}
Units for density
"""
_dll.openmc_material_set_density(self._index, density, units.encode())
def set_densities(self, nuclides, densities):
"""Set the densities of a list of nuclides in a material
Parameters
----------
nuclides : iterable of str
Nuclide names
densities : iterable of float
Corresponding densities in atom/b-cm
"""
# Convert strings to an array of char*
nucs = (c_char_p * len(nuclides))()
nucs[:] = [x.encode() for x in nuclides]
# Get numpy array as a double*
d = np.asarray(densities)
dp = d.ctypes.data_as(POINTER(c_double))
_dll.openmc_material_set_densities(self._index, len(nuclides), nucs, dp)
class _MaterialMapping(Mapping):
    """Read-only mapping from material ID to a :class:`Material` view."""

    def __getitem__(self, key):
        idx = c_int32()
        try:
            _dll.openmc_get_material_index(key, idx)
        except (AllocationError, InvalidIDError) as err:
            # Mapping.__contains__ relies on a KeyError to signal a miss.
            raise KeyError(str(err))
        return Material(index=idx.value)

    def __iter__(self):
        # Yield the ID of each stored material, in array order.
        for position in range(len(self)):
            view = Material(index=position)
            yield view.id

    def __len__(self):
        return _dll.n_materials()

    def __repr__(self):
        return repr(dict(self))
# Module-level view of all materials currently stored in the OpenMC library.
materials = _MaterialMapping()
| [
"ctypes.c_int32",
"ctypes.c_double",
"ctypes.c_int",
"numpy.asarray",
"numpy.ctypeslib.as_array",
"ctypes.POINTER",
"weakref.WeakValueDictionary"
] | [((493, 509), 'ctypes.POINTER', 'POINTER', (['c_int32'], {}), '(c_int32)\n', (500, 509), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((511, 527), 'ctypes.POINTER', 'POINTER', (['c_int32'], {}), '(c_int32)\n', (518, 527), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((681, 697), 'ctypes.POINTER', 'POINTER', (['c_int32'], {}), '(c_int32)\n', (688, 697), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((1039, 1055), 'ctypes.POINTER', 'POINTER', (['c_int32'], {}), '(c_int32)\n', (1046, 1055), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((1273, 1287), 'ctypes.POINTER', 'POINTER', (['c_int'], {}), '(c_int)\n', (1280, 1287), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((1455, 1472), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1462, 1472), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((1635, 1652), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1642, 1652), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((2010, 2027), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (2017, 2027), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((2029, 2046), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (2036, 2046), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((3352, 3373), 'weakref.WeakValueDictionary', 'WeakValueDictionary', ([], {}), '()\n', (3371, 3373), False, 'from weakref import WeakValueDictionary\n'), ((1224, 1238), 'ctypes.POINTER', 'POINTER', (['c_int'], {}), '(c_int)\n', (1231, 1238), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((1249, 1266), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), 
'(c_double)\n', (1256, 1266), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((4483, 4492), 'ctypes.c_int32', 'c_int32', ([], {}), '()\n', (4490, 4492), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((4731, 4741), 'ctypes.c_double', 'c_double', ([], {}), '()\n', (4739, 4741), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((5168, 5178), 'ctypes.c_double', 'c_double', ([], {}), '()\n', (5176, 5178), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((5803, 5810), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (5808, 5810), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((6084, 6115), 'numpy.ctypeslib.as_array', 'as_array', (['densities', '(n.value,)'], {}), '(densities, (n.value,))\n', (6092, 6115), False, 'from numpy.ctypeslib import as_array\n'), ((7335, 7356), 'numpy.asarray', 'np.asarray', (['densities'], {}), '(densities)\n', (7345, 7356), True, 'import numpy as np\n'), ((7571, 7580), 'ctypes.c_int32', 'c_int32', ([], {}), '()\n', (7578, 7580), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((5734, 5748), 'ctypes.POINTER', 'POINTER', (['c_int'], {}), '(c_int)\n', (5741, 5748), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((5771, 5788), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (5778, 5788), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((7387, 7404), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (7394, 7404), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n'), ((3876, 3885), 'ctypes.c_int32', 'c_int32', ([], {}), '()\n', (3883, 3885), False, 'from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\n')] |
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from scipy.ndimage.interpolation import zoom
from .care_standard import CARE
from ..data import PercentileNormalizer, PadAndCropResizer
from ..utils import _raise, axes_dict
class UpsamplingCARE(CARE):
    """CARE network for combined image restoration and upsampling of one dimension.

    Extends :class:`csbdeep.models.CARE` by replacing prediction
    (:func:`predict`, :func:`predict_probabilistic`) to first upsample Z before image restoration.
    """

    def predict(self, img, axes, factor, normalizer=PercentileNormalizer(), resizer=PadAndCropResizer(), n_tiles=None):
        """Apply neural network to raw image with low-resolution Z axis.

        See :func:`CARE.predict` for documentation.

        Parameters
        ----------
        factor : float
            Upsampling factor for Z axis. It is important that this is chosen in correspondence
            to the subsampling factor used during training data generation.
        """
        upsampled = self._upsample(img, axes, factor)
        return super(UpsamplingCARE, self).predict(upsampled, axes, normalizer, resizer, n_tiles)

    def predict_probabilistic(self, img, axes, factor, normalizer=PercentileNormalizer(), resizer=PadAndCropResizer(), n_tiles=None):
        """Apply neural network to raw image with low-resolution Z axis for probabilistic prediction.

        See :func:`CARE.predict_probabilistic` for documentation.

        Parameters
        ----------
        factor : float
            Upsampling factor for Z axis. It is important that this is chosen in correspondence
            to the subsampling factor used during training data generation.
        """
        upsampled = self._upsample(img, axes, factor)
        return super(UpsamplingCARE, self).predict_probabilistic(upsampled, axes, normalizer, resizer, n_tiles)

    @staticmethod
    def _upsample(img, axes, factor, axis='Z'):
        # Zoom by `factor` along `axis` only; every other dimension keeps 1.
        zoom_factors = np.ones(img.ndim)
        zoom_factors[axes_dict(axes)[axis]] = factor
        return zoom(img, zoom_factors, order=1)
| [
"scipy.ndimage.interpolation.zoom",
"numpy.ones"
] | [((1991, 2008), 'numpy.ones', 'np.ones', (['img.ndim'], {}), '(img.ndim)\n', (1998, 2008), True, 'import numpy as np\n'), ((2072, 2099), 'scipy.ndimage.interpolation.zoom', 'zoom', (['img', 'factors'], {'order': '(1)'}), '(img, factors, order=1)\n', (2076, 2099), False, 'from scipy.ndimage.interpolation import zoom\n')] |
from util import mean_max_similarity_semantic
from natsort import natsorted
import numpy as np
import os
import glob
import sys
def L2_norm_semantic(semantic_embedding):
    """Scale every embedding vector to unit L2 norm.

    Args:
        semantic_embedding: iterable of numpy arrays (one embedding each).

    Returns:
        numpy.ndarray stacking the normalized embeddings.

    Note: a zero-norm embedding yields NaN/inf entries, matching the
    original loop-based behavior.
    """
    # Idiomatic comprehension replaces the manual append loop.
    return np.array([emb / np.linalg.norm(emb) for emb in semantic_embedding])
def semantic_based_searching(topic_embeddings, shot_id_list):
    """Rank shots by semantic similarity to the given topic embeddings.

    For each shot id, loads its saved feature vectors (``.npy`` files under
    ``../features/VGG19-1K``), L2-normalizes them, and scores them against
    ``topic_embeddings`` via ``mean_max_similarity_semantic``.

    Args:
        topic_embeddings: query embeddings passed through to the scorer.
        shot_id_list: shot identifiers; the video id is parsed from the
            prefix before the first underscore (characters after index 4,
            presumably stripping a 'shot' prefix -- TODO confirm).

    Returns:
        list of (shot_id, score) tuples sorted by descending score.
    """
    result = []
    semantic_feat_root_path = '../features/VGG19-1K'
    for idx, shot_id in enumerate(shot_id_list):
        print(f'{idx} - processing {shot_id}')
        # Drop the first 4 characters of the prefix to obtain the video id.
        video_id = shot_id.split('_')[0][4:]
        semantic_embedding = []
        # natsorted keeps files in natural numeric order (e.g. 2 before 10).
        for semantic_path in natsorted(glob.glob(os.path.join(semantic_feat_root_path, f'video{video_id}', f'{shot_id}', '*npy'))):
            semantic_embedding.append(np.load(semantic_path))
        l2_norm_semantic_embedding = L2_norm_semantic(semantic_embedding)
        result.append((shot_id, mean_max_similarity_semantic(
            topic_embeddings, l2_norm_semantic_embedding)))
    # Highest-similarity shots first.
    result = sorted(result, key=lambda x: x[1], reverse=True)
    return result
if __name__ == '__main__':
    # Input/output locations for the 2019 person-action retrieval task.
    topics_data_folder = '../data/raw_data/queries/person-action-2019'
    topics_feat_folder = '../features/Query_feature/2019/vgg19-1K'
    config_folder = '../result/config_vggface2_2019_linear_svm_vgg16_pool5_gap_with_example_video'
    final_result_folder = f'{config_folder}_with_semantic'
    # query_ids = ['9249', '9250', '9251', '9252', '9253', '9254', '9255', '9256', '9257', '9258', '9259', '9260', '9261', '9262',
    #              '9263', '9264', '9265', '9266', '9267', '9268', '9269', '9270', '9271', '9272', '9273', '9274', '9275', '9276', '9277', '9278']
    # Process a single query id passed on the command line.
    query_ids = [sys.argv[1]]
    for query_id in query_ids:
        # Get query embeddings
        action_examples_path = os.path.join(
            topics_data_folder, query_id, 'video_action')
        query_embeddings = []
        # One precomputed feature file per example clip, named after the clip.
        for clip_path in glob.glob(os.path.join(action_examples_path, '*mp4')):
            clip_name = os.path.splitext(os.path.basename(clip_path))[0]
            query_embedding = np.load(os.path.join(
                topics_feat_folder, f'{clip_name}.npy'))
            l2_norm_query_embedding = L2_norm_semantic(query_embedding)
            query_embeddings.append(l2_norm_query_embedding)
        # Person name is the stem of the (single) image in person_img/.
        person = os.path.basename(glob.glob(os.path.join(
            topics_data_folder, query_id, 'person_img', '*'))[0]).split('.')[0]
        person_result_path = os.path.join(
            config_folder, person, 'stage 1', 'result.txt')
        shot_id_list = []
        # Stage-1 results are TREC-style 6-column lines; column 3 holds the
        # shot id (variable is named audio_id -- presumably a misnomer).
        with open(person_result_path, 'r') as f:
            for line in f:
                _, _, audio_id, _, _, _ = line.rstrip().split()
                shot_id_list.append(audio_id)
        result = semantic_based_searching(query_embeddings, shot_id_list)
        save_path = os.path.join(final_result_folder, query_id, 'stage 1')
        if not os.path.exists(save_path):
            os.makedirs(save_path, exist_ok=True)
        # Write the re-ranked list back out in the same TREC-style format.
        with open(os.path.join(save_path, 'result.txt'), 'w') as f:
            for i, r in enumerate(result):
                f.write(' '.join((query_id, 'Q0', r[0], str(
                    i+1), str(r[1]), 'STANDARD')) + '\n')
| [
"numpy.load",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"numpy.linalg.norm",
"numpy.array",
"util.mean_max_similarity_semantic",
"os.path.join"
] | [((296, 309), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (304, 309), True, 'import numpy as np\n'), ((1806, 1864), 'os.path.join', 'os.path.join', (['topics_data_folder', 'query_id', '"""video_action"""'], {}), "(topics_data_folder, query_id, 'video_action')\n", (1818, 1864), False, 'import os\n'), ((2474, 2534), 'os.path.join', 'os.path.join', (['config_folder', 'person', '"""stage 1"""', '"""result.txt"""'], {}), "(config_folder, person, 'stage 1', 'result.txt')\n", (2486, 2534), False, 'import os\n'), ((2857, 2911), 'os.path.join', 'os.path.join', (['final_result_folder', 'query_id', '"""stage 1"""'], {}), "(final_result_folder, query_id, 'stage 1')\n", (2869, 2911), False, 'import os\n'), ((1944, 1986), 'os.path.join', 'os.path.join', (['action_examples_path', '"""*mp4"""'], {}), "(action_examples_path, '*mp4')\n", (1956, 1986), False, 'import os\n'), ((2927, 2952), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (2941, 2952), False, 'import os\n'), ((2966, 3003), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (2977, 3003), False, 'import os\n'), ((258, 283), 'numpy.linalg.norm', 'np.linalg.norm', (['embedding'], {}), '(embedding)\n', (272, 283), True, 'import numpy as np\n'), ((667, 746), 'os.path.join', 'os.path.join', (['semantic_feat_root_path', 'f"""video{video_id}"""', 'f"""{shot_id}"""', '"""*npy"""'], {}), "(semantic_feat_root_path, f'video{video_id}', f'{shot_id}', '*npy')\n", (679, 746), False, 'import os\n'), ((788, 810), 'numpy.load', 'np.load', (['semantic_path'], {}), '(semantic_path)\n', (795, 810), True, 'import numpy as np\n'), ((919, 993), 'util.mean_max_similarity_semantic', 'mean_max_similarity_semantic', (['topic_embeddings', 'l2_norm_semantic_embedding'], {}), '(topic_embeddings, l2_norm_semantic_embedding)\n', (947, 993), False, 'from util import mean_max_similarity_semantic\n'), ((2101, 2153), 'os.path.join', 'os.path.join', 
(['topics_feat_folder', 'f"""{clip_name}.npy"""'], {}), "(topics_feat_folder, f'{clip_name}.npy')\n", (2113, 2153), False, 'import os\n'), ((3023, 3060), 'os.path.join', 'os.path.join', (['save_path', '"""result.txt"""'], {}), "(save_path, 'result.txt')\n", (3035, 3060), False, 'import os\n'), ((2030, 2057), 'os.path.basename', 'os.path.basename', (['clip_path'], {}), '(clip_path)\n', (2046, 2057), False, 'import os\n'), ((2350, 2411), 'os.path.join', 'os.path.join', (['topics_data_folder', 'query_id', '"""person_img"""', '"""*"""'], {}), "(topics_data_folder, query_id, 'person_img', '*')\n", (2362, 2411), False, 'import os\n')] |
import gym
import numpy as np
from smarts.core.sensors import Observation
class Reward(gym.Wrapper):
    """Gym wrapper that reshapes each agent's reward from a SMARTS env."""

    def __init__(self, env: gym.Env):
        super().__init__(env)

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        obs, env_reward, done, info = self.env.step(action)
        # Recompute every agent's reward from its observation.
        wrapped_reward = {}
        for agent_id, agent_reward in env_reward.items():
            wrapped_reward[agent_id] = self._reward(obs[agent_id], agent_reward)
        return obs, wrapped_reward, done, info

    def _reward(self, obs: Observation, env_reward: np.float64) -> np.float32:
        # Driving off road terminates reward computation with a flat penalty.
        if obs.events.off_road:
            return np.float32(-200)
        # Any collision likewise yields only the penalty.
        if len(obs.events.collisions) > 0:
            return np.float32(-200)
        # Otherwise pass through the env's distance-travelled reward.
        return np.float32(env_reward)
| [
"numpy.float32"
] | [((1005, 1023), 'numpy.float32', 'np.float32', (['reward'], {}), '(reward)\n', (1015, 1023), True, 'import numpy as np\n'), ((760, 778), 'numpy.float32', 'np.float32', (['reward'], {}), '(reward)\n', (770, 778), True, 'import numpy as np\n'), ((900, 918), 'numpy.float32', 'np.float32', (['reward'], {}), '(reward)\n', (910, 918), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from sklearn.cluster import KMeans
import sklearn.decomposition
from mnist import load_mnist
import gmm
import classifier
import kmeans as kmeans_
# Command-line interface: data location and number of mixture components.
parser = argparse.ArgumentParser(
    prog='em',
    description='train model with em'
)
parser.add_argument('--path', default='/home/data/ml/mnist',
                    help='path to the mnist data')
parser.add_argument('--k', default=10, type=int,
                    help='number of components')
# NOTE(review): parsed at import time, so importing this module consumes argv.
args = parser.parse_args()
def compare_precisions_by_nb_of_components():
    """Train GMM classifiers for several component counts and print precisions.

    Loads MNIST train/test sets, flattens images to 784-dim vectors, projects
    them to 40 PCA dimensions, then for each k in a fixed list trains a
    k-component GMM classifier (initialized from precomputed k-means centers)
    and prints its test-set precision.
    """
    # Precomputed k-means centers used to initialize the GMM means.
    kmeans = kmeans_.load_kmeans('kmeans-20.dat')
    train_data, train_labels = load_mnist(dataset='training', path=args.path)
    train_data = np.reshape(train_data, (train_data.shape[0], 784))
    test_data, test_labels = load_mnist(dataset='testing', path=args.path)
    test_data = np.reshape(test_data, (test_data.shape[0], 784))
    # Reduce dimensionality with PCA fitted on the training data only.
    d = 40
    reducer = sklearn.decomposition.PCA(n_components=d)
    reducer.fit(train_data)
    train_data_reduced = reducer.transform(train_data)
    test_data_reduced = reducer.transform(test_data)
    # The k-means centers must live in the same reduced space.
    kmeans_reduced = reducer.transform(kmeans)
    label_set = set(train_labels)
    precisions = []
    ks = list(range(1, 11)) + [15, 20, 30]
    for k in ks:
        print('learning {} components'.format(k))
        model = classifier.classifier(k, covariance_type='full',
                                      model_type='gmm',
                                      means_init_heuristic='kmeans',
                                      means=kmeans_reduced,
                                      verbose=False)
        model.fit(train_data_reduced, train_labels)
        predicted_labels = model.predict(test_data_reduced, label_set)
        expected_labels = test_labels
        # Precision here is the fraction of exactly-matching labels.
        precision = np.mean(predicted_labels == expected_labels)
        precisions.append((k, precision))
        print('precision: {}'.format(precision))
    print(precisions)
def main():
    """Entry point: run the component-count precision comparison."""
    compare_precisions_by_nb_of_components()
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"kmeans.load_kmeans",
"mnist.load_mnist",
"numpy.mean",
"numpy.reshape",
"classifier.classifier"
] | [((194, 263), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""em"""', 'description': '"""train model with em"""'}), "(prog='em', description='train model with em')\n", (217, 263), False, 'import argparse\n'), ((575, 611), 'kmeans.load_kmeans', 'kmeans_.load_kmeans', (['"""kmeans-20.dat"""'], {}), "('kmeans-20.dat')\n", (594, 611), True, 'import kmeans as kmeans_\n'), ((644, 690), 'mnist.load_mnist', 'load_mnist', ([], {'dataset': '"""training"""', 'path': 'args.path'}), "(dataset='training', path=args.path)\n", (654, 690), False, 'from mnist import load_mnist\n'), ((708, 758), 'numpy.reshape', 'np.reshape', (['train_data', '(train_data.shape[0], 784)'], {}), '(train_data, (train_data.shape[0], 784))\n', (718, 758), True, 'import numpy as np\n'), ((788, 833), 'mnist.load_mnist', 'load_mnist', ([], {'dataset': '"""testing"""', 'path': 'args.path'}), "(dataset='testing', path=args.path)\n", (798, 833), False, 'from mnist import load_mnist\n'), ((850, 898), 'numpy.reshape', 'np.reshape', (['test_data', '(test_data.shape[0], 784)'], {}), '(test_data, (test_data.shape[0], 784))\n', (860, 898), True, 'import numpy as np\n'), ((1337, 1475), 'classifier.classifier', 'classifier.classifier', (['k'], {'covariance_type': '"""full"""', 'model_type': '"""gmm"""', 'means_init_heuristic': '"""kmeans"""', 'means': 'kmeans_reduced', 'verbose': '(False)'}), "(k, covariance_type='full', model_type='gmm',\n means_init_heuristic='kmeans', means=kmeans_reduced, verbose=False)\n", (1358, 1475), False, 'import classifier\n'), ((1807, 1851), 'numpy.mean', 'np.mean', (['(predicted_labels == expected_labels)'], {}), '(predicted_labels == expected_labels)\n', (1814, 1851), True, 'import numpy as np\n')] |
import numpy
import six
import chainer
from chainer import backend
from chainer import utils
import chainerx
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
    """Asserts if some corresponding element of x and y differs too much.

    This function can handle both CPU and GPU arrays simultaneously.

    Args:
        x: Left-hand-side array.
        y: Right-hand-side array.
        atol (float): Absolute tolerance.
        rtol (float): Relative tolerance.
        verbose (bool): If ``True``, it outputs verbose messages on error.
    """
    # Move both operands to host memory so numpy can compare them directly.
    x = backend.CpuDevice().send(utils.force_array(x))
    y = backend.CpuDevice().send(utils.force_array(y))
    try:
        numpy.testing.assert_allclose(
            x, y, atol=atol, rtol=rtol, verbose=verbose)
    except AssertionError as e:
        # Build a richer failure message than numpy's default.
        f = six.StringIO()
        f.write(str(e) + '\n\n')
        f.write(
            'assert_allclose failed: \n' +
            ' shape: {} {}\n'.format(x.shape, y.shape) +
            ' dtype: {} {}\n'.format(x.dtype, y.dtype))
        if x.shape == y.shape:
            # Promote 0-d arrays to 1-d so indexing with unravel_index works.
            xx = x if x.ndim != 0 else x.reshape((1,))
            yy = y if y.ndim != 0 else y.reshape((1,))
            err = numpy.abs(xx - yy)
            # Report the position and values at the largest absolute error.
            i = numpy.unravel_index(numpy.argmax(err), err.shape)
            f.write(
                ' i: {}\n'.format(i) +
                ' x[i]: {}\n'.format(xx[i]) +
                ' y[i]: {}\n'.format(yy[i]) +
                ' err[i]: {}\n'.format(err[i]))
        opts = numpy.get_printoptions()
        try:
            # Temporarily raise the summarization threshold so the arrays are
            # printed in full in the failure message.
            numpy.set_printoptions(threshold=10000)
            f.write('x: ' + numpy.array2string(x, prefix='x: ') + '\n')
            f.write('y: ' + numpy.array2string(y, prefix='y: ') + '\n')
        finally:
            # Always restore the caller's print options.
            numpy.set_printoptions(**opts)
        raise AssertionError(f.getvalue())
def _as_noncontiguous_array(array):
    # This is a temporary function used by tests to convert contiguous arrays
    # to non-contiguous arrays.
    #
    # This functions can be removed if e.g. BackendConfig starts supporting
    # contiguousness configurations and the array conversion method takes that
    # into account. Note that that would also mean rewriting tests to use the
    # backend injector in the first place.
    def as_noncontiguous_array(a):
        if a is None:
            return None
        if a.size <= 1:
            # Arrays with at most one element are always contiguous.
            return a
        device = backend.get_device_from_array(a)
        xp = device.xp
        with chainer.using_device(device):
            # Allocate a buffer twice as long on the first axis, write the
            # data into every other row, then keep only those rows: the
            # resulting strided view holds the same values but is
            # non-contiguous.
            ret = xp.empty(
                (a.shape[0] * 2,) + a.shape[1:], dtype=a.dtype)
            ret[::2] = a
            ret = ret[::2]
        if device.xp is chainerx:
            assert not ret.is_contiguous
        else:
            assert not ret.flags.c_contiguous
        return ret
    if isinstance(array, (list, tuple)):
        # Recurse into containers, preserving the container type.
        return type(array)([_as_noncontiguous_array(arr) for arr in array])
    else:
        return as_noncontiguous_array(array)
| [
"chainer.utils.force_array",
"numpy.set_printoptions",
"numpy.abs",
"numpy.argmax",
"chainer.backend.CpuDevice",
"numpy.array2string",
"six.StringIO",
"chainer.backend.get_device_from_array",
"numpy.testing.assert_allclose",
"chainer.using_device",
"numpy.get_printoptions"
] | [((598, 618), 'chainer.utils.force_array', 'utils.force_array', (['x'], {}), '(x)\n', (615, 618), False, 'from chainer import utils\n'), ((653, 673), 'chainer.utils.force_array', 'utils.force_array', (['y'], {}), '(y)\n', (670, 673), False, 'from chainer import utils\n'), ((692, 766), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['x', 'y'], {'atol': 'atol', 'rtol': 'rtol', 'verbose': 'verbose'}), '(x, y, atol=atol, rtol=rtol, verbose=verbose)\n', (721, 766), False, 'import numpy\n'), ((2423, 2455), 'chainer.backend.get_device_from_array', 'backend.get_device_from_array', (['a'], {}), '(a)\n', (2452, 2455), False, 'from chainer import backend\n'), ((573, 592), 'chainer.backend.CpuDevice', 'backend.CpuDevice', ([], {}), '()\n', (590, 592), False, 'from chainer import backend\n'), ((628, 647), 'chainer.backend.CpuDevice', 'backend.CpuDevice', ([], {}), '()\n', (645, 647), False, 'from chainer import backend\n'), ((824, 838), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (836, 838), False, 'import six\n'), ((1510, 1534), 'numpy.get_printoptions', 'numpy.get_printoptions', ([], {}), '()\n', (1532, 1534), False, 'import numpy\n'), ((2492, 2520), 'chainer.using_device', 'chainer.using_device', (['device'], {}), '(device)\n', (2512, 2520), False, 'import chainer\n'), ((1206, 1224), 'numpy.abs', 'numpy.abs', (['(xx - yy)'], {}), '(xx - yy)\n', (1215, 1224), False, 'import numpy\n'), ((1560, 1599), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'threshold': '(10000)'}), '(threshold=10000)\n', (1582, 1599), False, 'import numpy\n'), ((1773, 1803), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {}), '(**opts)\n', (1795, 1803), False, 'import numpy\n'), ((1261, 1278), 'numpy.argmax', 'numpy.argmax', (['err'], {}), '(err)\n', (1273, 1278), False, 'import numpy\n'), ((1628, 1663), 'numpy.array2string', 'numpy.array2string', (['x'], {'prefix': '"""x: """'}), "(x, prefix='x: ')\n", (1646, 1663), False, 'import numpy\n'), ((1700, 
1735), 'numpy.array2string', 'numpy.array2string', (['y'], {'prefix': '"""y: """'}), "(y, prefix='y: ')\n", (1718, 1735), False, 'import numpy\n')] |
import numpy as np
def randomization(n):
    """
    Arg:
      n - an integer
    Returns:
      A - a randomly-generated nx1 Numpy array of uniform values in [0, 1).
    """
    # Fix: removed an unreachable ``raise NotImplementedError`` that
    # followed the return statement.
    return np.random.random((n, 1))
def operations(h, w):
    """
    Takes two inputs, h and w, and makes two Numpy arrays A and B of size
    h x w, and returns A, B, and s, the sum of A and B.
    Arg:
      h - an integer describing the height of A and B
      w - an integer describing the width of A and B
    Returns (in this order):
      A - a randomly-generated h x w Numpy array.
      B - a randomly-generated h x w Numpy array.
      s - the sum of A and B.
    """
    A = np.random.random((h, w))
    B = np.random.random((h, w))
    # Fix: removed an unreachable ``raise NotImplementedError`` after return.
    return A, B, A + B
def norm(A, B):
    """
    Takes two Numpy column arrays, A and B, and returns the L2 norm of their
    sum.
    Arg:
      A - a Numpy array
      B - a Numpy array
    Returns:
      s - the L2 norm of A+B.
    """
    # Fix: removed unreachable ``raise NotImplementedError`` and stopped
    # shadowing the builtin ``sum`` with a local variable.
    return np.linalg.norm(A + B)
def neural_network(inputs, weights):
    """
    Takes an input vector and runs it through a 1-layer neural network
    with a given weight matrix and returns the output.
    Arg:
      inputs - 2 x 1 NumPy array
      weights - 2 x 1 NumPy array
    Returns (in this order):
      out - a 1 x 1 NumPy array, representing the output of the neural network
    """
    # Weighted sum of the inputs followed by a tanh activation; the result is
    # wrapped so the output keeps the documented (1, 1) shape.
    # Fix: removed an unreachable ``raise NotImplementedError`` after return.
    total = np.sum(np.multiply(inputs, weights))
    return np.tanh(np.array([[total]]))
def scalar_function(x, y):
    """
    Returns the f(x,y) defined in the problem statement:
    f(x, y) = x*y when x <= y, otherwise x/y.
    """
    # Fix: removed an unreachable ``raise NotImplementedError`` after return.
    return x * y if x <= y else x / y
def vector_function(x, y):
    """
    Elementwise application of ``scalar_function`` to array inputs.

    Make sure vector_function can deal with vector input x,y.
    """
    # Fix: removed an unreachable ``raise NotImplementedError`` after return.
    return np.vectorize(scalar_function)(x, y)
| [
"numpy.multiply",
"numpy.vectorize",
"numpy.sum",
"numpy.tanh",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.add"
] | [((157, 181), 'numpy.random.random', 'np.random.random', (['(n, 1)'], {}), '((n, 1))\n', (173, 181), True, 'import numpy as np\n'), ((698, 722), 'numpy.random.random', 'np.random.random', (['(h, w)'], {}), '((h, w))\n', (714, 722), True, 'import numpy as np\n'), ((730, 754), 'numpy.random.random', 'np.random.random', (['(h, w)'], {}), '((h, w))\n', (746, 754), True, 'import numpy as np\n'), ((762, 774), 'numpy.add', 'np.add', (['A', 'B'], {}), '(A, B)\n', (768, 774), True, 'import numpy as np\n'), ((1072, 1084), 'numpy.add', 'np.add', (['A', 'B'], {}), '(A, B)\n', (1078, 1084), True, 'import numpy as np\n'), ((1092, 1111), 'numpy.linalg.norm', 'np.linalg.norm', (['sum'], {}), '(sum)\n', (1106, 1111), True, 'import numpy as np\n'), ((1556, 1584), 'numpy.multiply', 'np.multiply', (['inputs', 'weights'], {}), '(inputs, weights)\n', (1567, 1584), True, 'import numpy as np\n'), ((2064, 2093), 'numpy.vectorize', 'np.vectorize', (['scalar_function'], {}), '(scalar_function)\n', (2076, 2093), True, 'import numpy as np\n'), ((1607, 1616), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (1613, 1616), True, 'import numpy as np\n'), ((1642, 1655), 'numpy.tanh', 'np.tanh', (['out1'], {}), '(out1)\n', (1649, 1655), True, 'import numpy as np\n')] |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Intermittent Resource sizing class
This Python class contains methods and attributes specific for technology analysis within StorageVet.
"""
import cvxpy as cvx
from dervet.MicrogridDER.DERExtension import DERExtension
from dervet.MicrogridDER.ContinuousSizing import ContinuousSizing
from storagevet.Technology import PVSystem
from storagevet.ErrorHandling import *
import numpy as np
import pandas as pd
class IntermittentResourceSizing(PVSystem.PV, DERExtension, ContinuousSizing):
""" An intermittent resource, with sizing optimization
"""
    def __init__(self, params):
        """ Initialize all technology with the following attributes.
        Args:
            params (dict): Dict of parameters for initialization
        """
        TellUser.debug(f"Initializing {__name__}")
        PVSystem.PV.__init__(self, params)
        DERExtension.__init__(self, params)
        ContinuousSizing.__init__(self, params)
        # Percentages from the input schema are converted to fractions.
        self.nu = params['nu'] / 100
        self.gamma = params['gamma'] / 100
        self.curtail = params['curtail']
        self.max_rated_capacity = params['max_rated_capacity']
        self.min_rated_capacity = params['min_rated_capacity']
        # Power purchase agreement (PPA) inputs.
        self.ppa = params['PPA']
        self.ppa_cost = params['PPA_cost']
        self.ppa_inflation = params['PPA_inflation_rate'] / 100
        # A falsy rated capacity means the size is to be optimized: make it a
        # CVXPY integer variable and bound it with the user-given min/max.
        if not self.rated_capacity:
            self.rated_capacity = cvx.Variable(name=f'{self.name}rating', integer=True)
            self.inv_max = self.rated_capacity
            # Non-negativity: -rating <= 0.
            self.size_constraints += [cvx.NonPos(-self.rated_capacity)]
            if self.min_rated_capacity:
                self.size_constraints += [cvx.NonPos(self.min_rated_capacity - self.rated_capacity)]
            if self.max_rated_capacity:
                self.size_constraints += [cvx.NonPos(self.rated_capacity - self.max_rated_capacity)]
def get_discharge(self, mask):
""" The effective discharge of this DER
Args:
mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
in the subs data set
Returns: the discharge as a function of time for the
"""
if self.being_sized():
return cvx.Parameter(shape=sum(mask), name=f'{self.name}/rated gen', value=self.gen_per_rated.loc[mask].values) * self.rated_capacity
else:
return super().get_discharge(mask)
def get_capex(self, solution=False):
capex = super().get_capex()
if solution:
try:
capex = capex.value
except AttributeError:
capex = capex
return capex
def constraints(self, mask, **kwargs):
""" Builds the master constraint list for the subset of timeseries data being optimized.
Returns:
A list of constraints that corresponds the battery's physical constraints and its service constraints
"""
constraints = super().constraints(mask, **kwargs)
constraints += self.size_constraints
return constraints
    def objective_function(self, mask, annuity_scalar=1):
        """ Generates the objective function related to a technology. Default includes O&M which can be 0
        Args:
            mask (Series): Series of booleans used, the same length as case.power_kw
            annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture the cost/benefit over
                the entire project lifetime (only to be set iff sizing, else alpha should not affect the aobject function)
        Returns:
            self.costs (Dict): Dict of objective costs
        """
        costs = super().objective_function(mask, annuity_scalar)
        # Add sizing-related cost terms on top of the base technology costs.
        costs.update(self.sizing_objective())
        return costs
def timeseries_report(self):
""" Summaries the optimization results for this DER.
Returns: A timeseries dataframe with user-friendly column headers that summarize the results
pertaining to this instance
"""
results = super().timeseries_report()
if self.being_sized() and not self.curtail:
# convert expressions into values
tech_id = self.unique_tech_id()
results[tech_id + ' Electric Generation (kW)'] = self.maximum_generation()
results[tech_id + ' Maximum (kW)'] = self.maximum_generation()
return results
def maximum_generation(self, label_selection=None, sizing=False):
""" The most that the PV system could discharge.
Args:
label_selection: A single label, e.g. 5 or 'a',
a list or array of labels, e.g. ['a', 'b', 'c'],
a boolean array of the same length as the axis being sliced, e.g. [True, False, True]
a callable function with one argument (the calling Series or DataFrame)
Returns: valid array output for indexing (one of the above) of the max generation profile
"""
PV_gen = super().maximum_generation(label_selection)
if sizing:
try:
PV_gen = PV_gen.value
except AttributeError:
pass
return PV_gen
def set_size(self):
""" Save value of size variables of DERs
"""
self.rated_capacity = self.get_rated_capacity(solution=True)
self.inv_max = self.inv_rated_capacity(sizing=True)
def inv_rated_capacity(self, sizing=False):
"""
Returns: the maximum energy times two for PV inverter rating
"""
if not sizing:
return self.rated_capacity
else:
try:
max_rated = self.rated_capacity.value
except AttributeError:
max_rated = self.rated_capacity
return max_rated
def get_rated_capacity(self, solution=False):
"""
Returns: the maximum energy that can be attained
"""
if not solution:
return self.rated_capacity
else:
try:
max_rated = self.rated_capacity.value
except AttributeError:
max_rated = self.rated_capacity
return max_rated
def sizing_summary(self):
"""
Returns: A dictionary describe this DER's size and captial costs.
"""
try:
rated_capacity = self.rated_capacity.value
except AttributeError:
rated_capacity = self.rated_capacity
sizing_results = {
'DER': self.name,
'Power Capacity (kW)': rated_capacity,
'Capital Cost ($/kW)': self.capital_cost_function}
# warn about tight sizing margins
if isinstance(self.rated_capacity, cvx.Variable):
sizing_margin1 = (abs(self.rated_capacity.value - self.max_rated_capacity) - 0.05 * self.max_rated_capacity)
sizing_margin2 = (abs(self.rated_capacity.value - self.min_rated_capacity) - 0.05 * self.min_rated_capacity)
if (sizing_margin1 < 0).any() or (sizing_margin2 < 0).any():
TellUser.warning(f"Difference between the optimal {self.name} rated capacity and user upper/lower "
"bound constraints is less than 5% of the value of user upper/lower bound constraints")
return sizing_results
def update_for_evaluation(self, input_dict):
""" Updates price related attributes with those specified in the input_dictionary
Args:
input_dict: hold input data, keys are the same as when initialized
"""
super().update_for_evaluation(input_dict)
cost_per_kw = input_dict.get('ccost_kW')
if cost_per_kw is not None:
self.capital_cost_function = cost_per_kw
def sizing_error(self):
"""
Returns: True if there is an input error
"""
if self.min_rated_capacity > self.max_rated_capacity:
TellUser.error(f'{self.unique_tech_id()} requires min_rated_capacity < max_rated_capacity.')
return True
def max_power_defined(self):
return self.is_power_sizing() and not self.max_rated_capacity
def replacement_cost(self):
"""
Returns: the capex of this DER for optimization
"""
try:
rated_capacity = self.rated_capacity.value
except AttributeError:
rated_capacity = self.rated_capacity
return np.dot(self.replacement_cost_function, [rated_capacity])
def proforma_report(self, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame):
Returns: A DateFrame of with each year in opt_year as the index and
the corresponding value this stream provided.
"""
if self.ppa:
analysis_years = self.variables_df.index.year.unique()
pro_forma = pd.DataFrame()
ppa_label = f"{self.unique_tech_id()} PPA"
# for each year of analysis
for year in analysis_years:
subset_max_production = self.maximum_generation(results.index.year == year)
# sum up total annual solar production (kWh)
total_annual_production = subset_max_production.sum() * self.dt
# multiply with Solar PPA Cost ($/kWh), and set at YEAR's value
pro_forma.loc[pd.Period(year, freq='y'), ppa_label] = total_annual_production * -self.ppa_cost
# fill forward
pro_forma = fill_forward_func(pro_forma, self.ppa_inflation)
# apply PPA inflation rate
pro_forma = apply_inflation_rate_func(pro_forma, self.ppa_inflation, min(analysis_years))
else:
pro_forma = super().proforma_report(apply_inflation_rate_func, fill_forward_func,
results)
return pro_forma
def tax_contribution(self, depreciation_schedules, year_idx, start_year):
if not self.ppa:
return super().tax_contribution(depreciation_schedules, year_idx, start_year)
def replacement_report(self, end_year, escalation_func):
if not self.ppa:
return super().replacement_report(end_year, escalation_func)
else:
return pd.Series()
def decommissioning_report(self, end_year):
if not self.ppa:
return super().decommissioning_report(end_year)
else:
return pd.Series()
def salvage_value_report(self, end_year):
if not self.ppa:
return super().salvage_value_report(end_year)
else:
return pd.Series()
| [
"pandas.DataFrame",
"dervet.MicrogridDER.ContinuousSizing.ContinuousSizing.__init__",
"cvxpy.NonPos",
"storagevet.Technology.PVSystem.PV.__init__",
"pandas.Period",
"pandas.Series",
"cvxpy.Variable",
"dervet.MicrogridDER.DERExtension.DERExtension.__init__",
"numpy.dot"
] | [((2372, 2406), 'storagevet.Technology.PVSystem.PV.__init__', 'PVSystem.PV.__init__', (['self', 'params'], {}), '(self, params)\n', (2392, 2406), False, 'from storagevet.Technology import PVSystem\n'), ((2415, 2450), 'dervet.MicrogridDER.DERExtension.DERExtension.__init__', 'DERExtension.__init__', (['self', 'params'], {}), '(self, params)\n', (2436, 2450), False, 'from dervet.MicrogridDER.DERExtension import DERExtension\n'), ((2459, 2498), 'dervet.MicrogridDER.ContinuousSizing.ContinuousSizing.__init__', 'ContinuousSizing.__init__', (['self', 'params'], {}), '(self, params)\n', (2484, 2498), False, 'from dervet.MicrogridDER.ContinuousSizing import ContinuousSizing\n'), ((10044, 10100), 'numpy.dot', 'np.dot', (['self.replacement_cost_function', '[rated_capacity]'], {}), '(self.replacement_cost_function, [rated_capacity])\n', (10050, 10100), True, 'import numpy as np\n'), ((2958, 3011), 'cvxpy.Variable', 'cvx.Variable', ([], {'name': 'f"""{self.name}rating"""', 'integer': '(True)'}), "(name=f'{self.name}rating', integer=True)\n", (2970, 3011), True, 'import cvxpy as cvx\n'), ((10660, 10674), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10672, 10674), True, 'import pandas as pd\n'), ((12053, 12064), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (12062, 12064), True, 'import pandas as pd\n'), ((12232, 12243), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (12241, 12243), True, 'import pandas as pd\n'), ((12407, 12418), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (12416, 12418), True, 'import pandas as pd\n'), ((3097, 3129), 'cvxpy.NonPos', 'cvx.NonPos', (['(-self.rated_capacity)'], {}), '(-self.rated_capacity)\n', (3107, 3129), True, 'import cvxpy as cvx\n'), ((3213, 3270), 'cvxpy.NonPos', 'cvx.NonPos', (['(self.min_rated_capacity - self.rated_capacity)'], {}), '(self.min_rated_capacity - self.rated_capacity)\n', (3223, 3270), True, 'import cvxpy as cvx\n'), ((3354, 3411), 'cvxpy.NonPos', 'cvx.NonPos', (['(self.rated_capacity - 
self.max_rated_capacity)'], {}), '(self.rated_capacity - self.max_rated_capacity)\n', (3364, 3411), True, 'import cvxpy as cvx\n'), ((11154, 11179), 'pandas.Period', 'pd.Period', (['year'], {'freq': '"""y"""'}), "(year, freq='y')\n", (11163, 11179), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import posthoc_learn.banalg as banalg
from posthoc_learn.config import posthoc_config as config
from posthoc_learn.conban_dataset import ConBanDataset
#=================
# Plot Cumulative Regret
#=================
def plot(title, regret, labels):
"""
@param title: graph title
@param regret: T+1 x len(bandits) cumulative regret
@param labels: label[i] for bandits[i]
Plots regret curve.
"""
plt.title(title)
t = np.arange(regret.shape[0])
for i, l in enumerate(labels):
plt.plot(t, regret[:, i], label=l)
T = (regret.shape[0] - 1)
plt.xlim(0, T)
plt.xlabel("Attempts")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.show()
#=================
# General Bandit Alg
#=================
def run(bandits, contexts, posthocs, loss, noise=0):
"""
@param bandits: list of initialized bandit algorithms
@param contexts: T x dF
@param posthocs: T x dG
@param loss: len(K)
"""
# Define constants
T = contexts.shape[0]
regrets = np.zeros((T + 1, len(bandits)))
print("Running for {0} rounds!".format(T))
for t in range(T):
if t % 100 == 0:
print("Round: %d" % t)
# Choose arm for each bandit
I_t = []
for bandit in bandits:
ret, _ = bandit.choice(t, contexts[t, :])
I_t.append(ret)
# Update bandits
for i, bandit in enumerate(bandits):
regrets[t+1, i] = loss[t, I_t[i]] - np.amin(loss[t, :])
bandit.update(contexts[t, :], I_t[i], loss[t, I_t[i]] + np.random.normal(0, noise), posthocs[t, :])
# Finished, return error array
print("Finished T=%d rounds!" % T)
cum_regrets = np.cumsum(regrets, axis=0)
return cum_regrets
def gen_posthoc(dG, rewards):
# Generate a random invertible matrix reward[i, :] = A * posthocs[i, :]
# (n x T) = (n x dg) * (dg x T)
T, n = rewards.shape
# Generate random matrix
A = np.random.rand(n, dG)
# Make sure it inverts
Ainv = np.linalg.inv(A.T @ A) @ A.T
assert Ainv.shape == (dG, n)
posthocs = (Ainv @ rewards.T).T
assert posthocs.shape == (T, dG)
return posthocs, Ainv
VAL_SIZE = 10000
def gen_context(mndata, dF, trueTest=False):
print("Loading Training Set...")
images, labels = mndata.load_training()
if trueTest:
print("Loading Test Set...")
images_test, labels_test = mndata.load_testing()
else:
print("Loading Validation Set...")
images_test = images[len(images) - VAL_SIZE:len(images)]
images = images[0:len(images) - VAL_SIZE]
labels_test = labels[len(labels) - VAL_SIZE:len(labels)]
labels = labels[0:len(labels) - VAL_SIZE]
# Format labels
labels = np.array(labels)
labels_test = np.array(labels_test)
Ttrain = len(labels)
Ttest = len(labels_test)
print("T_train=%d" % Ttrain)
print("T_val=%d" % Ttest)
n = labels.max() + 1
# Create 1-hot rewards
rewards = np.zeros((Ttrain, n))
rewards[np.arange(labels.size),labels] = 1
rewards_test = np.zeros((Ttest, n))
rewards_test[np.arange(labels_test.size),labels_test] = 1
# PCA Contexts
images = np.array(images)
images_test = np.array(images_test)
print("Performing PCA...")
pca = PCA(n_components=dF)
contexts = pca.fit_transform(images)
contexts_test = pca.transform(images_test)
assert contexts.shape == (Ttrain, dF)
assert contexts_test.shape == (Ttest, dF)
return contexts, rewards, contexts_test, rewards_test
def main(dF, dG):
cacheFile = "cache_"+str(dF)+".npz"
if Path(cacheFile).exists():
print("Loading from Cache...")
with np.load(cacheFile) as data:
contexts = data["contexts"]
rewards = data["rewards"]
contexts_test = data["contexts_test"]
rewards_test = data["rewards_test"]
else:
# Import MNIST
print("Loading MNIST...")
mndata = MNIST('./mnist')
mndata.gz = True
# Load Contexts / Rewards
contexts, rewards, contexts_test, rewards_test = gen_context(mndata, dF)
print("Saving Cache...")
np.savez_compressed(cacheFile, contexts=contexts, rewards=rewards, contexts_test=contexts_test, rewards_test=rewards_test)
# Convert rewards to losses
losses = 1 - rewards
losses_test = 1 - rewards_test
T, K = losses.shape
# Best Linear Fit Possible
"""
print("Best Linear Fit...")
print("Generating Matrices")
A = np.eye(dF)
b = np.zeros((dF, K))
for t in range(contexts.shape[0]):
A += np.outer(contexts[t, :], contexts[t, :])
b += np.outer(contexts[t, :], losses[t, :])
assert A.shape == (dF, dF)
assert b.shape == (dF, K)
print("Doing Fit")
phi = np.linalg.solve(A, b)
assert phi.shape == (dF, K)
print("Testing...")
err_count = 1.0
for i in range(contexts_test.shape[0]):
ans = contexts_test[i, :] @ phi
assert ans.size == K
if np.argmin(ans) != np.argmin(losses_test[i, :]):
err_count += 1.0
print("Error Rate: " + str(err_count / float(contexts_test.shape[0])))
"""
# Generate Post-Hoc Contexts
posthocs, _ = gen_posthoc(dG, losses_test)
# Constants
fLambda = 1E7
gLambda = 1E-7
# Define bandits
bandits = []
# Vanilla Greedy
bandits.append(banalg.LinUCB(K, dF, dG, fLambda, 0, 0.01))
bandits.append(banalg.LinUCB(K, dF, dG, fLambda, gLambda, 0.01))
# Vanilla TS
#bandits.append(banalg.Thompson(K, dF, dG, fLambda, 0, var))
# Post Hoc Greedy
#bandits.append(banalg.EpsilonGreedy(K, dF, dG, fLambda, gLambda, 0.1))
# Post Hoc Only
#bandits.append(banalg.Thompson(K, dF, dG, 0, gLambda, 0.01))
# Run experiment
print("Running Experiment...")
# Sample with replacement for bootstrapping
indices = np.random.randint(contexts_test.shape[0], size=contexts_test.shape[0])
regrets = run(bandits, contexts_test[indices, :], posthocs[indices, :], losses_test[indices, :])
# Plot Cumulative Regret
labels = []
for bandit in bandits:
labels.append(bandit.label)
title = "Augmented Contextual Bandit Regret"
#plot(title, regrets, labels)
# Save Regret Data
np.savez("regrets_{0}_{1}_{2}_{3}.npz".format(T, K, dF, dG),
regrets = regrets)
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: main.py <dF> <dG>")
sys.exit(-1)
main(int(sys.argv[1]), int(sys.argv[2]))
| [
"matplotlib.pyplot.title",
"numpy.load",
"numpy.amin",
"numpy.savez_compressed",
"numpy.random.randint",
"numpy.arange",
"pathlib.Path",
"numpy.random.normal",
"posthoc_learn.banalg.LinUCB",
"numpy.cumsum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.linalg.inv",
"matplotl... | [((640, 656), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (649, 656), True, 'import matplotlib.pyplot as plt\n'), ((665, 691), 'numpy.arange', 'np.arange', (['regret.shape[0]'], {}), '(regret.shape[0])\n', (674, 691), True, 'import numpy as np\n'), ((805, 819), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'T'], {}), '(0, T)\n', (813, 819), True, 'import matplotlib.pyplot as plt\n'), ((824, 846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Attempts"""'], {}), "('Attempts')\n", (834, 846), True, 'import matplotlib.pyplot as plt\n'), ((851, 882), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Regret"""'], {}), "('Cumulative Regret')\n", (861, 882), True, 'import matplotlib.pyplot as plt\n'), ((887, 899), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((904, 914), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (912, 914), True, 'import matplotlib.pyplot as plt\n'), ((1922, 1948), 'numpy.cumsum', 'np.cumsum', (['regrets'], {'axis': '(0)'}), '(regrets, axis=0)\n', (1931, 1948), True, 'import numpy as np\n'), ((2179, 2200), 'numpy.random.rand', 'np.random.rand', (['n', 'dG'], {}), '(n, dG)\n', (2193, 2200), True, 'import numpy as np\n'), ((2975, 2991), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2983, 2991), True, 'import numpy as np\n'), ((3010, 3031), 'numpy.array', 'np.array', (['labels_test'], {}), '(labels_test)\n', (3018, 3031), True, 'import numpy as np\n'), ((3216, 3237), 'numpy.zeros', 'np.zeros', (['(Ttrain, n)'], {}), '((Ttrain, n))\n', (3224, 3237), True, 'import numpy as np\n'), ((3304, 3324), 'numpy.zeros', 'np.zeros', (['(Ttest, n)'], {}), '((Ttest, n))\n', (3312, 3324), True, 'import numpy as np\n'), ((3420, 3436), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (3428, 3436), True, 'import numpy as np\n'), ((3455, 3476), 'numpy.array', 'np.array', (['images_test'], {}), '(images_test)\n', (3463, 
3476), True, 'import numpy as np\n'), ((6139, 6209), 'numpy.random.randint', 'np.random.randint', (['contexts_test.shape[0]'], {'size': 'contexts_test.shape[0]'}), '(contexts_test.shape[0], size=contexts_test.shape[0])\n', (6156, 6209), True, 'import numpy as np\n'), ((735, 769), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'regret[:, i]'], {'label': 'l'}), '(t, regret[:, i], label=l)\n', (743, 769), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2262), 'numpy.linalg.inv', 'np.linalg.inv', (['(A.T @ A)'], {}), '(A.T @ A)\n', (2253, 2262), True, 'import numpy as np\n'), ((4408, 4534), 'numpy.savez_compressed', 'np.savez_compressed', (['cacheFile'], {'contexts': 'contexts', 'rewards': 'rewards', 'contexts_test': 'contexts_test', 'rewards_test': 'rewards_test'}), '(cacheFile, contexts=contexts, rewards=rewards,\n contexts_test=contexts_test, rewards_test=rewards_test)\n', (4427, 4534), True, 'import numpy as np\n'), ((5637, 5679), 'posthoc_learn.banalg.LinUCB', 'banalg.LinUCB', (['K', 'dF', 'dG', 'fLambda', '(0)', '(0.01)'], {}), '(K, dF, dG, fLambda, 0, 0.01)\n', (5650, 5679), True, 'import posthoc_learn.banalg as banalg\n'), ((5701, 5749), 'posthoc_learn.banalg.LinUCB', 'banalg.LinUCB', (['K', 'dF', 'dG', 'fLambda', 'gLambda', '(0.01)'], {}), '(K, dF, dG, fLambda, gLambda, 0.01)\n', (5714, 5749), True, 'import posthoc_learn.banalg as banalg\n'), ((6727, 6739), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (6735, 6739), False, 'import sys\n'), ((3250, 3272), 'numpy.arange', 'np.arange', (['labels.size'], {}), '(labels.size)\n', (3259, 3272), True, 'import numpy as np\n'), ((3342, 3369), 'numpy.arange', 'np.arange', (['labels_test.size'], {}), '(labels_test.size)\n', (3351, 3369), True, 'import numpy as np\n'), ((3843, 3858), 'pathlib.Path', 'Path', (['cacheFile'], {}), '(cacheFile)\n', (3847, 3858), False, 'from pathlib import Path\n'), ((3921, 3939), 'numpy.load', 'np.load', (['cacheFile'], {}), '(cacheFile)\n', (3928, 3939), True, 'import numpy as np\n'), 
((1697, 1716), 'numpy.amin', 'np.amin', (['loss[t, :]'], {}), '(loss[t, :])\n', (1704, 1716), True, 'import numpy as np\n'), ((1785, 1811), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise'], {}), '(0, noise)\n', (1801, 1811), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# --------------------------------------------
# Author: <NAME>
# Date: 2021/6/13 17:46
# Description:
# --------------------------------------------
import mkl
import faiss
import pickle
import numpy as np
from typing import List, Dict
from .constant import INDEX_PATH, FEATURES_PATH
mkl.get_max_threads()
class SearchIndex(object):
def __init__(self, index_path:str=INDEX_PATH, feature_path:str=FEATURES_PATH):
assert index_path, "index path is not exists"
assert feature_path, "feature path is not exists"
self._index = faiss.read_index(index_path)
self._features = self.load_features(feature_path)
self._id2label = {v.id:k for k, v in self._features.items()}
def load_features(self, path:str):
with open(path, 'rb') as reader:
features = pickle.load(reader)
return features
def search_by_labels(self, labels:List[str], k:int=5) -> List[Dict]:
vectors = [self._features[label].vector for label in labels if label in self._features]
ids = [self._features[label].id for label in labels if label in self._features]
results = self._search(ids, labels, vectors, k+1)
return results
def search_by_vectors(self, vectors:List[float], k:int=5) -> List[Dict]:
ids = [-1] * len(vectors)
labels = ['null'] * len(vectors)
results = self._search(ids, labels, vectors, k)
return results
def _search(self, ids:List[int], labels:List[str], vectors:List[float], k:int) -> List[Dict]:
def pack_neighbor(id, score):
return {'id':int(id), 'label': str(self._id2label[id]), 'score': float(score)}
def pack_result(id, label, vector, neighbors):
return {'id':id, 'label': label, 'vector': vector.tolist(), 'neighbors': neighbors}
results = []
vectors = [np.array(vec, dtype=np.float32) for vec in vectors]
vectors = np.atleast_2d(vectors)
scores, neighbors = self._index.search(vectors, k) if vectors.size > 0 else ([], [])
for id, label, vector, score, neighbor in zip(ids, labels, vectors, scores, neighbors):
neighbor_score = zip(neighbor, score)
neighbor_score = [(n_id, n_score) for n_id, n_score in neighbor_score if n_id != id and n_id != -1]
neighbor_score = [pack_neighbor(n_id, n_score) for n_id, n_score in neighbor_score]
results.append(pack_result(id, label, vector, neighbor_score))
return results
| [
"faiss.read_index",
"pickle.load",
"numpy.array",
"mkl.get_max_threads",
"numpy.atleast_2d"
] | [((318, 339), 'mkl.get_max_threads', 'mkl.get_max_threads', ([], {}), '()\n', (337, 339), False, 'import mkl\n'), ((587, 615), 'faiss.read_index', 'faiss.read_index', (['index_path'], {}), '(index_path)\n', (603, 615), False, 'import faiss\n'), ((1952, 1974), 'numpy.atleast_2d', 'np.atleast_2d', (['vectors'], {}), '(vectors)\n', (1965, 1974), True, 'import numpy as np\n'), ((847, 866), 'pickle.load', 'pickle.load', (['reader'], {}), '(reader)\n', (858, 866), False, 'import pickle\n'), ((1882, 1913), 'numpy.array', 'np.array', (['vec'], {'dtype': 'np.float32'}), '(vec, dtype=np.float32)\n', (1890, 1913), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import griddata
# functions to massage the raw data including data from bad channels.
def fill_bad_channel(pdata, rpos, zpos, good_channels, cutoff):
# cutoff = 0.003 [m]
# ## fake data
# dist = np.sqrt((rpos - 1.8)**2 + (zpos - 0)**2)
# pdata = 0.1*(1 - (dist/0.5)**2)
# pdata = pdata * good_channels
# remove NaN
pdata[np.isnan(pdata)] = 0
# recovery
for c in range(pdata.size):
if good_channels[c] == 0:
dist = np.sqrt((rpos - rpos[c])**2 + (zpos - zpos[c])**2)
dfct = np.exp(-2*(dist/cutoff)**4) * good_channels
pdata[c] = np.sum(pdata * dfct)/np.sum(dfct)
return pdata
def interp_pdata(pdata, rpos, zpos, istep, imethod):
# interpolation
# istep = 0.002
# imethod = 'cubic'
ri = np.arange(np.min(rpos), np.max(rpos), istep)
zi = np.arange(np.min(zpos), np.max(zpos), istep)
ri, zi = np.meshgrid(ri, zi)
pi = griddata((rpos,zpos),pdata,(ri,zi),method=imethod)
return ri, zi, pi | [
"numpy.meshgrid",
"numpy.sum",
"scipy.interpolate.griddata",
"numpy.isnan",
"numpy.min",
"numpy.max",
"numpy.exp",
"numpy.sqrt"
] | [((949, 968), 'numpy.meshgrid', 'np.meshgrid', (['ri', 'zi'], {}), '(ri, zi)\n', (960, 968), True, 'import numpy as np\n'), ((978, 1033), 'scipy.interpolate.griddata', 'griddata', (['(rpos, zpos)', 'pdata', '(ri, zi)'], {'method': 'imethod'}), '((rpos, zpos), pdata, (ri, zi), method=imethod)\n', (986, 1033), False, 'from scipy.interpolate import griddata\n'), ((394, 409), 'numpy.isnan', 'np.isnan', (['pdata'], {}), '(pdata)\n', (402, 409), True, 'import numpy as np\n'), ((847, 859), 'numpy.min', 'np.min', (['rpos'], {}), '(rpos)\n', (853, 859), True, 'import numpy as np\n'), ((861, 873), 'numpy.max', 'np.max', (['rpos'], {}), '(rpos)\n', (867, 873), True, 'import numpy as np\n'), ((901, 913), 'numpy.min', 'np.min', (['zpos'], {}), '(zpos)\n', (907, 913), True, 'import numpy as np\n'), ((915, 927), 'numpy.max', 'np.max', (['zpos'], {}), '(zpos)\n', (921, 927), True, 'import numpy as np\n'), ((520, 574), 'numpy.sqrt', 'np.sqrt', (['((rpos - rpos[c]) ** 2 + (zpos - zpos[c]) ** 2)'], {}), '((rpos - rpos[c]) ** 2 + (zpos - zpos[c]) ** 2)\n', (527, 574), True, 'import numpy as np\n'), ((590, 623), 'numpy.exp', 'np.exp', (['(-2 * (dist / cutoff) ** 4)'], {}), '(-2 * (dist / cutoff) ** 4)\n', (596, 623), True, 'import numpy as np\n'), ((657, 677), 'numpy.sum', 'np.sum', (['(pdata * dfct)'], {}), '(pdata * dfct)\n', (663, 677), True, 'import numpy as np\n'), ((678, 690), 'numpy.sum', 'np.sum', (['dfct'], {}), '(dfct)\n', (684, 690), True, 'import numpy as np\n')] |
# local imports
import time
import yaml
import json
import numpy as np
import os
def yaml_import(file_directory):
with open(file_directory) as f:
return yaml.full_load(f)
def write_json(json_data, file_name):
with open('../static/{}.json'.format(file_name), 'w') as f:
json.dump(json_data, f)
def load_json(file_name):
with open('../static/{}.json'.format(file_name)) as json_file:
return json.load(json_file)
def runtime_print(start_time):
end_time = time.time()
print('total run time: {} sec'.format(round(end_time - start_time, 2)))
def to_tuple(item):
item = item.replace(',', '')
item = item.replace('(', '')
item = item.replace(')', '')
item = item.split(' ')
# create tuple from the two separate strings
return float(item[0]), float(item[1])
def import_numpy(is_test=False):
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if not is_test:
with open('feature_extraction/extraction_outputs/data_array.npy', 'rb') as f:
data = np.load(f)
return data
else:
with open('feature_extraction/extraction_outputs/test_array.npy', 'rb') as g:
data = np.load(g)
return data
def remove_spacebar_from_file(file, folder):
dir_path = '{}/feature_extraction/essentia_extraction/{}'.format(os.path.dirname(os.path.realpath(__file__)), folder)
os.chdir(dir_path)
file_nospace = file.replace(" ", "")
os.rename(file, file_nospace)
def export_history(history):
lowest_losses = []
lowest_loss = 0
highest_accs = []
highest_acc = 0
for _, loss in enumerate(history['loss']):
if _:
if loss < lowest_loss:
lowest_loss = loss
lowest_losses.append('epoch: {}, lowest loss: {}'.format(_, round(lowest_loss, 3)))
else:
lowest_loss = loss
lowest_losses.append('epoch: {}, lowest loss: {}'.format(_, round(lowest_loss, 3)))
for _, acc in enumerate(history['accuracy']):
if _:
if acc > highest_acc:
highest_acc = acc
highest_accs.append('epoch: {}, highest accuracy: {}'.format(_, round(highest_acc, 7)))
else:
highest_acc = acc
highest_accs.append('epoch: {}, highest accuracy: {}'.format(_, round(highest_acc, 7)))
f = open("static/training_results.txt", "w+")
for i, acc in enumerate(highest_accs):
if not i:
f.write('Highest accuracy history: \n')
else:
f.write('{}\n'.format(acc))
for i, loss in enumerate(lowest_losses):
if not i:
f.write('\nHighest loss history: \n')
else:
f.write('{}\n'.format(loss))
f.close()
| [
"json.dump",
"numpy.load",
"json.load",
"os.rename",
"yaml.full_load",
"os.path.realpath",
"time.time",
"os.chdir"
] | [((524, 535), 'time.time', 'time.time', ([], {}), '()\n', (533, 535), False, 'import time\n'), ((1460, 1478), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (1468, 1478), False, 'import os\n'), ((1526, 1555), 'os.rename', 'os.rename', (['file', 'file_nospace'], {}), '(file, file_nospace)\n', (1535, 1555), False, 'import os\n'), ((177, 194), 'yaml.full_load', 'yaml.full_load', (['f'], {}), '(f)\n', (191, 194), False, 'import yaml\n'), ((312, 335), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (321, 335), False, 'import json\n'), ((451, 471), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (460, 471), False, 'import json\n'), ((933, 959), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (949, 959), False, 'import os\n'), ((1092, 1102), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1099, 1102), True, 'import numpy as np\n'), ((1246, 1256), 'numpy.load', 'np.load', (['g'], {}), '(g)\n', (1253, 1256), True, 'import numpy as np\n'), ((1418, 1444), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1434, 1444), False, 'import os\n')] |
"""
This module contains the different solvers that are available for Pastas.
All solvers inherit from the BaseSolver class, which contains general method
for selecting the correct time series to misfit and options to weight the
residuals or noise series.
To solve a model the following syntax can be used:
>>> ml.solve(solver=ps.LeastSquares)
"""
from logging import getLogger
import numpy as np
from pandas import DataFrame
from scipy.linalg import svd
from scipy.optimize import least_squares
logger = getLogger(__name__)
class BaseSolver:
_name = "BaseSolver"
__doc__ = """All solver instances inherit from the BaseSolver class.
Attributes
----------
model: pastas.Model instance
pcor: pandas.DataFrame
Pandas DataFrame with the correlation between the optimized parameters.
pcov: pandas.DataFrame
Pandas DataFrame with the correlation between the optimized parameters.
nfev: int
Number of times the model is called during optimization.
result: object
The object returned by the minimization method that is used. It depends
on the solver what is actually returned.
"""
def __init__(self, ml, pcov=None, nfev=None, obj_func=None, **kwargs):
self.ml = ml
self.pcov = pcov # Covariances of the parameters
if pcov is None:
self.pcor = None # Correlation between parameters
else:
self.pcor = self._get_correlations(pcov)
self.nfev = nfev # number of function evaluations
self.obj_func = obj_func
self.result = None # Object returned by the optimization method
def misfit(self, p, noise, weights=None, callback=None,
returnseparate=False):
"""This method is called by all solvers to obtain a series that are
minimized in the optimization process. It handles the application of
the weights, a noisemodel and other optimization options.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
noise: Boolean
weights: pandas.Series, optional
pandas Series by which the residual or noise series are
multiplied. Typically values between 0 and 1.
callback: ufunc, optional
function that is called after each iteration. the parameters are
provided to the func. E.g. "callback(parameters)"
returnseparate: bool, optional
return residuals, noise, noiseweights
Returns
-------
rv:
residuals series (if noise=False) or noise series (if noise=True)
"""
# Get the residuals or the noise
if noise:
rv = self.ml.noise(p) * \
self.ml.noise_weights(p)
else:
rv = self.ml.residuals(p)
# Determine if weights need to be applied
if weights is not None:
weights = weights.reindex(rv.index)
weights.fillna(1.0, inplace=True)
rv = rv.multiply(weights)
if callback:
callback(p)
if returnseparate:
return self.ml.residuals(p).values, \
self.ml.noise(p).values, \
self.ml.noise_weights(p).values
return rv.values
def prediction_interval(self, n=1000, alpha=0.05, **kwargs):
"""Method to calculate the prediction interval for the simulation.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
prediction interval (for alpha=0.05)
Notes
-----
Add residuals assuming a Normal distribution with standard deviation
equal to the standard deviation of the residuals.
"""
sigr = self.ml.residuals().std()
data = self._get_realizations(func=self.ml.simulate, n=n, name=None,
**kwargs)
data = data + sigr * np.random.randn(data.shape[0], data.shape[1])
q = [alpha / 2, 1 - alpha / 2]
rv = data.quantile(q, axis=1).transpose()
return rv
def ci_simulation(self, n=1000, alpha=0.05, **kwargs):
"""Method to calculate the confidence interval for the simulation.
Returns
-------
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
return self._get_confidence_interval(func=self.ml.simulate, n=n,
alpha=alpha, **kwargs)
def ci_block_response(self, name, n=1000, alpha=0.05, **kwargs):
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_block_response,
n=n, alpha=alpha, name=name,
dt=dt, **kwargs)
def ci_step_response(self, name, n=1000, alpha=0.05, **kwargs):
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_step_response,
n=n, alpha=alpha, name=name,
dt=dt, **kwargs)
def ci_contribution(self, name, n=1000, alpha=0.05, **kwargs):
return self._get_confidence_interval(func=self.ml.get_contribution,
n=n, alpha=alpha, name=name,
**kwargs)
def _get_realizations(self, func, n=None, name=None, **kwargs):
"""Internal method to obtain n number of parameter realizations."""
if name:
kwargs["name"] = name
parameter_sample = self._get_parameter_sample(n=n, name=name)
data = {}
for i, p in enumerate(parameter_sample):
data[i] = func(p=p, **kwargs)
return DataFrame.from_dict(data, orient="columns")
def _get_confidence_interval(self, func, n=None, name=None, alpha=0.05,
**kwargs):
"""Internal method to obtain a confidence interval."""
q = [alpha / 2, 1 - alpha / 2]
data = self._get_realizations(func=func, n=n, name=name, **kwargs)
return data.quantile(q=q, axis=1).transpose()
    def _get_parameter_sample(self, name=None, n=None):
        """Internal method to obtain a sample of parameter sets.
        Parameters
        ----------
        n: int, optional
            Number of random samples drawn from the bivariate normal
            distribution.
        name: str, optional
            Name of the stressmodel or model component to obtain the
            parameters for.
        Returns
        -------
        numpy.ndarray
            Numpy array with N parameter samples.
        Notes
        -----
        Samples are drawn from a multivariate normal distribution and
        truncated to the parameter bounds by rejection sampling. If the
        bounds are tight, fewer than `n` samples may be returned because
        the rejection loop gives up after a fixed number of iterations.
        """
        p = self.ml.get_parameters(name=name)
        pcov = self._get_covariance_matrix(name=name)
        if name is None:
            parameters = self.ml.parameters
        else:
            parameters = self.ml.parameters.loc[
                self.ml.parameters.name == name]
        # Missing bounds are treated as unbounded (-inf / +inf).
        pmin = parameters.pmin.fillna(-np.inf).values
        pmax = parameters.pmax.fillna(np.inf).values
        if n is None:
            # only use parameters that are varied.
            n = int(10 ** parameters.vary.sum())
        samples = np.zeros((0, p.size))
        # Start truncated multivariate sampling
        it = 0
        while samples.shape[0] < n:
            s = np.random.multivariate_normal(p, pcov, size=(n,),
                                              check_valid="ignore")
            # Keep only the draws that fall inside the bounds (rejection).
            accept = s[(np.min(s - pmin, axis=1) >= 0) &
                       (np.max(s - pmax, axis=1) <= 0)]
            samples = np.concatenate((samples, accept), axis=0)
            # Make sure there's no endless while loop
            if it > 10:
                break
            else:
                it += 1
        return samples[:n, :]
def _get_covariance_matrix(self, name=None):
"""Internal method to obtain the covariance matrix from the model.
Parameters
----------
name: str, optional
Name of the stressmodel or model component to obtain the
parameters for.
Returns
-------
pcov: pandas.DataFrame
Pandas DataFrame with the covariances for the parameters.
"""
if name:
index = self.ml.parameters.loc[self.ml.parameters.loc[:,
"name"] == name].index
else:
index = self.ml.parameters.index
pcov = self.pcov.reindex(index=index, columns=index).fillna(0)
return pcov
@staticmethod
def _get_correlations(pcov):
"""Internal method to obtain the parameter correlations from the
covariance matrix.
Parameters
----------
pcov: pandas.DataFrame
n x n Pandas DataFrame with the covariances.
Returns
-------
pcor: pandas.DataFrame
n x n Pandas DataFrame with the correlations.
"""
index = pcov.index
pcov = pcov.to_numpy()
v = np.sqrt(np.diag(pcov))
with np.errstate(divide='ignore', invalid='ignore'):
corr = pcov / np.outer(v, v)
corr[pcov == 0] = 0
pcor = DataFrame(data=corr, index=index, columns=index)
return pcor
def to_dict(self):
data = {
"name": self._name,
"pcov": self.pcov,
"nfev": self.nfev,
"obj_func": self.obj_func
}
return data
class LeastSquares(BaseSolver):
    """Solver that wraps :func:`scipy.optimize.least_squares`."""
    _name = "LeastSquares"
    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solver based on Scipy's least_squares method [scipy_ref]_.
        Notes
        -----
        This class is the default solve method called by the pastas Model solve
        method. All kwargs provided to the Model.solve() method are forwarded
        to the solver. From there, they are forwarded to Scipy least_squares
        solver.
        Examples
        --------
        >>> ml.solve(solver=ps.LeastSquares)
        References
        ----------
        .. [scipy_ref] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
        """
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
    def solve(self, noise=True, weights=None, callback=None, **kwargs):
        """Optimize the model parameters with scipy's least_squares.

        Parameters
        ----------
        noise: bool, optional
            Forwarded to the misfit function.
        weights: optional
            Forwarded to the misfit function.
        callback: callable, optional
            Forwarded to the misfit function; called with the parameters.

        Returns
        -------
        success: bool
            Whether the optimizer reported success.
        optimal: numpy.ndarray
            Full parameter vector (fixed parameters keep their value).
        stderr: numpy.ndarray
            Standard errors for the varying parameters; NaN elsewhere.
        """
        # Boolean mask of parameters that are optimized; the rest keep
        # their initial values.
        self.vary = self.ml.parameters.vary.values.astype(bool)
        self.initial = self.ml.parameters.initial.values.copy()
        parameters = self.ml.parameters.loc[self.vary]
        # Set the boundaries; missing bounds become -inf/+inf (unbounded).
        bounds = (np.where(parameters.pmin.isnull(), -np.inf, parameters.pmin),
                  np.where(parameters.pmax.isnull(), np.inf, parameters.pmax))
        self.result = least_squares(self.objfunction, bounds=bounds,
                                    x0=parameters.initial.values,
                                    args=(noise, weights, callback), **kwargs)
        # Covariances are derived from the jacobian at the optimum.
        self.pcov = DataFrame(self._get_covariances(self.result.jac,
                                                    self.result.cost),
                              index=parameters.index, columns=parameters.index)
        self.pcor = self._get_correlations(self.pcov)
        self.nfev = self.result.nfev
        self.obj_func = self.result.cost
        # Prepare return values
        success = self.result.success
        # NOTE: self.initial is mutated in place by objfunction during the
        # optimization, so `optimal` aliases that array.
        optimal = self.initial
        optimal[self.vary] = self.result.x
        stderr = np.zeros(len(optimal)) * np.nan  # NaN for fixed parameters
        stderr[self.vary] = np.sqrt(np.diag(self.pcov))
        return success, optimal, stderr
    def objfunction(self, p, noise, weights, callback):
        """Return the misfit for the varying parameters `p`.

        The full parameter vector is reconstructed by writing `p` into
        the varying positions of self.initial (in place, via aliasing).
        """
        par = self.initial
        par[self.vary] = p
        return self.misfit(p=par, noise=noise, weights=weights,
                           callback=callback)
    def _get_covariances(self, jacobian, cost, absolute_sigma=False):
        """Internal method to get the covariance matrix from the jacobian.
        Parameters
        ----------
        jacobian: numpy.ndarray
        cost: float
        absolute_sigma: bool
            Default is False. If False, the covariance is scaled by the
            residual variance (cost per degree of freedom).
        Returns
        -------
        pcov: numpy.array
            numpy array with the covariance matrix.
        Notes
        -----
        This method is copied from Scipy, please refer to:
        https://github.com/scipy/scipy/blob/v1.0.0/scipy/optimize/optimize.py
        """
        cost = 2 * cost # res.cost is half sum of squares!
        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(jacobian, full_matrices=False)
        threshold = np.finfo(float).eps * max(jacobian.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s ** 2, VT)
        n_param = self.ml.parameters.index.size
        warn_cov = False
        if pcov is None:
            # indeterminate covariance
            pcov = np.zeros((n_param, n_param), dtype=float)
            pcov.fill(np.inf)
            warn_cov = True
        elif not absolute_sigma:
            if self.ml.oseries.series.index.size > n_param:
                # Scale by the residual variance per degree of freedom.
                s_sq = cost / (self.ml.oseries.series.index.size - n_param)
                pcov = pcov * s_sq
            else:
                # Too few observations to estimate the residual variance.
                pcov.fill(np.inf)
                warn_cov = True
        if warn_cov:
            logger.warning(
                'Covariance of the parameters could not be estimated')
        return pcov
class LmfitSolve(BaseSolver):
    """Solver that wraps the lmfit minimizers."""
    _name = "LmfitSolve"
    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solving the model using the LmFit solver [LM]_.
        This is basically a wrapper around the scipy solvers, adding some
        cool functionality for boundary conditions.
        References
        ----------
        .. [LM] https://github.com/lmfit/lmfit-py/
        """
        try:
            global lmfit
            import lmfit as lmfit  # Imported here so lmfit stays an optional dependency
        except ImportError:
            msg = "lmfit not installed. Please install lmfit first."
            raise ImportError(msg)
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
    def solve(self, noise=True, weights=None, callback=None, method="leastsq",
              **kwargs):
        """Optimize the parameters with lmfit and store the results.

        Returns
        -------
        success: bool
        optimal: numpy.ndarray
            Optimal parameter values.
        stderr: numpy.ndarray
            Standard errors reported by lmfit.
        """
        # Deal with the parameters
        parameters = lmfit.Parameters()
        p = self.ml.parameters.loc[:, ['initial', 'pmin', 'pmax', 'vary']]
        for k in p.index:
            # NaN entries (missing bounds) are passed to lmfit as None.
            pp = np.where(p.loc[k].isnull(), None, p.loc[k])
            parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3])
        # Create the Minimizer object and minimize
        self.mini = lmfit.Minimizer(userfcn=self.objfunction, calc_covar=True,
                                    fcn_args=(noise, weights, callback),
                                    params=parameters, **kwargs)
        self.result = self.mini.minimize(method=method)
        # Set all parameter attributes
        pcov = None
        if hasattr(self.result, "covar"):
            if self.result.covar is not None:
                pcov = self.result.covar
        names = self.result.var_names
        self.pcov = DataFrame(pcov, index=names, columns=names, dtype=float)
        self.pcor = self._get_correlations(self.pcov)
        # Set all optimization attributes
        self.nfev = self.result.nfev
        self.obj_func = self.result.chisqr
        # Some lmfit methods do not report a success flag; assume success.
        if hasattr(self.result, "success"):
            success = self.result.success
        else:
            success = True
        optimal = np.array([p.value for p in self.result.params.values()])
        stderr = np.array([p.stderr for p in self.result.params.values()])
        idx = None
        if "is_weighted" in kwargs:
            if not kwargs["is_weighted"]:
                # NOTE(review): drops the last parameter — presumably an
                # extra parameter lmfit adds when is_weighted=False; confirm.
                idx = -1
        return success, optimal[:idx], stderr[:idx]
    def objfunction(self, parameters, noise, weights, callback):
        """Return the misfit for an lmfit Parameters object."""
        p = np.array([p.value for p in parameters.values()])
        return self.misfit(p=p, noise=noise, weights=weights,
                           callback=callback)
class LmfitSolveNew(BaseSolver):
    """Variant of LmfitSolve with a variance-penalized objective.

    NOTE(review): this class is a near-duplicate of LmfitSolve; only
    objfunction and the dtype handling of the pcov DataFrame differ.
    Its _name is also "LmfitSolve", so the two classes cannot be told
    apart by name — confirm whether this is intentional.
    """
    _name = "LmfitSolve"
    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solving the model using the LmFit solver [LM]_.
        This is basically a wrapper around the scipy solvers, adding some
        cool functionality for boundary conditions.
        References
        ----------
        .. [LM] https://github.com/lmfit/lmfit-py/
        """
        try:
            global lmfit
            import lmfit as lmfit  # Imported here so lmfit stays an optional dependency
        except ImportError:
            msg = "lmfit not installed. Please install lmfit first."
            raise ImportError(msg)
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
    def solve(self, noise=True, weights=None, callback=None, method="leastsq",
              **kwargs):
        """Optimize the parameters with lmfit and store the results.

        Returns
        -------
        success: bool
        optimal: numpy.ndarray
            Optimal parameter values.
        stderr: numpy.ndarray
            Standard errors reported by lmfit.
        """
        # Deal with the parameters
        parameters = lmfit.Parameters()
        p = self.ml.parameters.loc[:, ['initial', 'pmin', 'pmax', 'vary']]
        for k in p.index:
            # NaN entries (missing bounds) are passed to lmfit as None.
            pp = np.where(p.loc[k].isnull(), None, p.loc[k])
            parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3])
        # Create the Minimizer object and minimize
        self.mini = lmfit.Minimizer(userfcn=self.objfunction, calc_covar=True,
                                    fcn_args=(noise, weights, callback),
                                    params=parameters, **kwargs)
        self.result = self.mini.minimize(method=method)
        # Set all parameter attributes
        pcov = None
        if hasattr(self.result, "covar"):
            if self.result.covar is not None:
                pcov = self.result.covar
        names = self.result.var_names
        self.pcov = DataFrame(pcov, index=names, columns=names)
        self.pcor = self._get_correlations(self.pcov)
        # Set all optimization attributes
        self.nfev = self.result.nfev
        self.obj_func = self.result.chisqr
        # Some lmfit methods do not report a success flag; assume success.
        if hasattr(self.result, "success"):
            success = self.result.success
        else:
            success = True
        optimal = np.array([p.value for p in self.result.params.values()])
        stderr = np.array([p.stderr for p in self.result.params.values()])
        idx = None
        if "is_weighted" in kwargs:
            if not kwargs["is_weighted"]:
                # NOTE(review): drops the last parameter — presumably an
                # extra parameter lmfit adds when is_weighted=False; confirm.
                idx = -1
        return success, optimal[:idx], stderr[:idx]
    def objfunction(self, parameters, noise, weights, callback):
        """Objective mixing the weighted noise sum of squares with a
        variance-dependent penalty term.

        NOTE(review): this resembles a (negative log-)likelihood for
        weighted residuals — confirm against the intended reference.
        """
        param = np.array([p.value for p in parameters.values()])
        res, noise, weights = self.misfit(param, noise, weights, callback,
                                          returnseparate=True)
        var_res = np.var(res, ddof=1)  # sample variance of the residuals
        weighted_noise = noise * weights
        extraterm = np.sum(np.log(var_res / weights ** 2))
        rv = np.sum(weighted_noise ** 2) / var_res + extraterm
        return rv
class MonteCarlo(BaseSolver):
    """Stub solver: performs no optimization and returns no estimates."""
    _name = "MonteCarlo"
    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
    def solve(self):
        # Nothing is optimized; report success with no parameter values.
        return True, None, None
| [
"numpy.sum",
"scipy.linalg.svd",
"numpy.diag",
"lmfit.Minimizer",
"pandas.DataFrame",
"lmfit.Parameters",
"numpy.random.randn",
"scipy.optimize.least_squares",
"numpy.finfo",
"numpy.max",
"numpy.var",
"pandas.DataFrame.from_dict",
"numpy.min",
"numpy.dot",
"numpy.concatenate",
"numpy.o... | [((514, 533), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'from logging import getLogger\n'), ((6274, 6317), 'pandas.DataFrame.from_dict', 'DataFrame.from_dict', (['data'], {'orient': '"""columns"""'}), "(data, orient='columns')\n", (6293, 6317), False, 'from pandas import DataFrame\n'), ((7717, 7738), 'numpy.zeros', 'np.zeros', (['(0, p.size)'], {}), '((0, p.size))\n', (7725, 7738), True, 'import numpy as np\n'), ((9716, 9764), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'corr', 'index': 'index', 'columns': 'index'}), '(data=corr, index=index, columns=index)\n', (9725, 9764), False, 'from pandas import DataFrame\n'), ((11244, 11367), 'scipy.optimize.least_squares', 'least_squares', (['self.objfunction'], {'bounds': 'bounds', 'x0': 'parameters.initial.values', 'args': '(noise, weights, callback)'}), '(self.objfunction, bounds=bounds, x0=parameters.initial.values,\n args=(noise, weights, callback), **kwargs)\n', (11257, 11367), False, 'from scipy.optimize import least_squares\n'), ((13034, 13068), 'scipy.linalg.svd', 'svd', (['jacobian'], {'full_matrices': '(False)'}), '(jacobian, full_matrices=False)\n', (13037, 13068), False, 'from scipy.linalg import svd\n'), ((13207, 13232), 'numpy.dot', 'np.dot', (['(VT.T / s ** 2)', 'VT'], {}), '(VT.T / s ** 2, VT)\n', (13213, 13232), True, 'import numpy as np\n'), ((14811, 14829), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (14827, 14829), True, 'import lmfit as lmfit\n'), ((15141, 15269), 'lmfit.Minimizer', 'lmfit.Minimizer', ([], {'userfcn': 'self.objfunction', 'calc_covar': '(True)', 'fcn_args': '(noise, weights, callback)', 'params': 'parameters'}), '(userfcn=self.objfunction, calc_covar=True, fcn_args=(noise,\n weights, callback), params=parameters, **kwargs)\n', (15156, 15269), True, 'import lmfit as lmfit\n'), ((15642, 15698), 'pandas.DataFrame', 'DataFrame', (['pcov'], {'index': 'names', 'columns': 'names', 'dtype': 'float'}), '(pcov, 
index=names, columns=names, dtype=float)\n', (15651, 15698), False, 'from pandas import DataFrame\n'), ((17460, 17478), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (17476, 17478), True, 'import lmfit as lmfit\n'), ((17790, 17918), 'lmfit.Minimizer', 'lmfit.Minimizer', ([], {'userfcn': 'self.objfunction', 'calc_covar': '(True)', 'fcn_args': '(noise, weights, callback)', 'params': 'parameters'}), '(userfcn=self.objfunction, calc_covar=True, fcn_args=(noise,\n weights, callback), params=parameters, **kwargs)\n', (17805, 17918), True, 'import lmfit as lmfit\n'), ((18291, 18334), 'pandas.DataFrame', 'DataFrame', (['pcov'], {'index': 'names', 'columns': 'names'}), '(pcov, index=names, columns=names)\n', (18300, 18334), False, 'from pandas import DataFrame\n'), ((19253, 19272), 'numpy.var', 'np.var', (['res'], {'ddof': '(1)'}), '(res, ddof=1)\n', (19259, 19272), True, 'import numpy as np\n'), ((7855, 7926), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['p', 'pcov'], {'size': '(n,)', 'check_valid': '"""ignore"""'}), "(p, pcov, size=(n,), check_valid='ignore')\n", (7884, 7926), True, 'import numpy as np\n'), ((8108, 8149), 'numpy.concatenate', 'np.concatenate', (['(samples, accept)'], {'axis': '(0)'}), '((samples, accept), axis=0)\n', (8122, 8149), True, 'import numpy as np\n'), ((9556, 9569), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (9563, 9569), True, 'import numpy as np\n'), ((9584, 9630), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (9595, 9630), True, 'import numpy as np\n'), ((12019, 12037), 'numpy.diag', 'np.diag', (['self.pcov'], {}), '(self.pcov)\n', (12026, 12037), True, 'import numpy as np\n'), ((13389, 13430), 'numpy.zeros', 'np.zeros', (['(n_param, n_param)'], {'dtype': 'float'}), '((n_param, n_param), dtype=float)\n', (13397, 13430), True, 'import numpy as np\n'), ((19341, 19371), 'numpy.log', 'np.log', (['(var_res / 
weights ** 2)'], {}), '(var_res / weights ** 2)\n', (19347, 19371), True, 'import numpy as np\n'), ((4149, 4194), 'numpy.random.randn', 'np.random.randn', (['data.shape[0]', 'data.shape[1]'], {}), '(data.shape[0], data.shape[1])\n', (4164, 4194), True, 'import numpy as np\n'), ((9658, 9672), 'numpy.outer', 'np.outer', (['v', 'v'], {}), '(v, v)\n', (9666, 9672), True, 'import numpy as np\n'), ((19386, 19413), 'numpy.sum', 'np.sum', (['(weighted_noise ** 2)'], {}), '(weighted_noise ** 2)\n', (19392, 19413), True, 'import numpy as np\n'), ((13089, 13104), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13097, 13104), True, 'import numpy as np\n'), ((7997, 8021), 'numpy.min', 'np.min', (['(s - pmin)'], {'axis': '(1)'}), '(s - pmin, axis=1)\n', (8003, 8021), True, 'import numpy as np\n'), ((8054, 8078), 'numpy.max', 'np.max', (['(s - pmax)'], {'axis': '(1)'}), '(s - pmax, axis=1)\n', (8060, 8078), True, 'import numpy as np\n')] |
"""
nitrogen.linalg.packed
-------------------------
Packed-storage matrices and routines.
The lower triangle is stored in row-major order.
(For symmetric matrices, this is equivalent to upper triangle
column-major order. For Hermitian matrices, this is
the conjugate of the upper triangle in column-major order.)
"""
import numpy as np
import warnings
def k2IJ(k):
    """
    Convert a packed 1D index `k` to its 2D index.

    The result always lies in the lower triangle (row >= column).

    Parameters
    ----------
    k : int
        The packed 1D index.

    Returns
    -------
    I, J : np.uint64
        The unpacked 2D (row, column) index.
    """
    # The row is the largest I with I*(I+1)/2 <= k.
    row = np.uint64((np.sqrt(8 * k + 1) - 1) / 2)
    col = np.uint64(k - (row * (row + 1)) // 2)
    return row, col
def IJ2k(I, J):
    """
    Packed 1D storage index of element (I, J) of a symmetric array.

    By symmetry, (I, J) and (J, I) map to the same packed index.
    """
    # Fold the index into the lower triangle: row >= col.
    row, col = max(I, J), min(I, J)
    return np.uint64(row * (row + 1) // 2 + col)
def n2N(n):
    """
    Rank N of the square matrix whose packed storage has length n.

    Parameters
    ----------
    n : int
        The packed length, n = N*(N+1)/2.

    Returns
    -------
    N : np.uint64
        The matrix rank.
    """
    return np.uint64((np.sqrt(8 * n + 1) - 1) / 2)
def symfull(P):
    """
    Expand a symmetric matrix from packed storage to its full form.

    Parameters
    ----------
    P : ndarray
        The lower triangle of a symmetric array in packed (row-major)
        storage; trailing base dimensions are carried along.

    Returns
    -------
    ndarray
        The full symmetric matrix of shape (N, N) + P.shape[1:].
    """
    packed_len = P.shape[0]
    base_shape = P.shape[1:]
    # Rank of the full matrix (inlined n2N).
    N = np.uint64((np.sqrt(8 * packed_len + 1) - 1) / 2)
    full = np.zeros((N, N) + base_shape, dtype=P.dtype)
    k = 0
    for i in range(N):
        for j in range(i + 1):
            full[i, j] += P[k]
            if i != j:
                full[j, i] += P[k]  # mirror into the upper triangle
            k += 1
    return full
def trilfull(L):
    """
    Expand a lower triangular array from packed storage to its full form.

    Parameters
    ----------
    L : ndarray
        The lower triangle in packed (row-major) storage.

    Returns
    -------
    ndarray
        The full lower triangular array; the upper triangle is zero.
    """
    packed_len = L.shape[0]
    base_shape = L.shape[1:]
    # Rank of the full matrix (inlined n2N).
    N = np.uint64((np.sqrt(8 * packed_len + 1) - 1) / 2)
    full = np.zeros((N, N) + base_shape, dtype=L.dtype)
    k = 0
    for i in range(N):
        for j in range(i + 1):
            full[i, j] += L[k]  # lower triangle element
            k += 1
    return full
def inv_sp(A, out=None):
    """
    Invert a real symmetric, positive definite matrix in packed format.

    (Complex symmetric matrices may not be defined.)

    Parameters
    ----------
    A : (np, ...) ndarray
        The packed storage array.
    out : (np, ...) ndarray
        Output buffer. If None, a new array is created. Passing out = A
        performs the inversion in place.

    Returns
    -------
    out : ndarray
        The inverse in packed storage.
    """
    if A.ndim < 1:
        raise ValueError('A must be at least one-dimensional')
    if out is None:
        out = A.copy()
    elif out is not A:
        np.copyto(out, A)
    # A = L L^T  =>  A^{-1} = (L^{-1})^T (L^{-1}), computed in place:
    chol_sp(out, out=out)  # out <-- Cholesky factor L
    inv_tp(out, out=out)   # out <-- L^{-1}
    ltl_tp(out, out=out)   # out <-- (L^{-1})^T @ L^{-1} = A^{-1}
    return out
def chol_sp(A, out=None):
    """
    Cholesky decomposition of a symmetric matrix in packed format.

    Real symmetric input should be positive definite. Complex symmetric
    (*not* Hermitian) input must have non-zero pivots.

    Parameters
    ----------
    A : (np, ...) ndarray
        Matrix in 1D packed format (see :mod:`nitrogen.linalg.packed`).
    out : (np, ...) ndarray
        Output buffer. If None, a new array is created. Passing out = A
        performs the decomposition in place.

    Returns
    -------
    out : ndarray
        The lower triangle Cholesky factor L in packed storage,
        with A = L @ L.T.
    """
    if A.ndim < 1:
        raise ValueError('A must be at least one-dimensional')
    if out is None:
        out = A.copy()
    elif out is not A:
        np.copyto(out, A)
    _chol_sp_unblocked(out)  # in-place routine
    return out
def _chol_sp_unblocked(H):
    """
    An unblocked, in-place implementation of Cholesky
    decomposition for symmetric matrices H.

    Parameters
    ----------
    H : (np, ...) ndarray
        H is a symmetric matrix in 1D packed storage.
        (Lower triangle row-packed)

    Returns
    -------
    ndarray
        The in-place result.

    Notes
    -----
    This routine uses a standard Cholesky algorithm
    for *real* symmetric matrices. It can be
    analytically continued to complex symmetric matrices
    in some cases, but this behavior is not necessarily
    stable. Use caution!
    """
    n = H.shape[0] # the packed size
    N = n2N(n) # The full matrix rank
    # L is an object array whose (i, j) entry is a length-1 *view* into H,
    # so all writes below (via out=...) update H in place.
    L = np.ndarray((N,N), dtype = np.ndarray)
    # Copy references to packed array elements to the lower
    # triangle of a full "reference" matrix.
    # References above the diagonal are undefined.
    k = 0
    for i in range(N):
        for j in range(i+1):
            L[i,j] = H[k:(k+1)] # Leave a singleton leading dimension
            k += 1
    # Compute the Cholesky decomposition
    # with a simple unblocked algorithm
    tol = 1e-10 # relative pivot threshold used for the warning below
    pivmax = np.abs( np.sqrt(L[0,0]) ) # for pivot threshold checking
    for j in range(N):
        r = L[j,:j] # The j^th row, left of the diagonal
        # L[j,j] <-- sqrt(d - r @ r.T)
        np.sqrt( L[j,j] - r @ r.T, out = L[j,j]) # overwrite
        # Check that the pivot is sufficiently large
        if (np.abs(L[j,j]) / pivmax < tol).any() :
            warnings.warn("Small diagonal (less than rel. tol. = {:.4E} encountered in Cholesky decomposition".format(tol))
        # Store the new maximum pivot
        pivmax = np.maximum(pivmax, np.abs(L[j,j]))
        # Calculate the column below the diagonal element j
        #B = L[j+1:,:j] # The block between r and c
        #c = L[j+1:,j] # The j^th column, below the diagonal
        for i in range(j+1,N):
            Bi = L[i,:j] # row i, left of column j (array of views)
            ci = L[i, j] # element (i, j), a length-1 view
            #L[j+1:,j] = (c - B @ r.T) / L[j,j]
            np.divide( (ci - Bi @ r.T), L[j,j], out = L[i,j])
    return H
def inv_tp(L, out=None):
    """
    Invert a triangular matrix in lower row-packed (or upper
    column-packed) storage.

    Parameters
    ----------
    L : (np, ...) ndarray
        Triangular matrix in 1D packed format
        (see :mod:`nitrogen.linalg.packed`).
    out : (np, ...) ndarray
        Output buffer. If None, a new array is created. Passing out = L
        performs the inversion in place.

    Returns
    -------
    out : ndarray
        The inverse of the triangular matrix in packed storage.
    """
    if L.ndim < 1:
        raise ValueError('L must be at least one-dimensional')
    if out is None:
        out = L.copy()
    elif out is not L:
        np.copyto(out, L)
    _inv_tp_unblocked(out)  # in-place routine
    return out
def _inv_tp_unblocked(L):
    """
    Invert a lower triangular matrix in row-packed storage.

    Parameters
    ----------
    L : (np, ...) ndarray
        L is a triangular matrix in 1D packed storage.
        (Row-packed for lower, column-packed for upper)

    Returns
    -------
    ndarray
        The in-place result.
    """
    n = L.shape[0] # the packed size
    N = n2N(n) # the full matrix rank
    one = np.uint64(1) # keep index arithmetic in unsigned integers
    # X holds length-1 *views* into L, so writes through X (via out=/copyto)
    # update L in place.
    X = np.ndarray((N,N), dtype = np.ndarray)
    # Copy references to packed element arrays to the lower
    # triangle of a full "reference" matrix.
    # Elements above the diagonal are not defined!
    k = 0
    for i in range(N):
        for j in range(i+1):
            X[i,j] = L[k:(k+1)] # Leave a singleton leading dimension
            k += 1
    # Compute the triangular inverse
    # with a simple in-place element by element algorithm
    abstol = 1e-15 # absolute threshold for the small-pivot warning
    # In-place lower triangle inversion, processed from the last column
    # backwards.
    for j in range(N - one, -1,-1):
        # Compute j^th diagonal element
        if (np.abs(X[j,j]) < abstol).any():
            warnings.warn(f"Small diagonal (less than abs. tol. = {abstol:.4E})" \
                          "encounted in triangle inversion")
        #X[j,j] = 1.0/X[j,j]
        np.copyto(X[j,j], 1.0/ X[j,j])
        # Compute j^th column, below diagonal
        for i in range(N - one, j, -1):
            np.multiply( -X[j,j], X[i, j+1:i+1] @ X[j+1:i+1, j], out = X[i,j])
    return L
def llt_tp(L, out=None):
    """
    Compute L @ L.T for a lower triangular matrix.

    Parameters
    ----------
    L : (np, ...) ndarray
        Lower triangular matrix in packed storage.
    out : (np, ...) ndarray
        Output buffer. If None, a new array is created. Passing out = L
        performs the multiplication in place.

    Returns
    -------
    out : ndarray
        The symmetric result in packed storage.
    """
    if L.ndim < 1:
        raise ValueError('L must be at least one-dimensional')
    if out is None:
        out = L.copy()
    elif out is not L:
        np.copyto(out, L)
    _llt_tp_unblocked(out)  # in-place L @ L.T
    return out
def _llt_tp_unblocked(L):
    """
    An unblocked, in-place routine for multiplying
    L @ L.T where L is a lower triangular matrix
    in packed row-order storage.

    This is equivalent to U.T @ U where U is in
    upper triangular packed column-order storage.

    The resulting symmetric matrix is returned in
    packed storage.

    Parameters
    ----------
    L : (np, ...) ndarray
        A lower triangular matrix in 1D packed
        row-order storage.

    Returns
    -------
    ndarray
        The in-place result.
    """
    # Calculate matrix dimensions
    n = L.shape[0] # the packed size
    N = n2N(n) # the full matrix rank
    one = np.uint64(1) # keep index arithmetic in unsigned integers
    # A holds length-1 *views* into L, so writes through A (via out=...)
    # update L in place.
    A = np.ndarray((N,N), dtype = np.ndarray)
    # Copy references to ndarrays to the lower
    # triangle of a full "reference" matrix.
    # References above the diagonal are undefined.
    k = 0
    for i in range(N):
        for j in range(i+1):
            A[i,j] = L[k:(k+1)] # leave a singleton leading dimension
            k += 1
    # This is similar to a "reverse Cholesky decomposition"
    # so we will work in the opposite direction as that:
    # last column first, so entries still to be read hold L's values.
    for j in range(N-one, -1, -1):
        r = A[j,:j] # The j^th row, left of the diagonal
        for i in range(N-one, j, -1):
            Bi = A[i,:j] # An ndarray of ndarray
            ci = A[i, j] # An ndarray
            # ci <-- Ljj * ci + Bi @ r.T
            np.add(A[j,j] * ci, Bi @ r.T, out = A[i,j])
        np.add(A[j,j]**2, r @ r.T, out = A[j,j])
    return L
def ltl_tp(L, out=None):
    """
    Compute L.T @ L for a lower triangular matrix.

    Parameters
    ----------
    L : (np, ...) ndarray
        Lower triangular matrix in packed storage.
    out : (np, ...) ndarray
        Output buffer. If None, a new array is created. Passing out = L
        performs the multiplication in place.

    Returns
    -------
    out : ndarray
        The symmetric result in packed storage.
    """
    if L.ndim < 1:
        raise ValueError('L must be at least one-dimensional')
    if out is None:
        out = L.copy()
    elif out is not L:
        np.copyto(out, L)
    _ltl_tp_unblocked(out)  # in-place L.T @ L
    return out
def _ltl_tp_unblocked(L):
    """
    An unblocked, in-place routine for multiplying
    L.T @ L where L is a lower triangular matrix
    in packed row-order storage.

    This is equivalent to U @ U.T where U is in
    upper triangular packed column-order storage.

    The resulting symmetric matrix is returned in
    packed storage.

    Parameters
    ----------
    L : ndarray
        A lower triangular matrix in 1D packed
        row-order storage.

    Returns
    -------
    ndarray
        The in-place result.
    """
    # Calculate matrix dimensions
    n = L.shape[0] # the packed size
    N = n2N(n) # the full matrix rank
    # A holds length-1 *views* into L, so the np.copyto writes below
    # update L in place.
    A = np.ndarray((N,N), dtype = np.ndarray)
    # Copy references to ndarrays to the lower
    # triangle of a full "reference" matrix.
    # References above the diagonal are undefined.
    k = 0
    for i in range(N):
        for j in range(i+1):
            A[i,j] = L[k:(k+1)] # retain leading singleton dimension
            k += 1
    # This is the "converse" of the llt routine
    # for L @ L.T
    for i in range(N):
        for j in range(i+1):
            # Compute A[i,j]
            # This is the dot product of the
            # i^th row of L.T and the
            # j^th column of L
            #
            # The i^th row of L.T is zero until
            # its i^th element
            #
            # The j^th column of L is zero until
            # its j^th element
            #
            # So the dot product need only begin
            # at the max(i,j)^th element
            #
            # By the loop ranges, j is always <= i
            # so max(i,j) = i, and we can begin
            # the dot product with the i^th element
            # The first factor is the
            # i^th row of L.T beginning at its i^th element.
            # This is the transpose of the i^th column of
            # L beginning at its i^th element, which is in
            # the lower triangle, so A's reference is OK
            F1 = (A[i:,i]).T
            # The second factor is the j^th column
            # of L beginning at its i^th element, which is
            # also in the lower triangle, so OK
            F2 = A[i:,j]
            np.copyto(A[i,j], F1 @ F2)
    return L
def trAB_sp(A, B, out=None):
    """
    The trace of A @ B for symmetric matrices in packed storage.

    Parameters
    ----------
    A, B : (np, ...) ndarray
        Symmetric matrices in packed storage.
    out : (...) ndarray, optional
        Output buffer. If None, this will be created.
        If the result is scalar, this is ignored.

    Returns
    -------
    out : ndarray or scalar
        The result.
    """
    if A.shape != B.shape:
        raise ValueError("A and B must be the same shape")
    if A.ndim < 1:
        raise ValueError("A and B must have at least one dimension.")
    # Calculate matrix dimensions: packed size and rank (inlined n2N).
    n = A.shape[0]
    N = np.uint64((np.sqrt(8 * n + 1) - 1) / 2)
    if out is None:
        # Scalar result for 1D input, array buffer otherwise.
        out = 0 if A.ndim == 1 else np.zeros(A.shape[1:], dtype=A.dtype)
    elif A.ndim == 1:
        raise ValueError("output cannot be buffered for scalar result.")
    else:
        out.fill(0.0)
    # Loop over the lower triangle only; each off-diagonal packed entry
    # represents two symmetric elements and therefore counts twice.
    k = 0
    for i in range(N):
        for j in range(i + 1):
            term = A[k] * B[k]
            out += term if i == j else 2.0 * term
            k += 1
    return out
| [
"numpy.divide",
"numpy.uint64",
"numpy.abs",
"numpy.multiply",
"numpy.zeros",
"numpy.add",
"numpy.copyto",
"numpy.ndarray",
"warnings.warn",
"numpy.sqrt"
] | [((724, 755), 'numpy.uint64', 'np.uint64', (['(k - I * (I + 1) // 2)'], {}), '(k - I * (I + 1) // 2)\n', (733, 755), True, 'import numpy as np\n'), ((1031, 1062), 'numpy.uint64', 'np.uint64', (['(i * (i + 1) // 2 + j)'], {}), '(i * (i + 1) // 2 + j)\n', (1040, 1062), True, 'import numpy as np\n'), ((1748, 1788), 'numpy.ndarray', 'np.ndarray', (['((N, N) + base)'], {'dtype': 'P.dtype'}), '((N, N) + base, dtype=P.dtype)\n', (1758, 1788), True, 'import numpy as np\n'), ((2362, 2402), 'numpy.ndarray', 'np.ndarray', (['((N, N) + base)'], {'dtype': 'L.dtype'}), '((N, N) + base, dtype=L.dtype)\n', (2372, 2402), True, 'import numpy as np\n'), ((5283, 5319), 'numpy.ndarray', 'np.ndarray', (['(N, N)'], {'dtype': 'np.ndarray'}), '((N, N), dtype=np.ndarray)\n', (5293, 5319), True, 'import numpy as np\n'), ((8078, 8090), 'numpy.uint64', 'np.uint64', (['(1)'], {}), '(1)\n', (8087, 8090), True, 'import numpy as np\n'), ((8104, 8140), 'numpy.ndarray', 'np.ndarray', (['(N, N)'], {'dtype': 'np.ndarray'}), '((N, N), dtype=np.ndarray)\n', (8114, 8140), True, 'import numpy as np\n'), ((10639, 10651), 'numpy.uint64', 'np.uint64', (['(1)'], {}), '(1)\n', (10648, 10651), True, 'import numpy as np\n'), ((10661, 10697), 'numpy.ndarray', 'np.ndarray', (['(N, N)'], {'dtype': 'np.ndarray'}), '((N, N), dtype=np.ndarray)\n', (10671, 10697), True, 'import numpy as np\n'), ((13039, 13075), 'numpy.ndarray', 'np.ndarray', (['(N, N)'], {'dtype': 'np.ndarray'}), '((N, N), dtype=np.ndarray)\n', (13049, 13075), True, 'import numpy as np\n'), ((5752, 5768), 'numpy.sqrt', 'np.sqrt', (['L[0, 0]'], {}), '(L[0, 0])\n', (5759, 5768), True, 'import numpy as np\n'), ((5946, 5985), 'numpy.sqrt', 'np.sqrt', (['(L[j, j] - r @ r.T)'], {'out': 'L[j, j]'}), '(L[j, j] - r @ r.T, out=L[j, j])\n', (5953, 5985), True, 'import numpy as np\n'), ((8928, 8961), 'numpy.copyto', 'np.copyto', (['X[j, j]', '(1.0 / X[j, j])'], {}), '(X[j, j], 1.0 / X[j, j])\n', (8937, 8961), True, 'import numpy as np\n'), ((11506, 11548), 
'numpy.add', 'np.add', (['(A[j, j] ** 2)', '(r @ r.T)'], {'out': 'A[j, j]'}), '(A[j, j] ** 2, r @ r.T, out=A[j, j])\n', (11512, 11548), True, 'import numpy as np\n'), ((3316, 3333), 'numpy.copyto', 'np.copyto', (['out', 'A'], {}), '(out, A)\n', (3325, 3333), True, 'import numpy as np\n'), ((4465, 4482), 'numpy.copyto', 'np.copyto', (['out', 'A'], {}), '(out, A)\n', (4474, 4482), True, 'import numpy as np\n'), ((6320, 6335), 'numpy.abs', 'np.abs', (['L[j, j]'], {}), '(L[j, j])\n', (6326, 6335), True, 'import numpy as np\n'), ((6688, 6734), 'numpy.divide', 'np.divide', (['(ci - Bi @ r.T)', 'L[j, j]'], {'out': 'L[i, j]'}), '(ci - Bi @ r.T, L[j, j], out=L[i, j])\n', (6697, 6734), True, 'import numpy as np\n'), ((7534, 7551), 'numpy.copyto', 'np.copyto', (['out', 'L'], {}), '(out, L)\n', (7543, 7551), True, 'import numpy as np\n'), ((8750, 8860), 'warnings.warn', 'warnings.warn', (['f"""Small diagonal (less than abs. tol. = {abstol:.4E})encounted in triangle inversion"""'], {}), "(\n f'Small diagonal (less than abs. tol. 
= {abstol:.4E})encounted in triangle inversion'\n )\n", (8763, 8860), False, 'import warnings\n'), ((9066, 9139), 'numpy.multiply', 'np.multiply', (['(-X[j, j])', '(X[i, j + 1:i + 1] @ X[j + 1:i + 1, j])'], {'out': 'X[i, j]'}), '(-X[j, j], X[i, j + 1:i + 1] @ X[j + 1:i + 1, j], out=X[i, j])\n', (9077, 9139), True, 'import numpy as np\n'), ((9844, 9861), 'numpy.copyto', 'np.copyto', (['out', 'L'], {}), '(out, L)\n', (9853, 9861), True, 'import numpy as np\n'), ((11445, 11488), 'numpy.add', 'np.add', (['(A[j, j] * ci)', '(Bi @ r.T)'], {'out': 'A[i, j]'}), '(A[j, j] * ci, Bi @ r.T, out=A[i, j])\n', (11451, 11488), True, 'import numpy as np\n'), ((12253, 12270), 'numpy.copyto', 'np.copyto', (['out', 'L'], {}), '(out, L)\n', (12262, 12270), True, 'import numpy as np\n'), ((14618, 14645), 'numpy.copyto', 'np.copyto', (['A[i, j]', '(F1 @ F2)'], {}), '(A[i, j], F1 @ F2)\n', (14627, 14645), True, 'import numpy as np\n'), ((15490, 15526), 'numpy.zeros', 'np.zeros', (['A.shape[1:]'], {'dtype': 'A.dtype'}), '(A.shape[1:], dtype=A.dtype)\n', (15498, 15526), True, 'import numpy as np\n'), ((695, 713), 'numpy.sqrt', 'np.sqrt', (['(8 * k + 1)'], {}), '(8 * k + 1)\n', (702, 713), True, 'import numpy as np\n'), ((1341, 1359), 'numpy.sqrt', 'np.sqrt', (['(8 * n + 1)'], {}), '(8 * n + 1)\n', (1348, 1359), True, 'import numpy as np\n'), ((8706, 8721), 'numpy.abs', 'np.abs', (['X[j, j]'], {}), '(X[j, j])\n', (8712, 8721), True, 'import numpy as np\n'), ((6074, 6089), 'numpy.abs', 'np.abs', (['L[j, j]'], {}), '(L[j, j])\n', (6080, 6089), True, 'import numpy as np\n')] |
import torch
import networkx as nx
import numpy as np
from oil.utils.utils import export,FixedNumpySeed
from src.systems.rigid_body import RigidBody, BodyGraph, project_onto_constraints
from src.animation import Animation
import copy
@export
class ChainPendulum(RigidBody):
    """Planar (d=2) chain of point-mass pendulums connected by rigid links.

    Node 0 is tethered to a fixed anchor; each further mass hangs from the
    previous one.  State can be expressed either as joint angles/angular
    velocities (one angle per link) or as cartesian positions/velocities of
    every mass; conversion helpers for both directions are provided.
    """
    # spatial dimension
    d=2
    # integration time step
    dt=.03
    integration_time=3
    def __init__(self, links=2, beams=False, m=None, l=None):
        """Build the body graph for a chain of *links* point masses.

        When *m* or *l* is None, masses / link lengths are drawn in
        [0.6, 1.4) under a fixed numpy seed so every instantiation is
        reproducible.
        """
        self.body_graph = BodyGraph()#nx.Graph()
        self.arg_string = f"n{links}{'b' if beams else ''}m{m or 'r'}l{l or 'r'}"
        assert not beams, "beams temporarily not supported"
        with FixedNumpySeed(0):
            ms = [.6+.8*np.random.rand() for _ in range(links)] if m is None else links*[m]
            ls = [.6+.8*np.random.rand() for _ in range(links)] if l is None else links*[l]
        self.ms = copy.deepcopy(ms)
        # Node 0 is tethered to the origin with the first link length.
        self.body_graph.add_extended_nd(0, m=ms.pop(), d=0, tether=(torch.zeros(2),ls.pop()))
        for i in range(1, links):
            self.body_graph.add_extended_nd(i, m=ms.pop(), d=0)
            self.body_graph.add_edge(i - 1, i, l=ls.pop())
        self.D =self.n = links
        self.angular_dims = range(links)
    def body2globalCoords(self, angles_omega):
        """Map joint coordinates (N, 2, n) to cartesian (N, 2, n, d).

        Positions/velocities are accumulated link by link starting from the
        fixed tether point of node 0.
        """
        d = 2
        n = len(self.body_graph.nodes)
        N = angles_omega.shape[0]
        pvs = torch.zeros(N, 2, n, d,device=angles_omega.device,dtype=angles_omega.dtype)
        global_position_velocity = torch.zeros(N, 2, d,device=angles_omega.device,dtype=angles_omega.dtype)
        length = self.body_graph.nodes[0]["tether"][1]
        global_position_velocity[:, 0, :] = self.body_graph.nodes[0]["tether"][0][None]
        global_position_velocity += self.joint2cartesian(length, angles_omega[..., 0])
        pvs[:, :, 0] = global_position_velocity
        # Walk the chain edge by edge, adding each link's relative offset.
        for (_, j), length in nx.get_edge_attributes(self.body_graph, "l").items():
            global_position_velocity += self.joint2cartesian(length, angles_omega[..., j])
            pvs[:, :, j] = global_position_velocity
        return pvs
    def joint2cartesian(self, length, angle_omega):
        """Convert one joint's (angle, angular velocity) pairs into relative
        cartesian displacement/velocity of shape (N, 2, 2)."""
        position_vel = torch.zeros(angle_omega.shape[0], 2, 2,device=angle_omega.device,dtype=angle_omega.dtype)
        position_vel[:, 0, 0] = length * angle_omega[:, 0].sin()
        position_vel[:, 1, 0] = length * angle_omega[:, 0].cos() * angle_omega[:, 1]
        position_vel[:, 0, 1] = -length * angle_omega[:, 0].cos()
        position_vel[:, 1, 1] = length * angle_omega[:, 0].sin() * angle_omega[:, 1]
        return position_vel
    def cartesian2angle(self, rel_pos_vel):
        """Inverse of joint2cartesian: recover (angle, omega), unwrapping the
        angle sequence along the batch/time axis."""
        x, y = rel_pos_vel[:, 0].T
        vx, vy = rel_pos_vel[:, 1].T
        angle = torch.atan2(x, -y)
        # NOTE(review): chooses between the two quotients to avoid dividing by a
        # near-zero coordinate, but tests `angle < 1e-2` rather than
        # `angle.abs() < 1e-2` -- confirm intended.
        omega = torch.where(angle < 1e-2, vx / (-y), vy / x)
        angle_unwrapped = torch.from_numpy(np.unwrap(angle.numpy(), axis=0)).to(
            x.device, x.dtype
        )
        return torch.stack([angle_unwrapped, omega], dim=1)
    def global2bodyCoords(self, global_pos_vel):
        """Map cartesian coordinates (..., n, d) back to joint angles (..., n)."""
        N, _, n, d = global_pos_vel.shape
        *bsT2, n, d = global_pos_vel.shape
        angles_omega = torch.zeros(
            *bsT2, n, device=global_pos_vel.device, dtype=global_pos_vel.dtype
        )
        start_position_velocity = torch.zeros(*bsT2, d,device=angles_omega.device,dtype=angles_omega.dtype)
        start_position_velocity[..., 0, :] = self.body_graph.nodes[0]["tether"][0][None]
        rel_pos_vel = global_pos_vel[..., 0, :] - start_position_velocity
        angles_omega[..., 0] += self.cartesian2angle(rel_pos_vel)
        start_position_velocity += rel_pos_vel
        # Peel off one link at a time: each joint angle comes from the position
        # of mass j relative to its parent mass.
        for (_, j), length in nx.get_edge_attributes(self.body_graph, "l").items():
            rel_pos_vel = global_pos_vel[..., j, :] - start_position_velocity
            angles_omega[..., j] += self.cartesian2angle(rel_pos_vel)
            start_position_velocity += rel_pos_vel
        return angles_omega
    def sample_initial_conditions(self, N):
        """Draw N random cartesian initial states, perturbed and projected onto
        the rigid-link constraints; retries recursively on OverflowError."""
        n = len(self.body_graph.nodes)
        angles_and_angvel = torch.randn(N, 2, n)  # (N,2,n)
        z = self.body2globalCoords(angles_and_angvel)
        z[:,0] += .2*torch.randn(N,n,2)
        z[:,1] = (.5*z[:,1] + .4*torch.randn(N,n,2))*3
        try: return project_onto_constraints(self.body_graph,z,tol=1e-5)
        except OverflowError: return self.sample_initial_conditions(N)
    def potential(self, x):
        """Gravity potential: g * mass-weighted sum of heights (y components).

        NOTE(review): self.M is presumably the mass matrix built by RigidBody
        (not visible in this file) -- confirm.
        """
        return 9.81*(self.M.float() @ x)[..., 1].sum(1)
    def kinetic(self, x):
        """Kinetic energy 0.5 * m * v^2 summed over masses and dimensions."""
        return ((0.5 * self.M.float() @ x**2)).sum((1, 2))
    def total_energy(self, z):
        """Total energy: potential of positions z[:,0] plus kinetic of velocities z[:,1]."""
        return self.potential(z[:, 0]) + self.kinetic(z[:, 1])
    def __str__(self):
        return f"{self.__class__}{self.arg_string}"
    def __repr__(self):
        return str(self)
    @property
    def animator(self):
        """Animation class used to visualize trajectories of this system."""
        return PendulumAnimation
class ChainPendulumV2(ChainPendulum):
    """Variant of ChainPendulum with optional rigid-beam links.

    With beams=True each link is an extended body with rotational inertia
    m*l*l/12, joined to its neighbours at the beam endpoints (+-l/2); with
    beams=False point masses are chained with fixed m and l.

    NOTE(review): unlike the parent __init__, this does not set self.ms,
    self.D, self.n or self.angular_dims -- confirm whether downstream code
    (e.g. PendulumAnimation, which reads body.ms) is used with this class.
    """
    def __init__(self, links=2, beams=False, m=1, l=1):
        self.body_graph = BodyGraph()#nx.Graph()
        self.arg_string = f"n{links}{'b' if beams else ''}m{m}l{l}"
        # Moment of inertia of a uniform beam about its center: m*l^2/12.
        beam_moments = torch.tensor([m*l*l/12])
        if beams:
            self.body_graph.add_extended_nd(0, m=m,moments=beam_moments, d=1)
            self.body_graph.add_joint(0,torch.tensor([l/2]))
            for i in range(1, links):
                self.body_graph.add_extended_nd(i, m=m,moments=beam_moments, d=1)
                # Join the end (-l/2) of beam i-1 to the start (+l/2) of beam i.
                self.body_graph.add_joint(i-1,torch.tensor([-l/2]),i,torch.tensor([l/2]))
        else:
            self.body_graph.add_node(0, m=m, tether=torch.zeros(2), l=l)
            for i in range(1, links):
                self.body_graph.add_node(i, m=m)
                self.body_graph.add_edge(i - 1, i, l=l)
@export
class PendulumAnimation(Animation):
    """Matplotlib animation of a pendulum trajectory.

    Draws one marker per mass (marker size scales with the squared mass) and
    one line segment per link, including the tether segment from the fixed
    anchor to node 0.
    """
    def __init__(self, qt, body):
        """qt: trajectory of cartesian positions; body: the simulated system."""
        super().__init__(qt, body)
        self.body = body
        self.G = body.body_graph
        empty = self.qt.shape[-1] * [[]]
        # One line artist per graph edge plus one per tethered node.
        n_beams = len(nx.get_node_attributes(self.G, "tether")) + len(self.G.edges)
        self.objects["beams"] = sum(
            [self.ax.plot(*empty, "-",color='k') for _ in range(n_beams)], []
        )
        self.objects["pts"] = sum([self.ax.plot(*empty, "o", ms=10*body.ms[i]**2,c=self.colors[i]) for i in range(self.qt.shape[1])], [])
    def update(self, i=0):
        """Redraw link segments (edges + tethers) for frame *i*."""
        beams = [
            np.stack([self.qt[i, k, :], self.qt[i, l, :]], axis=1)
            for (k, l) in self.G.edges
        ] + [
            np.stack([loc.cpu().data.numpy(), self.qt[i, k, :]], axis=1)
            for k, (loc,_) in nx.get_node_attributes(self.G, "tether").items()
        ]
        for beam, line in zip(beams, self.objects["beams"]):
            line.set_data(*beam[:2])
            if self.qt.shape[-1] == 3:
                line.set_3d_properties(beam[2])
        return super().update(i) | [
"numpy.stack",
"copy.deepcopy",
"torch.stack",
"torch.where",
"oil.utils.utils.FixedNumpySeed",
"numpy.random.rand",
"src.systems.rigid_body.BodyGraph",
"torch.randn",
"networkx.get_edge_attributes",
"src.systems.rigid_body.project_onto_constraints",
"networkx.get_node_attributes",
"torch.zero... | [((405, 416), 'src.systems.rigid_body.BodyGraph', 'BodyGraph', ([], {}), '()\n', (414, 416), False, 'from src.systems.rigid_body import RigidBody, BodyGraph, project_onto_constraints\n'), ((804, 821), 'copy.deepcopy', 'copy.deepcopy', (['ms'], {}), '(ms)\n', (817, 821), False, 'import copy\n'), ((1294, 1371), 'torch.zeros', 'torch.zeros', (['N', '(2)', 'n', 'd'], {'device': 'angles_omega.device', 'dtype': 'angles_omega.dtype'}), '(N, 2, n, d, device=angles_omega.device, dtype=angles_omega.dtype)\n', (1305, 1371), False, 'import torch\n'), ((1405, 1479), 'torch.zeros', 'torch.zeros', (['N', '(2)', 'd'], {'device': 'angles_omega.device', 'dtype': 'angles_omega.dtype'}), '(N, 2, d, device=angles_omega.device, dtype=angles_omega.dtype)\n', (1416, 1479), False, 'import torch\n'), ((2078, 2174), 'torch.zeros', 'torch.zeros', (['angle_omega.shape[0]', '(2)', '(2)'], {'device': 'angle_omega.device', 'dtype': 'angle_omega.dtype'}), '(angle_omega.shape[0], 2, 2, device=angle_omega.device, dtype=\n angle_omega.dtype)\n', (2089, 2174), False, 'import torch\n'), ((2630, 2648), 'torch.atan2', 'torch.atan2', (['x', '(-y)'], {}), '(x, -y)\n', (2641, 2648), False, 'import torch\n'), ((2665, 2707), 'torch.where', 'torch.where', (['(angle < 0.01)', '(vx / -y)', '(vy / x)'], {}), '(angle < 0.01, vx / -y, vy / x)\n', (2676, 2707), False, 'import torch\n'), ((2846, 2890), 'torch.stack', 'torch.stack', (['[angle_unwrapped, omega]'], {'dim': '(1)'}), '([angle_unwrapped, omega], dim=1)\n', (2857, 2890), False, 'import torch\n'), ((3049, 3128), 'torch.zeros', 'torch.zeros', (['*bsT2', 'n'], {'device': 'global_pos_vel.device', 'dtype': 'global_pos_vel.dtype'}), '(*bsT2, n, device=global_pos_vel.device, dtype=global_pos_vel.dtype)\n', (3060, 3128), False, 'import torch\n'), ((3185, 3260), 'torch.zeros', 'torch.zeros', (['*bsT2', 'd'], {'device': 'angles_omega.device', 'dtype': 'angles_omega.dtype'}), '(*bsT2, d, device=angles_omega.device, dtype=angles_omega.dtype)\n', (3196, 
3260), False, 'import torch\n'), ((3958, 3978), 'torch.randn', 'torch.randn', (['N', '(2)', 'n'], {}), '(N, 2, n)\n', (3969, 3978), False, 'import torch\n'), ((4996, 5007), 'src.systems.rigid_body.BodyGraph', 'BodyGraph', ([], {}), '()\n', (5005, 5007), False, 'from src.systems.rigid_body import RigidBody, BodyGraph, project_onto_constraints\n'), ((5110, 5140), 'torch.tensor', 'torch.tensor', (['[m * l * l / 12]'], {}), '([m * l * l / 12])\n', (5122, 5140), False, 'import torch\n'), ((583, 600), 'oil.utils.utils.FixedNumpySeed', 'FixedNumpySeed', (['(0)'], {}), '(0)\n', (597, 600), False, 'from oil.utils.utils import export, FixedNumpySeed\n'), ((4099, 4119), 'torch.randn', 'torch.randn', (['N', 'n', '(2)'], {}), '(N, n, 2)\n', (4110, 4119), False, 'import torch\n'), ((4193, 4248), 'src.systems.rigid_body.project_onto_constraints', 'project_onto_constraints', (['self.body_graph', 'z'], {'tol': '(1e-05)'}), '(self.body_graph, z, tol=1e-05)\n', (4217, 4248), False, 'from src.systems.rigid_body import RigidBody, BodyGraph, project_onto_constraints\n'), ((1786, 1830), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['self.body_graph', '"""l"""'], {}), "(self.body_graph, 'l')\n", (1808, 1830), True, 'import networkx as nx\n'), ((3565, 3609), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['self.body_graph', '"""l"""'], {}), "(self.body_graph, 'l')\n", (3587, 3609), True, 'import networkx as nx\n'), ((5271, 5292), 'torch.tensor', 'torch.tensor', (['[l / 2]'], {}), '([l / 2])\n', (5283, 5292), False, 'import torch\n'), ((5967, 6007), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.G', '"""tether"""'], {}), "(self.G, 'tether')\n", (5989, 6007), True, 'import networkx as nx\n'), ((6350, 6404), 'numpy.stack', 'np.stack', (['[self.qt[i, k, :], self.qt[i, l, :]]'], {'axis': '(1)'}), '([self.qt[i, k, :], self.qt[i, l, :]], axis=1)\n', (6358, 6404), True, 'import numpy as np\n'), ((890, 904), 'torch.zeros', 'torch.zeros', (['(2)'], {}), 
'(2)\n', (901, 904), False, 'import torch\n'), ((4151, 4171), 'torch.randn', 'torch.randn', (['N', 'n', '(2)'], {}), '(N, n, 2)\n', (4162, 4171), False, 'import torch\n'), ((5458, 5480), 'torch.tensor', 'torch.tensor', (['[-l / 2]'], {}), '([-l / 2])\n', (5470, 5480), False, 'import torch\n'), ((5481, 5502), 'torch.tensor', 'torch.tensor', (['[l / 2]'], {}), '([l / 2])\n', (5493, 5502), False, 'import torch\n'), ((5568, 5582), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (5579, 5582), False, 'import torch\n'), ((626, 642), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (640, 642), True, 'import numpy as np\n'), ((718, 734), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (732, 734), True, 'import numpy as np\n'), ((6561, 6601), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.G', '"""tether"""'], {}), "(self.G, 'tether')\n", (6583, 6601), True, 'import networkx as nx\n')] |
# -*- coding: utf-8 -*-
from typing import Union, List
import numpy as np
from astartool.common import BIT_EACH
from snowland.gis_tool import NDS_180_DEGREES, NDS_360_DEGREES, NDS_90_DEGREES, RULE_MORTON_TO_LONLAT
npl = np.linalg
npa = np.array
def dms_to_nds(dms: Union[np.ndarray, List, float]):
    """Convert decimal-degree coordinates to NDS integer units.

    A full circle (360 degrees) spans the full 32-bit range, i.e.
    nds = degrees * 2**32 / 360, truncated to uint64.  A scalar input yields
    a length-1 uint64 array; list/array inputs keep their shape.
    """
    scale = (1 << 32) / 360
    if isinstance(dms, (np.ndarray, list)):
        converted = scale * np.array(dms)
    else:
        converted = np.array([scale * dms])
    return converted.astype(np.uint64)
def nds_to_morton(nds_lon, nds_lat):
    """Interleave NDS lon/lat integers into a Morton (Z-order) code.

    Bit i of the longitude lands on morton bit 2i, bit i of the latitude on
    morton bit 2i+1 (latitude is pre-shifted left once, after mapping
    negative values into the positive range with a 2**31 - 1 offset).
    Accepts either a scalar pair or equal-length sequences/arrays.

    NOTE(review): for ndarray inputs the array branch mutates nds_lon and
    nds_lat in place (`y[y < 0] += ...`, `<<=`) -- callers passing arrays
    they still need should pass copies.
    """
    if isinstance(nds_lon, (np.uint, int)) and isinstance(nds_lat, (np.uint, int)):
        # Scalar path: shift the operands left while sampling fixed even/odd
        # mask positions (presumably BIT_EACH[k] == 1 << k -- confirm in
        # astartool.common) to interleave the bits.
        mortonCode = 0
        x, y = nds_lon, nds_lat
        if y < 0:
            y += 0x7FFFFFFF
        y <<= 1
        for i in range(32):
            mortonCode |= x & BIT_EACH[i * 2]
            x <<= 1
            mortonCode |= y & BIT_EACH[i * 2 + 1]
            y <<= 1
        return mortonCode
    if isinstance(nds_lon, list):
        nds_lon = npa(nds_lon)
    if isinstance(nds_lat, list):
        nds_lat = npa(nds_lat)
    assert len(nds_lon) == len(nds_lat)
    # Vectorized path: a single moving mask (bit) alternates between the x
    # (even) and y (odd) bit positions of the result.
    bit = 1
    morton_code = np.zeros_like(nds_lat, dtype=np.uint64)
    x, y = nds_lon, nds_lat
    y[y < 0] += 0x7FFFFFFF
    y <<= 1
    for i in range(32):
        morton_code |= x & bit
        x <<= 1
        bit <<= 1
        morton_code |= y & bit
        y <<= 1
        bit <<= 1
    return morton_code
def get_tile_id(lon: (np.ndarray, List[float]), lat: (np.ndarray, List[float]), level=13):
    """Return the packed NDS tile id(s) at *level* containing the given lon/lat.

    Degrees are converted to NDS integer coordinates, interleaved into a
    Morton code, truncated to the tile resolution of *level*, and offset by
    2**(16 + level) so that the level remains recoverable from the id.
    """
    morton = nds_to_morton(dms_to_nds(lon), dms_to_nds(lat))
    truncated = (morton >> (2 * (31 - level))) & 0xffffffff
    return truncated + (1 << (16 + level))
def get_level_of_tile_id(tile_id: int) -> int:
    """Recover the level encoded in a packed NDS tile id.

    The original implementation right-shifted ``tile_id >> 16`` until it
    reached zero, counting the shifts -- which is exactly the bit length of
    ``tile_id >> 16``.  ``int.bit_length()`` computes the same value in O(1).

    NOTE(review): for ids produced by get_tile_id(level=k) the marker bit
    sits at position 16 + k, so this returns k + 1 (identical to the original
    loop) -- confirm the intended level convention between the two functions.
    """
    return (tile_id >> 16).bit_length()
def get_left_bottom_of_tile(tile_id, level=None):
    """Lower-left corner of a tile in NDS integer coordinates.

    When *level* is omitted it is derived from the packed tile id itself.
    """
    resolved_level = get_level_of_tile_id(tile_id) if level is None else level
    return get_left_bottom_of_tile_with_level(tile_id, resolved_level)
def get_lonlat_of_tile(tile_id: int):
    """Lower-left corner of the tile, with the level derived from the id."""
    return get_left_bottom_of_tile(tile_id)
def get_index_of_tile_id_with_level(tile_id: int, level: int):
    """Strip the level marker 2**(16 + level) from a packed tile id,
    leaving the bare tile number."""
    return tile_id - (1 << (16 + level))
def normalize_coord(result):
    """Wrap out-of-range NDS coordinates back into valid range, in place.

    Longitude (result[0]) wraps by a full 360-degree span, latitude
    (result[1]) by a 180-degree span; each wrap adds 1 because zero is a
    representable value in the integer grid.
    """
    lon_span = NDS_360_DEGREES + 1  # +1: zero must be counted as well
    lat_span = NDS_180_DEGREES + 1
    if result[0] > NDS_180_DEGREES:        # x > 180 degrees -> x -= 360
        result[0] -= lon_span
    elif result[0] < -NDS_180_DEGREES:     # x < -180 degrees -> x += 360
        result[0] += lon_span
    if result[1] > NDS_90_DEGREES:         # y > 90 degrees -> y -= 180
        result[1] -= lat_span
    elif result[1] < -NDS_90_DEGREES:      # y < -90 degrees -> y += 180
        result[1] += lat_span
    return result
def morton_code_to_coord(morton_code_param):
    """De-interleave a Morton code into its two 32-bit components.

    Even bits of the code are packed into coord[0] (x) and odd bits into
    coord[1] (y).  Returns a length-2 uint32 array.
    """
    coord = np.zeros(2, dtype=np.uint32)
    remaining = morton_code_param
    mask = 1
    for _ in range(32):
        coord[0] |= remaining & mask
        remaining >>= 1
        coord[1] |= remaining & mask
        mask <<= 1
    return coord
def get_left_bottom_of_tile_with_level(tile_id: int, level: int = None):
    """Lower-left corner of a tile in NDS integer coordinates.

    Rebuilds the (truncated) Morton code of the tile's south-west corner,
    de-interleaves it into (x, y) and wraps the result into valid range.
    """
    if level is None:
        level = get_level_of_tile_id(tile_id)
    tile_number = get_index_of_tile_id_with_level(tile_id, level)
    corner_morton = tile_number << (2 * (31 - level))
    corner = morton_code_to_coord(corner_morton)
    corner = normalize_coord(corner)
    # NOTE(review): the result is offset by one integer grid unit on both
    # axes -- confirm this matches the intended corner convention.
    corner += 1
    return corner
def get_lon_lat_of_tile(tile_id: int, level: int = None):
    """Lower-left corner of the tile converted from NDS units via
    RULE_MORTON_TO_LONLAT."""
    if level is None:
        level = get_level_of_tile_id(tile_id)
    corner = get_left_bottom_of_tile_with_level(tile_id, level)
    return corner * RULE_MORTON_TO_LONLAT
def get_tile_bounding_box(tile_id: int, level=13):
    """Return the tile's lower-left corner together with its width and
    height; tiles are square with side 180 / 2**level."""
    if level is None:
        level = get_level_of_tile_id(tile_id)
    corner = get_left_bottom_of_tile_with_level(tile_id, level)
    side = 180 / (1 << level)
    return corner, side, side
| [
"numpy.zeros_like",
"numpy.zeros"
] | [((1128, 1167), 'numpy.zeros_like', 'np.zeros_like', (['nds_lat'], {'dtype': 'np.uint64'}), '(nds_lat, dtype=np.uint64)\n', (1141, 1167), True, 'import numpy as np\n'), ((3070, 3098), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.uint32'}), '(2, dtype=np.uint32)\n', (3078, 3098), True, 'import numpy as np\n')] |
import pytest
import tempfile
from flaky import flaky
import numpy as np
import deepchem as dc
from deepchem.feat import MolGraphConvFeaturizer
from deepchem.models.tests.test_graph_models import get_dataset
# Optional heavy dependencies: the GAT tests require torch plus dgl/dgllife.
try:
  import dgl
  import dgllife
  import torch
  from deepchem.models import GATModel
  has_torch_and_dgl = True
except:
  # NOTE(review): bare except treats *any* import-time failure (not only a
  # missing package) as "unavailable"; consider narrowing to ImportError.
  has_torch_and_dgl = False
@pytest.mark.torch
def test_gat_regression():
  """GATModel regression: overfit a tiny dataset, then smoke-test Delaney."""
  feat = MolGraphConvFeaturizer()
  tasks, dataset, transformers, metric = get_dataset(
      'regression', featurizer=feat)

  model = GATModel(
      mode='regression',
      n_tasks=len(tasks),
      number_atom_features=30,
      batch_size=10,
      learning_rate=0.001)

  # Overfit test: training long enough must drive MAE below 0.5.
  model.fit(dataset, nb_epoch=500)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean_absolute_error'] < 0.5

  # Single-epoch smoke test with a tiny architecture on a real
  # MoleculeNet dataset.
  from deepchem.molnet import load_delaney
  tasks, all_dataset, transformers = load_delaney(featurizer=feat)
  train_set, _valid, _test = all_dataset
  tiny_model = dc.models.GATModel(
      mode='regression',
      n_tasks=len(tasks),
      graph_attention_layers=[2],
      n_attention_heads=1,
      residual=False,
      predictor_hidden_feats=2)
  tiny_model.fit(train_set, nb_epoch=1)
@flaky
@pytest.mark.torch
def test_gat_classification():
  """GATModel classification: overfit a tiny dataset, then smoke-test BACE."""
  feat = MolGraphConvFeaturizer()
  tasks, dataset, transformers, metric = get_dataset(
      'classification', featurizer=feat)

  model = GATModel(
      mode='classification',
      n_tasks=len(tasks),
      number_atom_features=30,
      batch_size=10,
      learning_rate=0.001)

  # Overfit test: ROC-AUC on the training data must reach 0.85.
  model.fit(dataset, nb_epoch=100)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean-roc_auc_score'] >= 0.85

  # Single-epoch smoke test with a tiny architecture on a real
  # MoleculeNet dataset.
  from deepchem.molnet import load_bace_classification
  tasks, all_dataset, transformers = load_bace_classification(
      featurizer=feat)
  train_set, _valid, _test = all_dataset
  tiny_model = dc.models.GATModel(
      mode='classification',
      n_tasks=len(tasks),
      graph_attention_layers=[2],
      n_attention_heads=1,
      residual=False,
      predictor_hidden_feats=2)
  tiny_model.fit(train_set, nb_epoch=1)
@pytest.mark.torch
def test_gat_reload():
  """Train a GAT classifier, rebuild it from the saved checkpoint directory,
  and check that the reloaded model predicts identically."""
  # load datasets
  featurizer = MolGraphConvFeaturizer()
  tasks, dataset, transformers, metric = get_dataset(
      'classification', featurizer=featurizer)
  # initialize models
  n_tasks = len(tasks)
  model_dir = tempfile.mkdtemp()
  model = GATModel(
      mode='classification',
      n_tasks=n_tasks,
      number_atom_features=30,
      model_dir=model_dir,
      batch_size=10,
      learning_rate=0.001)
  model.fit(dataset, nb_epoch=100)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean-roc_auc_score'] >= 0.85
  # Rebuild an identical architecture pointing at the same checkpoint
  # directory, then restore the trained weights from it.
  reloaded_model = GATModel(
      mode='classification',
      n_tasks=n_tasks,
      number_atom_features=30,
      model_dir=model_dir,
      batch_size=10,
      learning_rate=0.001)
  reloaded_model.restore()
  # Predictions of the original and the reloaded model must match exactly.
  pred_mols = ["CCCC", "CCCCCO", "CCCCC"]
  X_pred = featurizer(pred_mols)
  random_dataset = dc.data.NumpyDataset(X_pred)
  original_pred = model.predict(random_dataset)
  reload_pred = reloaded_model.predict(random_dataset)
  assert np.all(original_pred == reload_pred)
| [
"deepchem.models.tests.test_graph_models.get_dataset",
"deepchem.models.GATModel",
"deepchem.molnet.load_bace_classification",
"deepchem.feat.MolGraphConvFeaturizer",
"tempfile.mkdtemp",
"deepchem.data.NumpyDataset",
"deepchem.molnet.load_delaney",
"numpy.all"
] | [((443, 467), 'deepchem.feat.MolGraphConvFeaturizer', 'MolGraphConvFeaturizer', ([], {}), '()\n', (465, 467), False, 'from deepchem.feat import MolGraphConvFeaturizer\n'), ((509, 557), 'deepchem.models.tests.test_graph_models.get_dataset', 'get_dataset', (['"""regression"""'], {'featurizer': 'featurizer'}), "('regression', featurizer=featurizer)\n", (520, 557), False, 'from deepchem.models.tests.test_graph_models import get_dataset\n'), ((621, 730), 'deepchem.models.GATModel', 'GATModel', ([], {'mode': '"""regression"""', 'n_tasks': 'n_tasks', 'number_atom_features': '(30)', 'batch_size': '(10)', 'learning_rate': '(0.001)'}), "(mode='regression', n_tasks=n_tasks, number_atom_features=30,\n batch_size=10, learning_rate=0.001)\n", (629, 730), False, 'from deepchem.models import GATModel\n'), ((1037, 1072), 'deepchem.molnet.load_delaney', 'load_delaney', ([], {'featurizer': 'featurizer'}), '(featurizer=featurizer)\n', (1049, 1072), False, 'from deepchem.molnet import load_delaney\n'), ((1428, 1452), 'deepchem.feat.MolGraphConvFeaturizer', 'MolGraphConvFeaturizer', ([], {}), '()\n', (1450, 1452), False, 'from deepchem.feat import MolGraphConvFeaturizer\n'), ((1494, 1546), 'deepchem.models.tests.test_graph_models.get_dataset', 'get_dataset', (['"""classification"""'], {'featurizer': 'featurizer'}), "('classification', featurizer=featurizer)\n", (1505, 1546), False, 'from deepchem.models.tests.test_graph_models import get_dataset\n'), ((1610, 1723), 'deepchem.models.GATModel', 'GATModel', ([], {'mode': '"""classification"""', 'n_tasks': 'n_tasks', 'number_atom_features': '(30)', 'batch_size': '(10)', 'learning_rate': '(0.001)'}), "(mode='classification', n_tasks=n_tasks, number_atom_features=30,\n batch_size=10, learning_rate=0.001)\n", (1618, 1723), False, 'from deepchem.models import GATModel\n'), ((2043, 2090), 'deepchem.molnet.load_bace_classification', 'load_bace_classification', ([], {'featurizer': 'featurizer'}), '(featurizer=featurizer)\n', (2067, 2090), 
False, 'from deepchem.molnet import load_bace_classification\n'), ((2442, 2466), 'deepchem.feat.MolGraphConvFeaturizer', 'MolGraphConvFeaturizer', ([], {}), '()\n', (2464, 2466), False, 'from deepchem.feat import MolGraphConvFeaturizer\n'), ((2508, 2560), 'deepchem.models.tests.test_graph_models.get_dataset', 'get_dataset', (['"""classification"""'], {'featurizer': 'featurizer'}), "('classification', featurizer=featurizer)\n", (2519, 2560), False, 'from deepchem.models.tests.test_graph_models import get_dataset\n'), ((2628, 2646), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2644, 2646), False, 'import tempfile\n'), ((2657, 2791), 'deepchem.models.GATModel', 'GATModel', ([], {'mode': '"""classification"""', 'n_tasks': 'n_tasks', 'number_atom_features': '(30)', 'model_dir': 'model_dir', 'batch_size': '(10)', 'learning_rate': '(0.001)'}), "(mode='classification', n_tasks=n_tasks, number_atom_features=30,\n model_dir=model_dir, batch_size=10, learning_rate=0.001)\n", (2665, 2791), False, 'from deepchem.models import GATModel\n'), ((2986, 3120), 'deepchem.models.GATModel', 'GATModel', ([], {'mode': '"""classification"""', 'n_tasks': 'n_tasks', 'number_atom_features': '(30)', 'model_dir': 'model_dir', 'batch_size': '(10)', 'learning_rate': '(0.001)'}), "(mode='classification', n_tasks=n_tasks, number_atom_features=30,\n model_dir=model_dir, batch_size=10, learning_rate=0.001)\n", (2994, 3120), False, 'from deepchem.models import GATModel\n'), ((3276, 3304), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X_pred'], {}), '(X_pred)\n', (3296, 3304), True, 'import deepchem as dc\n'), ((3417, 3453), 'numpy.all', 'np.all', (['(original_pred == reload_pred)'], {}), '(original_pred == reload_pred)\n', (3423, 3453), True, 'import numpy as np\n')] |
import numpy as np
import mp2 as mp2
import hartree_fock as hf
import noble_gas_model as noble_gas_model
if __name__ == "__main__":
    # Build the noble-gas model and a two-atom geometry (coordinates are
    # 3D positions, one row per atom).
    NobleGasModel = noble_gas_model.NobleGasModel()
    atomic_coordinates = np.array([[0.0,0.0,0.0], [3.0,4.0,5.0]])
    mp2_instance = mp2.MP2(NobleGasModel,atomic_coordinates)
    # Initial guess density, then iterate SCF to self-consistency.
    mp2_instance.density_matrix = mp2_instance.calculate_atomic_density_matrix(NobleGasModel)
    mp2_instance.density_matrix, mp2_instance.fock_matrix = mp2_instance.scf_cycle(NobleGasModel)
    # Energies from the converged SCF solution.
    mp2_instance.energy_scf = mp2_instance.calculate_energy_scf()
    mp2_instance.energy_ion = mp2_instance.calculate_energy_ion(NobleGasModel)
    print(F'The SCF energy is {mp2_instance.energy_scf} and the ion energy is {mp2_instance.energy_ion} ')
    # MP2 correlation correction on top of SCF.
    mp2_instance.mp2_energy = mp2_instance.calculate_energy_mp2()
    print(F'The MP2 energy is {mp2_instance.mp2_energy}')
| [
"numpy.array",
"noble_gas_model.NobleGasModel",
"mp2.MP2"
] | [((153, 184), 'noble_gas_model.NobleGasModel', 'noble_gas_model.NobleGasModel', ([], {}), '()\n', (182, 184), True, 'import noble_gas_model as noble_gas_model\n'), ((210, 254), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [3.0, 4.0, 5.0]]'], {}), '([[0.0, 0.0, 0.0], [3.0, 4.0, 5.0]])\n', (218, 254), True, 'import numpy as np\n'), ((270, 312), 'mp2.MP2', 'mp2.MP2', (['NobleGasModel', 'atomic_coordinates'], {}), '(NobleGasModel, atomic_coordinates)\n', (277, 312), True, 'import mp2 as mp2\n')] |
import cv2
import numpy as np
import argparse
import os
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("images_path", help="Path to images.")
    args = parser.parse_args()
    images_path = args.images_path
    print("Images Path: {0}".format(images_path))

    def display(window_name, image):
        """Show *image* in a named window, wait for a key press, close it."""
        cv2.imshow(window_name, image)
        cv2.waitKey(0)
        cv2.destroyWindow(window_name)

    # Background frame (no fish).
    background = cv2.imread(os.path.join(images_path, "background.png"), cv2.IMREAD_GRAYSCALE)
    display('background', background)

    # Background subtraction isolates the moving object.
    fish01 = cv2.imread(os.path.join(images_path, "fish01.png"), cv2.IMREAD_GRAYSCALE)
    foreground = cv2.subtract(background, fish01)
    display('foreground', foreground)

    # Binarize the difference image, then erode to remove speckle noise.
    _retval, binary_mask = cv2.threshold(foreground, 25, 255, cv2.THRESH_BINARY)
    display('threshold', binary_mask)

    kernel = np.ones((3, 3), np.uint8)
    eroded = cv2.erode(binary_mask, kernel, iterations=1)
    display('erosion', eroded)
| [
"cv2.subtract",
"argparse.ArgumentParser",
"cv2.waitKey",
"cv2.threshold",
"numpy.ones",
"cv2.destroyWindow",
"cv2.erode",
"cv2.imshow",
"os.path.join"
] | [((98, 123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (121, 123), False, 'import argparse\n'), ((402, 438), 'cv2.imshow', 'cv2.imshow', (['"""background"""', 'background'], {}), "('background', background)\n", (412, 438), False, 'import cv2\n'), ((442, 456), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (453, 456), False, 'import cv2\n'), ((461, 492), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""background"""'], {}), "('background')\n", (478, 492), False, 'import cv2\n'), ((597, 629), 'cv2.subtract', 'cv2.subtract', (['background', 'fish01'], {}), '(background, fish01)\n', (609, 629), False, 'import cv2\n'), ((633, 669), 'cv2.imshow', 'cv2.imshow', (['"""foreground"""', 'foreground'], {}), "('foreground', foreground)\n", (643, 669), False, 'import cv2\n'), ((673, 687), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (684, 687), False, 'import cv2\n'), ((692, 723), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""foreground"""'], {}), "('foreground')\n", (709, 723), False, 'import cv2\n'), ((750, 803), 'cv2.threshold', 'cv2.threshold', (['foreground', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(foreground, 25, 255, cv2.THRESH_BINARY)\n', (763, 803), False, 'import cv2\n'), ((805, 839), 'cv2.imshow', 'cv2.imshow', (['"""threshold"""', 'threshold'], {}), "('threshold', threshold)\n", (815, 839), False, 'import cv2\n'), ((843, 857), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (854, 857), False, 'import cv2\n'), ((862, 892), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""threshold"""'], {}), "('threshold')\n", (879, 892), False, 'import cv2\n'), ((907, 932), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (914, 932), True, 'import numpy as np\n'), ((945, 987), 'cv2.erode', 'cv2.erode', (['threshold', 'kernel'], {'iterations': '(1)'}), '(threshold, kernel, iterations=1)\n', (954, 987), False, 'import cv2\n'), ((992, 1022), 'cv2.imshow', 'cv2.imshow', (['"""erosion"""', 
'erosion'], {}), "('erosion', erosion)\n", (1002, 1022), False, 'import cv2\n'), ((1026, 1040), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1037, 1040), False, 'import cv2\n'), ((1045, 1073), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""erosion"""'], {}), "('erosion')\n", (1062, 1073), False, 'import cv2\n'), ((333, 376), 'os.path.join', 'os.path.join', (['images_path', '"""background.png"""'], {}), "(images_path, 'background.png')\n", (345, 376), False, 'import os\n'), ((518, 557), 'os.path.join', 'os.path.join', (['images_path', '"""fish01.png"""'], {}), "(images_path, 'fish01.png')\n", (530, 557), False, 'import os\n')] |
import sys
import itk
import numpy as np
import torch
from collections import OrderedDict
class SegmentationPostProcessing():
    """
    Post-processing callable that remembers some intermediate steps.
    Given a binary lung segmentation (with labels 0=background and 1=lung) this will return a segmentation
    that picks out a left and a right lung (left=1, right=2), and that guarantees each lung to be a simply connected region
    (i.e. both connected and containing no holes).
    Note that "left" and "right" refer to the left and right sides of the _image_, not necessarily of the patient!
    To get patient left/right correctly, you would need to involve the PA/AP orientation information associated to the xray.
    """
    def __init__(self):
        # Maps step name -> {"description": ..., "artifact": ...};
        # reset at the start of every __call__.
        self.intermediate_steps = OrderedDict()
    def log_intermediate_step(self, step_name, step_description, step_artifact):
        """Record an intermediate artifact of the last __call__ for inspection."""
        self.intermediate_steps[step_name] = {
            "description" : step_description,
            "artifact" : step_artifact,
        }
    def __call__(self, seg_tensor: torch.Tensor):
        """
        seg_tensor should be a pytorch tensor of shape (H,W), a binary image label map with labels 0 and 1.
        Returns a processed version of seg_tensor, while logging some intermediate steps in case they need to be inspected.
        """
        # A dict mapping each step name to a corresponding step artifact
        # to log intermediate steps of computation
        self.intermediate_steps = OrderedDict()
        if (not len(seg_tensor.shape)==2):
            raise ValueError("Expected 2D image, i.e. tensor of shape (H,W).")
        # Convert tensor to ITK image
        seg_itk = itk.image_from_array(seg_tensor.numpy().astype(np.uint8))
        # Compute connected components from binary label map
        seg_connected = itk.ConnectedComponentImageFilter(seg_itk)
        # save copy to allow inspecting later
        self.log_intermediate_step(
            "connected_components", "Connected components of segmentation",
            itk.array_from_image(seg_connected)
        )
        # Construct a list of pairs (label, size) consisting of the label assigned to each connected
        # component followed by the size of that component. The label 0 is excluded because it
        # stands for background, and the itk connected components filter should preserve that label.
        label_size_pairs = [(l,(seg_connected==l).sum()) for l in np.unique(seg_connected) if l!=0]
        # sort by region size, descending
        label_size_pairs = sorted(label_size_pairs, key = lambda pair : pair[1], reverse=True)
        if len(label_size_pairs) < 2:
            raise Exception("Invalid segmentation mask; fewer than two components detected. (Expected left lung and right lung)")
        # Sanity check: the two lungs should be of comparable size.
        if (label_size_pairs[0][1]/label_size_pairs[1][1] > 2.):
            print("Something may be wrong: one lung segment (left or right) seems to be much larger than the other",file=sys.stderr)
        # the top two labels in terms of region size
        largest_two_labels = [pair[0].item() for pair in label_size_pairs[:2]]
        # Use ITK to compute shape attributes
        label_map = itk.LabelImageToShapeLabelMapFilter(seg_connected.astype(itk.UC))
        # Get the centroid of each of the largest two regions
        centroids = np.array([label_map.GetLabelObject(l).GetCentroid() for l in largest_two_labels])
        # This must be true because we raise exception when largest_two_labels is too short of a list,
        # and because the input image was a 2D image.
        # centroids[i,j] is the j^th coordinate of the i^th label
        assert(centroids.shape==(2,2))
        self.log_intermediate_step(
            "centroids", "Centroids of the two largest connected components of the segmentation",
            centroids
        )
        # Use centroid x coordinate to determine indices of largest_two_labels that correspond
        # to left and right lungs, and validate that the x coordinates are reasonable
        left_lung_index = centroids[:,0].argmin()
        right_lung_index = 0 if left_lung_index==1 else 1
        lung_indices = [left_lung_index, right_lung_index]
        # NOTE(review): shape[0] is the number of rows (height) of the array;
        # using it to normalize the centroid *x* coordinate assumes the ITK
        # centroid axis convention lines up -- confirm for non-square images.
        x_total = seg_connected.shape[0]
        left_lung_x_proportion, right_lung_x_proportion = centroids[lung_indices,0] / x_total
        if not (left_lung_x_proportion > 0. and left_lung_x_proportion < 0.5 and
                right_lung_x_proportion > 0.5 and right_lung_x_proportion < 1.0):
            print("Something may be wrong: left and right lung segments ended up not reasonably positioned",file=sys.stderr)
        left_lung_label, right_lung_label = np.array(largest_two_labels)[lung_indices]
        # Construct lung mask with left and right labels
        lr_lung_seg = np.zeros_like(seg_itk)
        lr_lung_seg[seg_connected==left_lung_label] = 1
        lr_lung_seg[seg_connected==right_lung_label] = 2
        self.log_intermediate_step(
            "unfilled_lung_segmentation",
            "Lung segmentation after identifying left vs right lung, but before filling any holes",
            np.copy(lr_lung_seg)
        )
        # Fill holes in each label
        lr_lung_seg = itk.image_from_array(lr_lung_seg.astype(np.uint8))
        lr_lung_seg = itk.BinaryFillholeImageFilter(lr_lung_seg, ForegroundValue=1)
        lr_lung_seg = itk.BinaryFillholeImageFilter(lr_lung_seg, ForegroundValue=2)
        lr_lung_seg = itk.array_from_image(lr_lung_seg)
        self.log_intermediate_step(
            "filled_lung_segmentation",
            "Lung segmentation after identifying left vs right lung and filling any holes in them",
            np.copy(lr_lung_seg)
        )
        return lr_lung_seg
| [
"itk.ConnectedComponentImageFilter",
"numpy.zeros_like",
"numpy.copy",
"itk.BinaryFillholeImageFilter",
"itk.array_from_image",
"numpy.array",
"collections.OrderedDict",
"numpy.unique"
] | [((771, 784), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (782, 784), False, 'from collections import OrderedDict\n'), ((1425, 1438), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1436, 1438), False, 'from collections import OrderedDict\n'), ((1737, 1779), 'itk.ConnectedComponentImageFilter', 'itk.ConnectedComponentImageFilter', (['seg_itk'], {}), '(seg_itk)\n', (1770, 1779), False, 'import itk\n'), ((4514, 4536), 'numpy.zeros_like', 'np.zeros_like', (['seg_itk'], {}), '(seg_itk)\n', (4527, 4536), True, 'import numpy as np\n'), ((4956, 5017), 'itk.BinaryFillholeImageFilter', 'itk.BinaryFillholeImageFilter', (['lr_lung_seg'], {'ForegroundValue': '(1)'}), '(lr_lung_seg, ForegroundValue=1)\n', (4985, 5017), False, 'import itk\n'), ((5036, 5097), 'itk.BinaryFillholeImageFilter', 'itk.BinaryFillholeImageFilter', (['lr_lung_seg'], {'ForegroundValue': '(2)'}), '(lr_lung_seg, ForegroundValue=2)\n', (5065, 5097), False, 'import itk\n'), ((5116, 5149), 'itk.array_from_image', 'itk.array_from_image', (['lr_lung_seg'], {}), '(lr_lung_seg)\n', (5136, 5149), False, 'import itk\n'), ((1931, 1966), 'itk.array_from_image', 'itk.array_from_image', (['seg_connected'], {}), '(seg_connected)\n', (1951, 1966), False, 'import itk\n'), ((4399, 4427), 'numpy.array', 'np.array', (['largest_two_labels'], {}), '(largest_two_labels)\n', (4407, 4427), True, 'import numpy as np\n'), ((4810, 4830), 'numpy.copy', 'np.copy', (['lr_lung_seg'], {}), '(lr_lung_seg)\n', (4817, 4830), True, 'import numpy as np\n'), ((5316, 5336), 'numpy.copy', 'np.copy', (['lr_lung_seg'], {}), '(lr_lung_seg)\n', (5323, 5336), True, 'import numpy as np\n'), ((2321, 2345), 'numpy.unique', 'np.unique', (['seg_connected'], {}), '(seg_connected)\n', (2330, 2345), True, 'import numpy as np\n')] |
import numpy
n = int(input())
a,b = ( numpy.array([input().split() for _ in range(n)], int) for _ in range(2) )
print(numpy.dot(a,b))
#https://www.hackerrank.com/challenges/np-dot-and-cross/problem | [
"numpy.dot"
] | [((119, 134), 'numpy.dot', 'numpy.dot', (['a', 'b'], {}), '(a, b)\n', (128, 134), False, 'import numpy\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Calculate laplacian matrix, used to network weight.
Evaluate the performance of net work.
"""
import numpy as np
import mindspore.ops as ops
from scipy.linalg import fractional_matrix_power
from scipy.sparse.linalg import eigs
def calculate_laplacian_matrix(adj_mat, mat_type):
"""
calculate laplacian matrix used for graph convolution layer.
"""
n_vertex = adj_mat.shape[0]
# row sum
deg_mat_row = np.asmatrix(np.diag(np.sum(adj_mat, axis=1)))
# column sum
#deg_mat_col = np.asmatrix(np.diag(np.sum(adj_mat, axis=0)))
deg_mat = deg_mat_row
adj_mat = np.asmatrix(adj_mat)
id_mat = np.asmatrix(np.identity(n_vertex))
# Combinatorial
com_lap_mat = deg_mat - adj_mat
# For SpectraConv
# To [0, 1]
sym_normd_lap_mat = np.matmul(np.matmul(fractional_matrix_power(deg_mat, -0.5), \
com_lap_mat), fractional_matrix_power(deg_mat, -0.5))
# For ChebConv
# From [0, 1] to [-1, 1]
lambda_max_sym = eigs(sym_normd_lap_mat, k=1, which='LR')[0][0].real
wid_sym_normd_lap_mat = 2 * sym_normd_lap_mat / lambda_max_sym - id_mat
# For GCNConv
wid_deg_mat = deg_mat + id_mat
wid_adj_mat = adj_mat + id_mat
hat_sym_normd_lap_mat = np.matmul(np.matmul(fractional_matrix_power(wid_deg_mat, -0.5), \
wid_adj_mat), fractional_matrix_power(wid_deg_mat, -0.5))
# Random Walk
rw_lap_mat = np.matmul(np.linalg.matrix_power(deg_mat, -1), adj_mat)
# For SpectraConv
# To [0, 1]
rw_normd_lap_mat = id_mat - rw_lap_mat
# For ChebConv
# From [0, 1] to [-1, 1]
lambda_max_rw = eigs(rw_lap_mat, k=1, which='LR')[0][0].real
wid_rw_normd_lap_mat = 2 * rw_normd_lap_mat / lambda_max_rw - id_mat
# For GCNConv
wid_deg_mat = deg_mat + id_mat
wid_adj_mat = adj_mat + id_mat
hat_rw_normd_lap_mat = np.matmul(np.linalg.matrix_power(wid_deg_mat, -1), wid_adj_mat)
if mat_type == 'wid_sym_normd_lap_mat':
return wid_sym_normd_lap_mat
if mat_type == 'hat_sym_normd_lap_mat':
return hat_sym_normd_lap_mat
if mat_type == 'wid_rw_normd_lap_mat':
return wid_rw_normd_lap_mat
if mat_type == 'hat_rw_normd_lap_mat':
return hat_rw_normd_lap_mat
raise ValueError(f'ERROR: "{mat_type}" is unknown.')
def evaluate_metric(model, dataset, scaler):
"""
evaluate the performance of network.
"""
mae, sum_y, mape, mse = [], [], [], []
for data in dataset.create_dict_iterator():
x = data['inputs']
y = data['labels']
y_pred = model(x)
y_pred = ops.Reshape()(y_pred, (len(y_pred), -1))
y_pred = scaler.inverse_transform(y_pred.asnumpy()).reshape(-1)
y = scaler.inverse_transform(y.asnumpy()).reshape(-1)
d = np.abs(y - y_pred)
mae += d.tolist()
sum_y += y.tolist()
mape += (d / y).tolist()
mse += (d ** 2).tolist()
MAE = np.array(mae).mean()
MAPE = np.array(mape).mean()
RMSE = np.sqrt(np.array(mse).mean())
#WMAPE = np.sum(np.array(mae)) / np.sum(np.array(sum_y))
return MAE, RMSE, MAPE
| [
"numpy.abs",
"numpy.sum",
"mindspore.ops.Reshape",
"scipy.linalg.fractional_matrix_power",
"numpy.identity",
"numpy.linalg.matrix_power",
"numpy.asmatrix",
"numpy.array",
"scipy.sparse.linalg.eigs"
] | [((1267, 1287), 'numpy.asmatrix', 'np.asmatrix', (['adj_mat'], {}), '(adj_mat)\n', (1278, 1287), True, 'import numpy as np\n'), ((1313, 1334), 'numpy.identity', 'np.identity', (['n_vertex'], {}), '(n_vertex)\n', (1324, 1334), True, 'import numpy as np\n'), ((1537, 1575), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['deg_mat', '(-0.5)'], {}), '(deg_mat, -0.5)\n', (1560, 1575), False, 'from scipy.linalg import fractional_matrix_power\n'), ((1977, 2019), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['wid_deg_mat', '(-0.5)'], {}), '(wid_deg_mat, -0.5)\n', (2000, 2019), False, 'from scipy.linalg import fractional_matrix_power\n'), ((2067, 2102), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['deg_mat', '(-1)'], {}), '(deg_mat, -1)\n', (2089, 2102), True, 'import numpy as np\n'), ((2508, 2547), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['wid_deg_mat', '(-1)'], {}), '(wid_deg_mat, -1)\n', (2530, 2547), True, 'import numpy as np\n'), ((3418, 3436), 'numpy.abs', 'np.abs', (['(y - y_pred)'], {}), '(y - y_pred)\n', (3424, 3436), True, 'import numpy as np\n'), ((1118, 1141), 'numpy.sum', 'np.sum', (['adj_mat'], {'axis': '(1)'}), '(adj_mat, axis=1)\n', (1124, 1141), True, 'import numpy as np\n'), ((1476, 1514), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['deg_mat', '(-0.5)'], {}), '(deg_mat, -0.5)\n', (1499, 1514), False, 'from scipy.linalg import fractional_matrix_power\n'), ((1912, 1954), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['wid_deg_mat', '(-0.5)'], {}), '(wid_deg_mat, -0.5)\n', (1935, 1954), False, 'from scipy.linalg import fractional_matrix_power\n'), ((3231, 3244), 'mindspore.ops.Reshape', 'ops.Reshape', ([], {}), '()\n', (3242, 3244), True, 'import mindspore.ops as ops\n'), ((3567, 3580), 'numpy.array', 'np.array', (['mae'], {}), '(mae)\n', (3575, 3580), True, 'import numpy as np\n'), ((3599, 3613), 'numpy.array', 'np.array', (['mape'], {}), 
'(mape)\n', (3607, 3613), True, 'import numpy as np\n'), ((1647, 1687), 'scipy.sparse.linalg.eigs', 'eigs', (['sym_normd_lap_mat'], {'k': '(1)', 'which': '"""LR"""'}), "(sym_normd_lap_mat, k=1, which='LR')\n", (1651, 1687), False, 'from scipy.sparse.linalg import eigs\n'), ((2264, 2297), 'scipy.sparse.linalg.eigs', 'eigs', (['rw_lap_mat'], {'k': '(1)', 'which': '"""LR"""'}), "(rw_lap_mat, k=1, which='LR')\n", (2268, 2297), False, 'from scipy.sparse.linalg import eigs\n'), ((3640, 3653), 'numpy.array', 'np.array', (['mse'], {}), '(mse)\n', (3648, 3653), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 <NAME> (<EMAIL>)
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import sys
import tensorflow as tf
import numpy as np
import random
label_one_hot = {'Iris-setosa': [1,0,0], 'Iris-versicolor': [0,1,0], 'Iris-virginica': [0,0,1]}
result_2_label = {0: 'no idea', 1: 'Iris-setosa', 2: 'Iris-versicolor', 3: 'Iris-virginica'}
def get_data_from_csv(filename):
data = [ ]
labels = [ ]
for line in file(filename):
row = line.split(",")
temp = [ ]
for x in range(0,4):
temp.append(float(row[x]))
label = row[4]
label = label.replace('\n', '')
if (label in label_one_hot):
labels.append(label_one_hot[label])
data.append(temp)
return data, labels
print ('loading and extracting data ...'),
x_input, y_input = get_data_from_csv('data/iris.data.csv')
print ('done.')
print ('initializing placeholders and variables ...'),
# placeholders and variables
x=tf.placeholder(tf.float32,shape=[None,4]) # input
y_=tf.placeholder(tf.float32,shape=[None, 3]) # output
# weight and bias
W=tf.Variable(tf.zeros([4,3]))
b=tf.Variable(tf.zeros([3]))
# softmax classification
y = tf.nn.softmax(tf.matmul(x, W) + b)
# loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices = [1]))
# optimizer
train_step = tf.train.AdamOptimizer(0.05).minimize(cross_entropy)
# accuracy
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print ('done.')
print ('initialing session...'),
# session parameters
session = tf.InteractiveSession()
# initialising variables
init = tf.initialize_all_variables()
session.run(init)
print ('done.')
print ('learning ...')
# number of interations
epoch=100
for step in xrange(epoch):
_, c = session.run([train_step, cross_entropy], feed_dict={x: x_input, y_: y_input})
sys.stdout.write("\r%d" % step)
sys.stdout.flush()
print ('\ndone.')
print ('running test set...')
for test_run in range(0,10):
sepal_length = random.uniform( 4.0, 8.0 )
sepal_width = random.uniform( 2.0, 5.0 )
petal_length = random.uniform( 1.0, 6.0 )
petal_width = random.uniform( 0.1, 3.0 )
test_arr = np.asarray([ sepal_length, sepal_width, petal_length, petal_width ])
test_set = test_arr.reshape(1,4)
probabilities = y.eval(feed_dict={x: test_set}, session=session)
predictions = session.run(tf.arg_max(y, 1), feed_dict={x: test_set})
print ('test run [' + str(test_run) +'] with a generated test set ' + str(test_arr) + ' we got: ')
print ('probabilities: ' + str(probabilities))
print ('prediction: ' + str(predictions[0]) + ' == ' + result_2_label[predictions[0]+1])
print (' ')
session.close()
| [
"sys.stdout.write",
"random.uniform",
"tensorflow.argmax",
"numpy.asarray",
"tensorflow.arg_max",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.zeros",
"sys.stdout.flush",
"tensorflow.matmul",
"tensorflow.initialize_all_variables",
"tensorflow.log",
"tensorflow.InteractiveSession"... | [((1492, 1535), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 4]'}), '(tf.float32, shape=[None, 4])\n', (1506, 1535), True, 'import tensorflow as tf\n'), ((1545, 1588), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 3]'}), '(tf.float32, shape=[None, 3])\n', (1559, 1588), True, 'import tensorflow as tf\n'), ((2149, 2172), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (2170, 2172), True, 'import tensorflow as tf\n'), ((2206, 2235), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2233, 2235), True, 'import tensorflow as tf\n'), ((1630, 1646), 'tensorflow.zeros', 'tf.zeros', (['[4, 3]'], {}), '([4, 3])\n', (1638, 1646), True, 'import tensorflow as tf\n'), ((1661, 1674), 'tensorflow.zeros', 'tf.zeros', (['[3]'], {}), '([3])\n', (1669, 1674), True, 'import tensorflow as tf\n'), ((1968, 1983), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (1977, 1983), True, 'import tensorflow as tf\n'), ((1984, 2000), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (1993, 2000), True, 'import tensorflow as tf\n'), ((2027, 2066), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2034, 2066), True, 'import tensorflow as tf\n'), ((2449, 2480), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%d' % step)"], {}), "('\\r%d' % step)\n", (2465, 2480), False, 'import sys\n'), ((2485, 2503), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2501, 2503), False, 'import sys\n'), ((2602, 2626), 'random.uniform', 'random.uniform', (['(4.0)', '(8.0)'], {}), '(4.0, 8.0)\n', (2616, 2626), False, 'import random\n'), ((2647, 2671), 'random.uniform', 'random.uniform', (['(2.0)', '(5.0)'], {}), '(2.0, 5.0)\n', (2661, 2671), False, 'import random\n'), ((2693, 2717), 'random.uniform', 'random.uniform', (['(1.0)', '(6.0)'], {}), 
'(1.0, 6.0)\n', (2707, 2717), False, 'import random\n'), ((2738, 2762), 'random.uniform', 'random.uniform', (['(0.1)', '(3.0)'], {}), '(0.1, 3.0)\n', (2752, 2762), False, 'import random\n'), ((2781, 2847), 'numpy.asarray', 'np.asarray', (['[sepal_length, sepal_width, petal_length, petal_width]'], {}), '([sepal_length, sepal_width, petal_length, petal_width])\n', (2791, 2847), True, 'import numpy as np\n'), ((1720, 1735), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (1729, 1735), True, 'import tensorflow as tf\n'), ((1872, 1900), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.05)'], {}), '(0.05)\n', (1894, 1900), True, 'import tensorflow as tf\n'), ((2987, 3003), 'tensorflow.arg_max', 'tf.arg_max', (['y', '(1)'], {}), '(y, 1)\n', (2997, 3003), True, 'import tensorflow as tf\n'), ((1809, 1818), 'tensorflow.log', 'tf.log', (['y'], {}), '(y)\n', (1815, 1818), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
"""Compute vanishing points from images.
Usage:
demo.py [options] <yaml-config> <checkpoint> <image>
demo.py ( -h | --help )
Arguments:
<yaml-config> Path to the yaml hyper-parameter file
<checkpoint> Path to the checkpoint
<image> Path to an image
Options:
-h --help Show this screen
-d --devices <devices> Comma seperated GPU devices [default: 0]
-o --output <output> Path to the output AA curve [default: error.npz]
--dump <output-dir> Optionally, save the vanishing points to npz format.
"""
import os
import sys
import math
import shlex
import pprint
import random
import os.path as osp
import threading
import subprocess
import time
import torch
import matplotlib as mpl
import skimage.io
import numpy as np
import numpy.linalg as LA
import scipy.spatial.distance as scipy_spatial_dist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from tqdm import tqdm
from docopt import docopt
import scipy.io as sio
import scipy.optimize
import sklearn.metrics
import scipy.sparse
from sklearn.metrics import pairwise_distances
from sklearn.cluster import DBSCAN
import vpd
import vpd.models.vanishing_net as vn
from vpd.config import C, M
from vpd.datasets import ScanNetDataset, WireframeDataset, YUDDataset, NYUDataset
from vpd.models.sphere.sphere_utils import gold_spiral_sampling_patch, catersian_to_sphere
def topk_orthogonal_vps(scores, xyz, num_vps=3):
index = np.argsort(-scores)
vps_idx = [index[0]]
for i in index[1:]:
if len(vps_idx) == num_vps:
break
# cos_distance function: input: x: mxp, y: nxp; output: y, mxn
### scipy: same 0, opposite 2, orthorgonal 1, dist = 1-AB/(|A||B|)
dist_cos = scipy_spatial_dist.cdist(xyz[vps_idx], xyz[i][None, :], 'cosine')
### same 1, opposite -1, orthorgonal 0
dist_cos = np.abs(-1.0*dist_cos+1.0)
dist_cos_arc = np.min(np.arccos(dist_cos))
if dist_cos_arc >= np.pi/num_vps:
vps_idx.append(i)
else:
continue
vps_pd = xyz[vps_idx]
return vps_pd, vps_idx
def vps_clustering(vps_prob, xyz, threshold):
inds = np.flatnonzero(vps_prob >= threshold)
vps = xyz[inds, :]
dis = vps @ np.transpose(vps)
dis = np.clip(dis, a_min=-1., a_max=1.) ### same=1, opposite=-1, orthogonal=0
dis = 1.0 - np.abs(dis) ### same/opposite =0, orthogonal = 1
dis_sparse = scipy.sparse.csr_matrix(dis)
clusterer = DBSCAN(eps=0.005, min_samples=9, metric='precomputed').fit(dis_sparse)
labels = clusterer.labels_
# print('clusters', type(clusters), clusters.shape, np.unique(clusters))
if labels.min()<=0: labels += (np.abs(labels.min())+1) ### the labels from DBSCAN can be negtive (zeros) sometimes
vps_pd=[]
for label in np.unique(labels):
inds_cluster = inds[labels==label]
vp_max, vp_argmax = np.max(vps_prob[inds_cluster]), np.argmax(vps_prob[inds_cluster])
vps_pd.append(np.array([inds_cluster[vp_argmax], vp_max]))
# print('vps_pd', inds_cluster[vp_argmax], vp_max, len(inds_cluster))
vps_pd = np.vstack(vps_pd)
arg_prob = np.argsort(vps_pd[:, 1])[::-1]
vps_pd_sort = vps_pd[arg_prob, 0].astype(int)
# # # cluster labels for each spherical point
vps_cluster = np.zeros(vps_prob.shape)
vps_cluster[inds] = labels
return xyz[vps_pd_sort], vps_cluster.astype(int)
def to_pixel(vpts, focal_length=1.0, h=480, w=640):
x = vpts[:,0] / vpts[:, 2] * focal_length * max(h, w)/2.0 + w//2
y = -vpts[:,1] / vpts[:, 2] * focal_length * max(h, w)/2.0 + h//2
return y, x
def main():
args = docopt(__doc__)
config_file = args["<yaml-config>"]
C.update(C.from_yaml(filename=config_file))
C.model.im2col_step = 32 # override im2col_step for evaluation
M.update(C.model)
pprint.pprint(C, indent=4)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
# # # save plots for visualization
# os.environ['QT_QPA_PLATFORM']='offscreen'
device_name = "cpu"
num_gpus = args["--devices"].count(",") + 1
os.environ["CUDA_VISIBLE_DEVICES"] = args["--devices"]
if torch.cuda.is_available():
device_name = "cuda"
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed(0)
print("Let's use", torch.cuda.device_count(), "GPU(s)!")
for k in range(0, torch.cuda.device_count()):
print('kth, device name', k, torch.cuda.get_device_name(k))
else:
print("CUDA is not available")
device = torch.device(device_name)
npzfile = np.load(C.io.ht_mapping, allow_pickle=True)
ht_mapping = npzfile['ht_mapping']
ht_mapping[:,2] = npzfile['rho_res'].item() - np.abs(ht_mapping[:,2])
ht_mapping[:,2] /= npzfile['rho_res'].item()
vote_ht_dict={}
vote_ht_dict["vote_mapping"]= torch.tensor(ht_mapping, requires_grad=False).float().contiguous()
vote_ht_dict["im_size"]= (npzfile['rows'], npzfile['cols'])
vote_ht_dict["ht_size"]= (npzfile['h'], npzfile['w'])
print('vote_ht_dict memory MB', vote_ht_dict["vote_mapping"].size(),
vote_ht_dict["vote_mapping"].element_size() * vote_ht_dict["vote_mapping"].nelement() / (1024 * 1024))
npzfile = np.load(C.io.sphere_mapping, allow_pickle=True)
sphere_neighbors = npzfile['sphere_neighbors']
vote_sphere_dict={}
vote_sphere_dict["vote_mapping"]=torch.tensor(sphere_neighbors, requires_grad=False).float().contiguous()
vote_sphere_dict["ht_size"]=(npzfile['h'], npzfile['w'])
vote_sphere_dict["sphere_size"]=npzfile['num_points']
print('vote_sphere_dict memory MB', vote_sphere_dict["sphere_size"], vote_sphere_dict["vote_mapping"].size(),
vote_sphere_dict["vote_mapping"].element_size() * vote_sphere_dict["vote_mapping"].nelement() / (1024 * 1024))
# 2. model
if M.backbone == "stacked_hourglass":
backbone = vpd.models.hg(
planes=128, depth=M.depth, num_stacks=M.num_stacks, num_blocks=M.num_blocks
)
else:
raise NotImplementedError
model = vpd.models.VanishingNet(backbone, vote_ht_dict, vote_sphere_dict)
model = model.to(device)
model = torch.nn.DataParallel(
model, device_ids=list(range(args["--devices"].count(",") + 1))
)
if args["<checkpoint>"]:
print('args["<checkpoint>"]', args["<checkpoint>"])
checkpoint = torch.load(args["<checkpoint>"], map_location=lambda storage, loc: storage)
print('checkpoint', checkpoint["iteration"], checkpoint["epoch"])
# print('checkpoint', checkpoint["iteration"])
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()
# print('model', model)
##### number of parameters in a model
total_params = sum(p.numel() for p in model.parameters())
##### number of trainable parameters in a model
train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('num of total parameters', total_params)
print('num of trainable parameters', train_params)
xyz = gold_spiral_sampling_patch(np.array([0, 0, 1]), alpha=90.0 * np.pi / 180., num_pts=C.io.num_nodes)
angles = catersian_to_sphere(xyz)
if args["--dump"] is not None:
os.makedirs(args["--dump"], exist_ok=True)
# demo: use pretrained models on NYU to predict VPs from a given image
print('processing: ', args["<image>"])
image_name = args["<image>"]
image = skimage.io.imread(image_name)
if image.shape[0:2]!=tuple([480, 640]):
print("warning: images resized to [480, 640]!")
image = skimage.transform.resize(image, (480,640))
image *= 255.0
image = np.rollaxis(image, 2).copy()
image = torch.from_numpy(image).float().to(device)
targets = torch.zeros(C.io.num_nodes).float().to(device)
input_dict = {"image": image[None], "target": targets, "eval": True}
with torch.no_grad():
result = model(input_dict)
pred = result["prediction"].cpu().numpy()[0]
# Option 1:
# a. f available: first map to camera space, and then pick up the top3;
# b. Assumption: VPs are more or less equally spread over the sphere.
# vpts_pd, vpts_idx = topk_orthogonal_vps(pred, xyz, num_vps=3)
# Option 2 - unknown f: Use clustering to detect multiple VPs
vpts_pd, vpts_idx = vps_clustering(pred, xyz, threshold=0.5)
angles_pd = catersian_to_sphere(vpts_pd)
# You might want to resize VPs from [480, 640] to original size [img_h, img_w].
ys, xs = to_pixel(vpts_pd, focal_length=1.0, h=480, w=640)
### save predictions,
image = image.permute(1,2,0).cpu().numpy()
if args["--dump"]:
np.savez(
os.path.join(args["--dump"], image_name.replace(".jpg", ".npz")),
image = image,
vpts_pd=vpts_pd,
vpts_sphere=pred,
)
### visualize results on the hemisphere
fig = plt.figure()
ax = fig.add_subplot(121)
ax.imshow(image/255.0)
for (x, y) in zip(xs, ys):
ax.scatter(x, y)
ax = fig.add_subplot(122)
ax.scatter(angles[:, 0], angles[:, 1], c=pred)
ax.scatter(angles_pd[:, 0], angles_pd[:, 1], c='r')
ax.set_title('Sphere')
plt.savefig('pred.png', format='png', bbox_inches ='tight', pad_inches = 0.1, transparent=True, dpi=600)
plt.suptitle('VP prediction')
plt.show()
if __name__ == "__main__":
main()
| [
"numpy.load",
"numpy.random.seed",
"numpy.abs",
"docopt.docopt",
"matplotlib.pyplot.suptitle",
"numpy.argmax",
"numpy.clip",
"torch.cuda.device_count",
"numpy.argsort",
"matplotlib.pyplot.figure",
"pprint.pprint",
"torch.device",
"torch.no_grad",
"vpd.config.M.update",
"numpy.unique",
... | [((1539, 1558), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (1549, 1558), True, 'import numpy as np\n'), ((2256, 2293), 'numpy.flatnonzero', 'np.flatnonzero', (['(vps_prob >= threshold)'], {}), '(vps_prob >= threshold)\n', (2270, 2293), True, 'import numpy as np\n'), ((2361, 2396), 'numpy.clip', 'np.clip', (['dis'], {'a_min': '(-1.0)', 'a_max': '(1.0)'}), '(dis, a_min=-1.0, a_max=1.0)\n', (2368, 2396), True, 'import numpy as np\n'), ((2895, 2912), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2904, 2912), True, 'import numpy as np\n'), ((3209, 3226), 'numpy.vstack', 'np.vstack', (['vps_pd'], {}), '(vps_pd)\n', (3218, 3226), True, 'import numpy as np\n'), ((3393, 3417), 'numpy.zeros', 'np.zeros', (['vps_prob.shape'], {}), '(vps_prob.shape)\n', (3401, 3417), True, 'import numpy as np\n'), ((3737, 3752), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (3743, 3752), False, 'from docopt import docopt\n'), ((3913, 3930), 'vpd.config.M.update', 'M.update', (['C.model'], {}), '(C.model)\n', (3921, 3930), False, 'from vpd.config import C, M\n'), ((3935, 3961), 'pprint.pprint', 'pprint.pprint', (['C'], {'indent': '(4)'}), '(C, indent=4)\n', (3948, 3961), False, 'import pprint\n'), ((3967, 3981), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3978, 3981), False, 'import random\n'), ((3986, 4003), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4000, 4003), True, 'import numpy as np\n'), ((4008, 4028), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4025, 4028), False, 'import torch\n'), ((4260, 4285), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4283, 4285), False, 'import torch\n'), ((4653, 4678), 'torch.device', 'torch.device', (['device_name'], {}), '(device_name)\n', (4665, 4678), False, 'import torch\n'), ((4694, 4737), 'numpy.load', 'np.load', (['C.io.ht_mapping'], {'allow_pickle': '(True)'}), '(C.io.ht_mapping, allow_pickle=True)\n', (4701, 
4737), True, 'import numpy as np\n'), ((5345, 5392), 'numpy.load', 'np.load', (['C.io.sphere_mapping'], {'allow_pickle': '(True)'}), '(C.io.sphere_mapping, allow_pickle=True)\n', (5352, 5392), True, 'import numpy as np\n'), ((6181, 6246), 'vpd.models.VanishingNet', 'vpd.models.VanishingNet', (['backbone', 'vote_ht_dict', 'vote_sphere_dict'], {}), '(backbone, vote_ht_dict, vote_sphere_dict)\n', (6204, 6246), False, 'import vpd\n'), ((7279, 7303), 'vpd.models.sphere.sphere_utils.catersian_to_sphere', 'catersian_to_sphere', (['xyz'], {}), '(xyz)\n', (7298, 7303), False, 'from vpd.models.sphere.sphere_utils import gold_spiral_sampling_patch, catersian_to_sphere\n'), ((8501, 8529), 'vpd.models.sphere.sphere_utils.catersian_to_sphere', 'catersian_to_sphere', (['vpts_pd'], {}), '(vpts_pd)\n', (8520, 8529), False, 'from vpd.models.sphere.sphere_utils import gold_spiral_sampling_patch, catersian_to_sphere\n'), ((9026, 9038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9036, 9038), True, 'import matplotlib.pyplot as plt\n'), ((9321, 9426), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pred.png"""'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)', 'transparent': '(True)', 'dpi': '(600)'}), "('pred.png', format='png', bbox_inches='tight', pad_inches=0.1,\n transparent=True, dpi=600)\n", (9332, 9426), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9460), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""VP prediction"""'], {}), "('VP prediction')\n", (9443, 9460), True, 'import matplotlib.pyplot as plt\n'), ((9465, 9475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9473, 9475), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1892), 'scipy.spatial.distance.cdist', 'scipy_spatial_dist.cdist', (['xyz[vps_idx]', 'xyz[i][None, :]', '"""cosine"""'], {}), "(xyz[vps_idx], xyz[i][None, :], 'cosine')\n", (1851, 1892), True, 'import scipy.spatial.distance as scipy_spatial_dist\n'), ((1959, 1988), 'numpy.abs', 
'np.abs', (['(-1.0 * dist_cos + 1.0)'], {}), '(-1.0 * dist_cos + 1.0)\n', (1965, 1988), True, 'import numpy as np\n'), ((2333, 2350), 'numpy.transpose', 'np.transpose', (['vps'], {}), '(vps)\n', (2345, 2350), True, 'import numpy as np\n'), ((2450, 2461), 'numpy.abs', 'np.abs', (['dis'], {}), '(dis)\n', (2456, 2461), True, 'import numpy as np\n'), ((3243, 3267), 'numpy.argsort', 'np.argsort', (['vps_pd[:, 1]'], {}), '(vps_pd[:, 1])\n', (3253, 3267), True, 'import numpy as np\n'), ((3806, 3839), 'vpd.config.C.from_yaml', 'C.from_yaml', ([], {'filename': 'config_file'}), '(filename=config_file)\n', (3817, 3839), False, 'from vpd.config import C, M\n'), ((4374, 4399), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (4396, 4399), False, 'import torch\n'), ((4827, 4851), 'numpy.abs', 'np.abs', (['ht_mapping[:, 2]'], {}), '(ht_mapping[:, 2])\n', (4833, 4851), True, 'import numpy as np\n'), ((6011, 6105), 'vpd.models.hg', 'vpd.models.hg', ([], {'planes': '(128)', 'depth': 'M.depth', 'num_stacks': 'M.num_stacks', 'num_blocks': 'M.num_blocks'}), '(planes=128, depth=M.depth, num_stacks=M.num_stacks,\n num_blocks=M.num_blocks)\n', (6024, 6105), False, 'import vpd\n'), ((6500, 6575), 'torch.load', 'torch.load', (["args['<checkpoint>']"], {'map_location': '(lambda storage, loc: storage)'}), "(args['<checkpoint>'], map_location=lambda storage, loc: storage)\n", (6510, 6575), False, 'import torch\n'), ((7194, 7213), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (7202, 7213), True, 'import numpy as np\n'), ((7348, 7390), 'os.makedirs', 'os.makedirs', (["args['--dump']"], {'exist_ok': '(True)'}), "(args['--dump'], exist_ok=True)\n", (7359, 7390), False, 'import os\n'), ((8009, 8024), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8022, 8024), False, 'import torch\n'), ((2016, 2035), 'numpy.arccos', 'np.arccos', (['dist_cos'], {}), '(dist_cos)\n', (2025, 2035), True, 'import numpy as np\n'), ((2563, 2617), 'sklearn.cluster.DBSCAN', 
'DBSCAN', ([], {'eps': '(0.005)', 'min_samples': '(9)', 'metric': '"""precomputed"""'}), "(eps=0.005, min_samples=9, metric='precomputed')\n", (2569, 2617), False, 'from sklearn.cluster import DBSCAN\n'), ((2985, 3015), 'numpy.max', 'np.max', (['vps_prob[inds_cluster]'], {}), '(vps_prob[inds_cluster])\n', (2991, 3015), True, 'import numpy as np\n'), ((3017, 3050), 'numpy.argmax', 'np.argmax', (['vps_prob[inds_cluster]'], {}), '(vps_prob[inds_cluster])\n', (3026, 3050), True, 'import numpy as np\n'), ((3073, 3116), 'numpy.array', 'np.array', (['[inds_cluster[vp_argmax], vp_max]'], {}), '([inds_cluster[vp_argmax], vp_max])\n', (3081, 3116), True, 'import numpy as np\n'), ((4427, 4452), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4450, 4452), False, 'import torch\n'), ((4491, 4516), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4514, 4516), False, 'import torch\n'), ((7780, 7801), 'numpy.rollaxis', 'np.rollaxis', (['image', '(2)'], {}), '(image, 2)\n', (7791, 7801), True, 'import numpy as np\n'), ((4560, 4589), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['k'], {}), '(k)\n', (4586, 4589), False, 'import torch\n'), ((4954, 4999), 'torch.tensor', 'torch.tensor', (['ht_mapping'], {'requires_grad': '(False)'}), '(ht_mapping, requires_grad=False)\n', (4966, 4999), False, 'import torch\n'), ((5505, 5556), 'torch.tensor', 'torch.tensor', (['sphere_neighbors'], {'requires_grad': '(False)'}), '(sphere_neighbors, requires_grad=False)\n', (5517, 5556), False, 'import torch\n'), ((7821, 7844), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (7837, 7844), False, 'import torch\n'), ((7878, 7905), 'torch.zeros', 'torch.zeros', (['C.io.num_nodes'], {}), '(C.io.num_nodes)\n', (7889, 7905), False, 'import torch\n')] |
'''
COTR dataset
'''
import random
import time
import numpy as np
import torch
from torchvision.transforms import functional as tvtf
from torch.utils import data
import cv2
import imutils
# from COTR.datasets import megadepth_dataset
from COTR.datasets import tracking_datasets
from COTR.utils import debug_utils, utils, constants
# from COTR.projector import pcd_projector
# from COTR.cameras import capture
# from COTR.utils.utils import CropCamConfig
# from COTR.inference import inference_helper
# from COTR.inference.inference_helper import two_images_side_by_side
import COTR.datasets.utils as tracking_utils
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.detach().cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def im_to_numpy(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) # H*W*C
return img
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
if img.max() > 1:
img /= 255
return img
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def crop(img, center, scale, res, rot=0):
    """Crop ``img`` around ``center`` at person-scale ``scale`` (1 unit ==
    200 px, same convention as get_transform) into an output of spatial size
    ``res``, optionally rotated by ``rot`` degrees.  Returns a torch tensor.
    """
    img = im_to_numpy(img)
    # Preprocessing for efficient cropping
    ht, wd = img.shape[0], img.shape[1]
    sf = scale * 200.0 / res[0]
    if sf < 2:
        sf = 1
    else:
        # Image is much larger than the target crop: downscale first so the
        # slicing below touches fewer pixels; center/scale follow the resize.
        new_size = int(np.math.floor(max(ht, wd) / sf))
        new_ht = int(np.math.floor(ht / sf))
        new_wd = int(np.math.floor(wd / sf))
        if new_size < 2:
            # Degenerate case: image would shrink to (almost) nothing.
            return torch.zeros(res[0], res[1], img.shape[2]) \
                        if len(img.shape) > 2 else torch.zeros(res[0], res[1])
        else:
            #img = imresize(img, [new_ht, new_wd])
            img= cv2.resize(img, dsize=(new_wd,new_ht), interpolation=cv2.INTER_LINEAR)
            center = center * 1.0 / sf
            scale = scale / sf
    # Upper left point
    ul = np.array(transform(np.array([0, 0]), center, scale, res, invert=1))
    # Bottom right point
    br = np.array(transform(np.array(res), center, scale, res, invert=1))
    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)
    # Range to fill new array
    new_x = [max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]]
    new_y = [max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]]
    # Range to sample from original image
    old_x = [max(0, ul[0]), min(img.shape[1], br[0])]
    old_y = [max(0, ul[1]), min(img.shape[0], br[1])]
    if(new_x[1]<new_x[0]):
        tmp= new_x[1]
        new_x[1]=new_x[0]
        new_x[0]=tmp
        tmp=old_x[0]
        old_x[0]=old_x[1]
        old_x[1]=tmp
    # swapping the upper-left and bottom right point if upper-left goes out of image
    # NOTE(review): only the x ranges are reordered above; new_y/old_y are
    # never swapped -- confirm the y ranges can never be inverted.
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
    if not rot == 0:
        # Remove padding
        #new_img = imrotate(new_img, rot)
        #new_img= imutils.rotate_bound(new_img,-rot) #For bounded rotation
        new_img = imutils.rotate(new_img,rot)
        new_img = new_img[pad:-pad, pad:-pad]
    new_img = im_to_torch(cv2.resize(new_img, dsize=(res[0],res[1]), interpolation=cv2.INTER_LINEAR))
    #scipy.misc.imresize(new_img, res)
    return new_img
def get_transform(center, scale, res, rot=0):
    """Build the 3x3 homogeneous matrix mapping original-image pixel
    coordinates into the output crop.

    ``scale`` is in units of 200 pixels, ``res`` is the (height, width) of
    the output, and a non-zero ``rot`` (degrees) rotates about the crop
    center.
    """
    box_size = 200 * scale
    mat = np.zeros((3, 3))
    mat[0, 0] = float(res[1]) / box_size
    mat[1, 1] = float(res[0]) / box_size
    mat[0, 2] = res[1] * (-float(center[0]) / box_size + .5)
    mat[1, 2] = res[0] * (-float(center[1]) / box_size + .5)
    mat[2, 2] = 1
    if rot != 0:
        # Negate to match the rotation direction used when cropping.
        angle = -rot * np.pi / 180
        sin_a, cos_a = np.sin(angle), np.cos(angle)
        rotation = np.zeros((3, 3))
        rotation[0, :2] = [cos_a, -sin_a]
        rotation[1, :2] = [sin_a, cos_a]
        rotation[2, 2] = 1
        # Rotate around the crop center: translate there, rotate, translate back.
        shift = np.eye(3)
        shift[0, 2] = -res[1] / 2
        shift[1, 2] = -res[0] / 2
        unshift = shift.copy()
        unshift[:2, 2] *= -1
        mat = unshift.dot(rotation.dot(shift.dot(mat)))
    return mat
def transform(pt, center, scale, res, invert=0, rot=0):
    """Map 2D points (``pt`` is 2xN) through the crop transform built by
    ``get_transform``; ``invert=1`` applies the inverse mapping instead.
    Returns integer pixel coordinates (2xN)."""
    mat = get_transform(center, scale, res, rot=rot)
    if invert:
        mat = np.linalg.inv(mat)
    # Homogeneous coordinates, shifted to the 0-based convention.
    homogeneous = np.stack([pt[0] - 1, pt[1] - 1, np.ones_like(pt[0])], axis=0)
    mapped = np.dot(mat, homogeneous)
    return mapped[:2].astype(int) + 1
class CATERDataset(data.Dataset):
    """Keypoint-correspondence dataset built on CATER tracking sequences.

    Each item pairs a randomly chosen query frame with a second ("nn")
    frame of the same sequence, concatenated side by side, together with
    ``num_kp`` normalized 2D correspondences between the two frames.
    """
    def __init__(self, opt, dataset_type: str):
        assert dataset_type in ['train', 'val', 'test']
        # assert len(opt.scenes_name_list) > 0
        self.opt = opt
        self.dataset_type = dataset_type
        # self.sfm_dataset = megadepth_dataset.MegadepthDataset(opt, dataset_type)
        self.kp_pool = opt.kp_pool
        self.num_kp = opt.num_kp
        self.bidirectional = opt.bidirectional
        self.need_rotation = opt.need_rotation
        # self.max_rotation = opt.max_rotation
        # self.rotation_chance = opt.rotation_chance
        self.max_rotation = 30 # debug
        self.rotation_chance = 0.5 # debug
        self.dataset = tracking_datasets.get_dataset('cater', seqlen=6, shuffle=False, env=dataset_type)
    def _trim_corrs(self, in_corrs):
        """Randomly subsample or pad ``in_corrs`` to exactly ``self.num_kp`` rows."""
        length = in_corrs.shape[0]
        if length >= self.num_kp:
            mask = np.random.choice(length, self.num_kp)
            return in_corrs[mask]
        else:
            # Too few correspondences: pad by re-sampling existing rows.
            mask = np.random.choice(length, self.num_kp - length)
            return np.concatenate([in_corrs, in_corrs[mask]], axis=0)
    def __len__(self):
        # if self.dataset_type == 'val':
        #     return min(1000, self.sfm_dataset.num_queries)
        # else:
        #     return self.sfm_dataset.num_queries
        return len(self.dataset)
    def augment_with_rotation(self, query_cap, nn_cap):
        """Independently rotate each capture (each with probability
        ``rotation_chance``) by a uniform angle within +/- ``max_rotation`` degrees."""
        if random.random() < self.rotation_chance:
            theta = np.random.uniform(low=-1, high=1) * self.max_rotation
            query_cap = capture.rotate_capture(query_cap, theta)
        if random.random() < self.rotation_chance:
            theta = np.random.uniform(low=-1, high=1) * self.max_rotation
            nn_cap = capture.rotate_capture(nn_cap, theta)
        return query_cap, nn_cap
    def __getitem__(self, index):
        """Build one sample: a normalized side-by-side image tensor plus
        ``num_kp`` correspondences (x1y1x2y2, scaled so the left half's x is
        in [0, .5] and the right half's in [.5, 1]).  Recurses on a random
        index when no valid correspondence survives the visibility mask.
        """
        assert self.opt.k_size == 1
        sample = self.dataset[index]
        # if self.need_rotation:
        #     query_cap, nn_cap = self.augment_with_rotation(query_cap, nn_cap)
        # randomly select a query frame and a nn frame
        pix_T_camXs = sample['pix_T_camXs'] # S x 4 x 4
        rgb_camXs = sample['rgb_camXs'] # S x 3 x H x W
        xyz_camXs = sample['xyz_camXs'] # S x N x 3
        origin_T_camXs = sample['world_T_camXs'] # S x 4 x 4
        scorelist = sample['scorelist_s'] # S x K
        lrtlist_camXs = sample['lrtlist_camXs'] # S x K x 19
        S, _, H, W = rgb_camXs.shape
        _, K = scorelist.shape
        rgb_camXs += .5 # range [0,1]
        rand_frame_id = np.random.choice(np.arange(S), 2, replace=False)
        query_frame_id = rand_frame_id[0]
        nn_frame_id = rand_frame_id[1]
        filtered_xyzs = []
        # only take points belong to objects
        for obj_id in range(10):
            if scorelist[nn_frame_id, obj_id] == 0:
                continue
            inb = tracking_utils.geom.get_pts_inbound_lrt(xyz_camXs[nn_frame_id:nn_frame_id+1], lrtlist_camXs[nn_frame_id:nn_frame_id+1, obj_id], add_pad=0.1).reshape(-1) # N
            xyz = xyz_camXs[nn_frame_id:nn_frame_id+1, inb] # 1, N, 3
            filtered_xyzs.append(xyz)
        nn_xyz_camXs = torch.cat(filtered_xyzs, dim=1)
        # nn_xyz_camXs = xyz_camXs[nn_frame_id:nn_frame_id+1]
        nn_xy_camXs = tracking_utils.geom.camera2pixels(nn_xyz_camXs, pix_T_camXs[nn_frame_id:nn_frame_id+1]) # 1 x N x 2
        nn_keypoints_x = nn_xy_camXs[..., 0] # 1 x N
        nn_keypoints_y = nn_xy_camXs[..., 1] # 1 x N, in image coord
        # transform the xyzs into query frame, to find correspondence
        origin_T_nn = origin_T_camXs[nn_frame_id:nn_frame_id+1]
        origin_T_query = origin_T_camXs[query_frame_id:query_frame_id+1]
        query_T_nn = torch.matmul(tracking_utils.geom.safe_inverse(origin_T_query), origin_T_nn)
        query_xyz_camXs = tracking_utils.geom.apply_4x4(query_T_nn, nn_xyz_camXs)
        query_xy_camXs = tracking_utils.geom.camera2pixels(query_xyz_camXs, pix_T_camXs[query_frame_id:query_frame_id+1]) #
        nn_keypoints_xy = nn_xy_camXs[0].numpy() # N x 2
        query_keypoints_xy = query_xy_camXs[0].numpy() # N x 2
        query_img = rgb_camXs[query_frame_id] # 3 x H x W
        nn_img = rgb_camXs[nn_frame_id] # 3 x H x W
        # TODO: reshape/crop the images. now just concat raw image together
        if random.random() < self.rotation_chance:
            theta1 = np.random.uniform(low=-1, high=1) * self.max_rotation
        else:
            theta1 = 0.0
        if random.random() < self.rotation_chance:
            theta2 = np.random.uniform(low=-1, high=1) * self.max_rotation
        else:
            theta2 = 0.0
        c1 = [W/2, H/2]
        s1 = np.random.uniform(low=0.6, high=1.0)
        c2 = [W/2, H/2]
        s2 = np.random.uniform(low=0.6, high=1.0)
        c1[0] += np.random.uniform(low=-1, high=1) * 50
        c2[1] += np.random.uniform(low=-1, high=1) * 50
        # NOTE(review): the two lines below repeat the two above, so c1[0] and
        # c2[1] are jittered twice while c1[1] and c2[0] are never jittered --
        # possibly meant to be c1[1]/c2[0]; confirm with the author.
        c1[0] += np.random.uniform(low=-1, high=1) * 50
        c2[1] += np.random.uniform(low=-1, high=1) * 50
        query_img = crop(query_img, c1, s1, (constants.MAX_SIZE, constants.MAX_SIZE), rot=theta1)
        query_keypoints_xy = transform(query_keypoints_xy.T, c1, s1, (constants.MAX_SIZE, constants.MAX_SIZE), rot=theta1).T
        nn_img = crop(nn_img, c2, s2, (constants.MAX_SIZE, constants.MAX_SIZE), rot=theta2)
        nn_keypoints_xy = transform(nn_keypoints_xy.T, c2, s2, (constants.MAX_SIZE, constants.MAX_SIZE), rot=theta2).T
        H, W = constants.MAX_SIZE, constants.MAX_SIZE
        # query_img = tvtf.rotate(query_img, theta)
        # # adjust labels accordingly
        # query_keypoints_xy = tvtf.rotate(torch.tensor(query_keypoints_xy), theta).numpy()
        # if random.random() < self.rotation_chance:
        #     # theta = np.random.uniform(low=-1, high=1) * self.max_rotation
        #     theta=30
        #     nn_img = tvtf.rotate(nn_img, theta)
        #     nn_keypoints_xy = tvtf.rotate(torch.tensor(nn_keypoints_xy), theta).numpy()
        sbs_img = torch.cat([query_img, nn_img], axis=-1) # 3 x H x 2*W
        corrs = np.concatenate([query_keypoints_xy, nn_keypoints_xy], axis=1) # N x 4, x1y1x2y2
        mask_query = np.logical_and(np.logical_and(query_keypoints_xy[:,0]>0, query_keypoints_xy[:,0]<W), np.logical_and(query_keypoints_xy[:,1]>0, query_keypoints_xy[:,1]<H))
        mask_nn = np.logical_and(np.logical_and(nn_keypoints_xy[:,0]>0, nn_keypoints_xy[:,0]<W), np.logical_and(nn_keypoints_xy[:,1]>0, nn_keypoints_xy[:,1]<H))
        mask = np.logical_and(mask_nn, mask_query)
        corrs = corrs[mask]
        # if corrs.shape[0] < self.num_kp:
        if len(corrs) == 0:
            # print('bad example')
            return self.__getitem__(random.randint(0, self.__len__() - 1))
        corrs = self._trim_corrs(corrs)
        # # for cv2 vis
        # sbs_img_np = sbs_img.numpy()
        # sbs_img_np = (np.transpose(sbs_img_np, (1, 2, 0))*255.0).astype(np.uint8).copy()
        # for i in range(len(corrs)):
        #     corr = corrs[i] # 4
        #     sbs_img_np = cv2.line(sbs_img_np, (int(corr[0]), int(corr[1])), (int(corr[2])+W, int(corr[3])), (0, 255, 0), 1)
        # cv2.imwrite('corr.png', sbs_img_np[..., [2,1,0]]) # rgb->bgr
        # time.sleep(1)
        # assert(False)
        corrs[:, 2] += W
        corrs = corrs.astype(float)
        corrs /= np.array([W * 2, H, W * 2, H]).reshape(1, 4).astype(float)
        assert (0.0 <= corrs[:, 0]).all() and (corrs[:, 0] <= 0.5).all()
        assert (0.0 <= corrs[:, 1]).all() and (corrs[:, 1] <= 1.0).all()
        assert (0.5 <= corrs[:, 2]).all() and (corrs[:, 2] <= 1.0).all()
        assert (0.0 <= corrs[:, 3]).all() and (corrs[:, 3] <= 1.0).all()
        out = {
            'image': tvtf.normalize(sbs_img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            'corrs': torch.from_numpy(corrs).float(),
        }
        if self.bidirectional:
            out['queries'] = torch.from_numpy(np.concatenate([corrs[:, :2], corrs[:, 2:]], axis=0)).float()
            out['targets'] = torch.from_numpy(np.concatenate([corrs[:, 2:], corrs[:, :2]], axis=0)).float()
        else:
            out['queries'] = torch.from_numpy(corrs[:, :2]).float()
            out['targets'] = torch.from_numpy(corrs[:, 2:]).float()
        return out
# class COTRZoomDataset(COTRDataset):
# def __init__(self, opt, dataset_type: str):
# assert opt.crop_cam in ['no_crop', 'crop_center']
# assert opt.use_ram == False
# super().__init__(opt, dataset_type)
# self.zoom_start = opt.zoom_start
# self.zoom_end = opt.zoom_end
# self.zoom_levels = opt.zoom_levels
# self.zoom_jitter = opt.zoom_jitter
# self.zooms = np.logspace(np.log10(opt.zoom_start),
# np.log10(opt.zoom_end),
# num=opt.zoom_levels)
# def get_corrs(self, from_cap, to_cap, reduced_size=None):
# from_y, from_x = np.where(from_cap.depth_map > 0)
# from_y, from_x = from_y[..., None], from_x[..., None]
# if reduced_size is not None:
# filter_idx = np.random.choice(from_y.shape[0], reduced_size, replace=False)
# from_y, from_x = from_y[filter_idx], from_x[filter_idx]
# from_z = from_cap.depth_map[np.floor(from_y).astype('int'), np.floor(from_x).astype('int')]
# from_xy = np.concatenate([from_x, from_y], axis=1)
# from_3d_world, valid_index_1 = pcd_projector.PointCloudProjector.pcd_2d_to_pcd_3d_np(from_xy, from_z, from_cap.pinhole_cam.intrinsic_mat, motion=from_cap.cam_pose.camera_to_world, return_index=True)
# to_xyz, valid_index_2 = pcd_projector.PointCloudProjector.pcd_3d_to_pcd_2d_np(
# from_3d_world,
# to_cap.pinhole_cam.intrinsic_mat,
# to_cap.cam_pose.world_to_camera[0:3, :],
# to_cap.image.shape[:2],
# keep_z=True,
# crop=True,
# filter_neg=True,
# norm_coord=False,
# return_index=True,
# )
# to_xy = to_xyz[:, 0:2]
# to_z_proj = to_xyz[:, 2:3]
# to_z = to_cap.depth_map[np.floor(to_xy[:, 1:2]).astype('int'), np.floor(to_xy[:, 0:1]).astype('int')]
# mask = (abs(to_z - to_z_proj) < 0.5)[:, 0]
# if mask.sum() > 0:
# return np.concatenate([from_xy[valid_index_1][valid_index_2][mask], to_xy[mask]], axis=1)
# else:
# return None
# def get_seed_corr(self, from_cap, to_cap, max_try=100):
# seed_corr = self.get_corrs(from_cap, to_cap, reduced_size=max_try)
# if seed_corr is None:
# return None
# shuffle = np.random.permutation(seed_corr.shape[0])
# seed_corr = np.take(seed_corr, shuffle, axis=0)
# return seed_corr[0]
# def get_zoomed_cap(self, cap, pos, scale, jitter):
# patch = inference_helper.get_patch_centered_at(cap.image, pos, scale=scale, return_content=False)
# patch = inference_helper.get_patch_centered_at(cap.image,
# pos + np.array([patch.w, patch.h]) * np.random.uniform(-jitter, jitter, 2),
# scale=scale,
# return_content=False)
# zoom_config = CropCamConfig(x=patch.x,
# y=patch.y,
# w=patch.w,
# h=patch.h,
# out_w=constants.MAX_SIZE,
# out_h=constants.MAX_SIZE,
# orig_w=cap.shape[1],
# orig_h=cap.shape[0])
# zoom_cap = capture.crop_capture(cap, zoom_config)
# return zoom_cap
# def __getitem__(self, index):
# assert self.opt.k_size == 1
# query_cap, nn_caps = self.sfm_dataset.get_query_with_knn(index)
# nn_cap = nn_caps[0]
# if self.need_rotation:
# query_cap, nn_cap = self.augment_with_rotation(query_cap, nn_cap)
# # find seed
# seed_corr = self.get_seed_corr(nn_cap, query_cap)
# if seed_corr is None:
# return self.__getitem__(random.randint(0, self.__len__() - 1))
# # crop cap
# s = np.random.choice(self.zooms)
# nn_zoom_cap = self.get_zoomed_cap(nn_cap, seed_corr[:2], s, 0)
# query_zoom_cap = self.get_zoomed_cap(query_cap, seed_corr[2:], s, self.zoom_jitter)
# assert nn_zoom_cap.shape == query_zoom_cap.shape == (constants.MAX_SIZE, constants.MAX_SIZE)
# corrs = self.get_corrs(query_zoom_cap, nn_zoom_cap)
# if corrs is None or corrs.shape[0] < self.num_kp:
# return self.__getitem__(random.randint(0, self.__len__() - 1))
# shuffle = np.random.permutation(corrs.shape[0])
# corrs = np.take(corrs, shuffle, axis=0)
# corrs = self._trim_corrs(corrs)
# # flip augmentation
# if np.random.uniform() < 0.5:
# corrs[:, 0] = constants.MAX_SIZE - 1 - corrs[:, 0]
# corrs[:, 2] = constants.MAX_SIZE - 1 - corrs[:, 2]
# sbs_img = two_images_side_by_side(np.fliplr(query_zoom_cap.image), np.fliplr(nn_zoom_cap.image))
# else:
# sbs_img = two_images_side_by_side(query_zoom_cap.image, nn_zoom_cap.image)
# corrs[:, 2] += constants.MAX_SIZE
# corrs /= np.array([constants.MAX_SIZE * 2, constants.MAX_SIZE, constants.MAX_SIZE * 2, constants.MAX_SIZE])
# assert (0.0 <= corrs[:, 0]).all() and (corrs[:, 0] <= 0.5).all()
# assert (0.0 <= corrs[:, 1]).all() and (corrs[:, 1] <= 1.0).all()
# assert (0.5 <= corrs[:, 2]).all() and (corrs[:, 2] <= 1.0).all()
# assert (0.0 <= corrs[:, 3]).all() and (corrs[:, 3] <= 1.0).all()
# out = {
# 'image': tvtf.normalize(tvtf.to_tensor(sbs_img), (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
# 'corrs': torch.from_numpy(corrs).float(),
# }
# if self.bidirectional:
# out['queries'] = torch.from_numpy(np.concatenate([corrs[:, :2], corrs[:, 2:]], axis=0)).float()
# out['targets'] = torch.from_numpy(np.concatenate([corrs[:, 2:], corrs[:, :2]], axis=0)).float()
# else:
# out['queries'] = torch.from_numpy(corrs[:, :2]).float()
# out['targets'] = torch.from_numpy(corrs[:, 2:]).float()
# return out
| [
"COTR.datasets.tracking_datasets.get_dataset",
"torch.cat",
"numpy.sin",
"numpy.arange",
"numpy.linalg.norm",
"COTR.datasets.utils.geom.get_pts_inbound_lrt",
"numpy.transpose",
"numpy.random.choice",
"torch.zeros",
"torch.is_tensor",
"COTR.datasets.utils.geom.safe_inverse",
"cv2.resize",
"nu... | [((649, 672), 'torch.is_tensor', 'torch.is_tensor', (['tensor'], {}), '(tensor)\n', (664, 672), False, 'import torch\n'), ((947, 975), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (959, 975), True, 'import numpy as np\n'), ((1032, 1060), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (1044, 1060), True, 'import numpy as np\n'), ((2734, 2753), 'numpy.zeros', 'np.zeros', (['new_shape'], {}), '(new_shape)\n', (2742, 2753), True, 'import numpy as np\n'), ((4028, 4044), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4036, 4044), True, 'import numpy as np\n'), ((5158, 5175), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (5164, 5175), True, 'import numpy as np\n'), ((1240, 1265), 'torch.from_numpy', 'torch.from_numpy', (['ndarray'], {}), '(ndarray)\n', (1256, 1265), False, 'import torch\n'), ((3621, 3649), 'imutils.rotate', 'imutils.rotate', (['new_img', 'rot'], {}), '(new_img, rot)\n', (3635, 3649), False, 'import imutils\n'), ((3723, 3798), 'cv2.resize', 'cv2.resize', (['new_img'], {'dsize': '(res[0], res[1])', 'interpolation': 'cv2.INTER_LINEAR'}), '(new_img, dsize=(res[0], res[1]), interpolation=cv2.INTER_LINEAR)\n', (3733, 3798), False, 'import cv2\n'), ((4334, 4350), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4342, 4350), True, 'import numpy as np\n'), ((4582, 4591), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4588, 4591), True, 'import numpy as np\n'), ((4999, 5015), 'numpy.linalg.inv', 'np.linalg.inv', (['t'], {}), '(t)\n', (5012, 5015), True, 'import numpy as np\n'), ((5943, 6029), 'COTR.datasets.tracking_datasets.get_dataset', 'tracking_datasets.get_dataset', (['"""cater"""'], {'seqlen': '(6)', 'shuffle': '(False)', 'env': 'dataset_type'}), "('cater', seqlen=6, shuffle=False, env=\n dataset_type)\n", (5972, 6029), False, 'from COTR.datasets import tracking_datasets\n'), ((8437, 8468), 'torch.cat', 'torch.cat', 
(['filtered_xyzs'], {'dim': '(1)'}), '(filtered_xyzs, dim=1)\n', (8446, 8468), False, 'import torch\n'), ((8555, 8649), 'COTR.datasets.utils.geom.camera2pixels', 'tracking_utils.geom.camera2pixels', (['nn_xyz_camXs', 'pix_T_camXs[nn_frame_id:nn_frame_id + 1]'], {}), '(nn_xyz_camXs, pix_T_camXs[nn_frame_id:\n nn_frame_id + 1])\n', (8588, 8649), True, 'import COTR.datasets.utils as tracking_utils\n'), ((9109, 9164), 'COTR.datasets.utils.geom.apply_4x4', 'tracking_utils.geom.apply_4x4', (['query_T_nn', 'nn_xyz_camXs'], {}), '(query_T_nn, nn_xyz_camXs)\n', (9138, 9164), True, 'import COTR.datasets.utils as tracking_utils\n'), ((9191, 9294), 'COTR.datasets.utils.geom.camera2pixels', 'tracking_utils.geom.camera2pixels', (['query_xyz_camXs', 'pix_T_camXs[query_frame_id:query_frame_id + 1]'], {}), '(query_xyz_camXs, pix_T_camXs[\n query_frame_id:query_frame_id + 1])\n', (9224, 9294), True, 'import COTR.datasets.utils as tracking_utils\n'), ((9969, 10005), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.6)', 'high': '(1.0)'}), '(low=0.6, high=1.0)\n', (9986, 10005), True, 'import numpy as np\n'), ((10045, 10081), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.6)', 'high': '(1.0)'}), '(low=0.6, high=1.0)\n', (10062, 10081), True, 'import numpy as np\n'), ((11309, 11348), 'torch.cat', 'torch.cat', (['[query_img, nn_img]'], {'axis': '(-1)'}), '([query_img, nn_img], axis=-1)\n', (11318, 11348), False, 'import torch\n'), ((11381, 11442), 'numpy.concatenate', 'np.concatenate', (['[query_keypoints_xy, nn_keypoints_xy]'], {'axis': '(1)'}), '([query_keypoints_xy, nn_keypoints_xy], axis=1)\n', (11395, 11442), True, 'import numpy as np\n'), ((11813, 11848), 'numpy.logical_and', 'np.logical_and', (['mask_nn', 'mask_query'], {}), '(mask_nn, mask_query)\n', (11827, 11848), True, 'import numpy as np\n'), ((1279, 1303), 'torch.is_tensor', 'torch.is_tensor', (['ndarray'], {}), '(ndarray)\n', (1294, 1303), False, 'import torch\n'), ((1739, 1761), 
'numpy.math.floor', 'np.math.floor', (['(ht / sf)'], {}), '(ht / sf)\n', (1752, 1761), True, 'import numpy as np\n'), ((1784, 1806), 'numpy.math.floor', 'np.math.floor', (['(wd / sf)'], {}), '(wd / sf)\n', (1797, 1806), True, 'import numpy as np\n'), ((2059, 2130), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(new_wd, new_ht)', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, dsize=(new_wd, new_ht), interpolation=cv2.INTER_LINEAR)\n', (2069, 2130), False, 'import cv2\n'), ((2254, 2270), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2262, 2270), True, 'import numpy as np\n'), ((2356, 2369), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2364, 2369), True, 'import numpy as np\n'), ((4402, 4417), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (4408, 4417), True, 'import numpy as np\n'), ((4419, 4434), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (4425, 4434), True, 'import numpy as np\n'), ((5115, 5134), 'numpy.ones_like', 'np.ones_like', (['pt[0]'], {}), '(pt[0])\n', (5127, 5134), True, 'import numpy as np\n'), ((6151, 6188), 'numpy.random.choice', 'np.random.choice', (['length', 'self.num_kp'], {}), '(length, self.num_kp)\n', (6167, 6188), True, 'import numpy as np\n'), ((6256, 6302), 'numpy.random.choice', 'np.random.choice', (['length', '(self.num_kp - length)'], {}), '(length, self.num_kp - length)\n', (6272, 6302), True, 'import numpy as np\n'), ((6322, 6372), 'numpy.concatenate', 'np.concatenate', (['[in_corrs, in_corrs[mask]]'], {'axis': '(0)'}), '([in_corrs, in_corrs[mask]], axis=0)\n', (6336, 6372), True, 'import numpy as np\n'), ((6666, 6681), 'random.random', 'random.random', ([], {}), '()\n', (6679, 6681), False, 'import random\n'), ((6856, 6871), 'random.random', 'random.random', ([], {}), '()\n', (6869, 6871), False, 'import random\n'), ((7834, 7846), 'numpy.arange', 'np.arange', (['S'], {}), '(S)\n', (7843, 7846), True, 'import numpy as np\n'), ((9020, 9068), 'COTR.datasets.utils.geom.safe_inverse', 
'tracking_utils.geom.safe_inverse', (['origin_T_query'], {}), '(origin_T_query)\n', (9052, 9068), True, 'import COTR.datasets.utils as tracking_utils\n'), ((9611, 9626), 'random.random', 'random.random', ([], {}), '()\n', (9624, 9626), False, 'import random\n'), ((9777, 9792), 'random.random', 'random.random', ([], {}), '()\n', (9790, 9792), False, 'import random\n'), ((10101, 10134), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (10118, 10134), True, 'import numpy as np\n'), ((10157, 10190), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (10174, 10190), True, 'import numpy as np\n'), ((10214, 10247), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (10231, 10247), True, 'import numpy as np\n'), ((10270, 10303), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (10287, 10303), True, 'import numpy as np\n'), ((11497, 11571), 'numpy.logical_and', 'np.logical_and', (['(query_keypoints_xy[:, 0] > 0)', '(query_keypoints_xy[:, 0] < W)'], {}), '(query_keypoints_xy[:, 0] > 0, query_keypoints_xy[:, 0] < W)\n', (11511, 11571), True, 'import numpy as np\n'), ((11567, 11641), 'numpy.logical_and', 'np.logical_and', (['(query_keypoints_xy[:, 1] > 0)', '(query_keypoints_xy[:, 1] < H)'], {}), '(query_keypoints_xy[:, 1] > 0, query_keypoints_xy[:, 1] < H)\n', (11581, 11641), True, 'import numpy as np\n'), ((11670, 11738), 'numpy.logical_and', 'np.logical_and', (['(nn_keypoints_xy[:, 0] > 0)', '(nn_keypoints_xy[:, 0] < W)'], {}), '(nn_keypoints_xy[:, 0] > 0, nn_keypoints_xy[:, 0] < W)\n', (11684, 11738), True, 'import numpy as np\n'), ((11734, 11802), 'numpy.logical_and', 'np.logical_and', (['(nn_keypoints_xy[:, 1] > 0)', '(nn_keypoints_xy[:, 1] < H)'], {}), '(nn_keypoints_xy[:, 1] > 0, nn_keypoints_xy[:, 1] < H)\n', (11748, 11802), True, 'import numpy as 
np\n'), ((13040, 13109), 'torchvision.transforms.functional.normalize', 'tvtf.normalize', (['sbs_img', '(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '(sbs_img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (13054, 13109), True, 'from torchvision.transforms import functional as tvtf\n'), ((1852, 1893), 'torch.zeros', 'torch.zeros', (['res[0]', 'res[1]', 'img.shape[2]'], {}), '(res[0], res[1], img.shape[2])\n', (1863, 1893), False, 'import torch\n'), ((1947, 1974), 'torch.zeros', 'torch.zeros', (['res[0]', 'res[1]'], {}), '(res[0], res[1])\n', (1958, 1974), False, 'import torch\n'), ((2492, 2515), 'numpy.linalg.norm', 'np.linalg.norm', (['(br - ul)'], {}), '(br - ul)\n', (2506, 2515), True, 'import numpy as np\n'), ((4749, 4765), 'numpy.dot', 'np.dot', (['t_mat', 't'], {}), '(t_mat, t)\n', (4755, 4765), True, 'import numpy as np\n'), ((6726, 6759), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (6743, 6759), True, 'import numpy as np\n'), ((6916, 6949), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (6933, 6949), True, 'import numpy as np\n'), ((9672, 9705), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (9689, 9705), True, 'import numpy as np\n'), ((9838, 9871), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (9855, 9871), True, 'import numpy as np\n'), ((8148, 8296), 'COTR.datasets.utils.geom.get_pts_inbound_lrt', 'tracking_utils.geom.get_pts_inbound_lrt', (['xyz_camXs[nn_frame_id:nn_frame_id + 1]', 'lrtlist_camXs[nn_frame_id:nn_frame_id + 1, obj_id]'], {'add_pad': '(0.1)'}), '(xyz_camXs[nn_frame_id:nn_frame_id +\n 1], lrtlist_camXs[nn_frame_id:nn_frame_id + 1, obj_id], add_pad=0.1)\n', (8187, 8296), True, 'import COTR.datasets.utils as tracking_utils\n'), ((13132, 13155), 'torch.from_numpy', 'torch.from_numpy', (['corrs'], 
{}), '(corrs)\n', (13148, 13155), False, 'import torch\n'), ((13466, 13496), 'torch.from_numpy', 'torch.from_numpy', (['corrs[:, :2]'], {}), '(corrs[:, :2])\n', (13482, 13496), False, 'import torch\n'), ((13534, 13564), 'torch.from_numpy', 'torch.from_numpy', (['corrs[:, 2:]'], {}), '(corrs[:, 2:])\n', (13550, 13564), False, 'import torch\n'), ((12652, 12682), 'numpy.array', 'np.array', (['[W * 2, H, W * 2, H]'], {}), '([W * 2, H, W * 2, H])\n', (12660, 12682), True, 'import numpy as np\n'), ((13253, 13305), 'numpy.concatenate', 'np.concatenate', (['[corrs[:, :2], corrs[:, 2:]]'], {'axis': '(0)'}), '([corrs[:, :2], corrs[:, 2:]], axis=0)\n', (13267, 13305), True, 'import numpy as np\n'), ((13361, 13413), 'numpy.concatenate', 'np.concatenate', (['[corrs[:, 2:], corrs[:, :2]]'], {'axis': '(0)'}), '([corrs[:, 2:], corrs[:, :2]], axis=0)\n', (13375, 13413), True, 'import numpy as np\n')] |
import tensorflow as tf
from multiprocessing import Process, Queue
import os
import datetime
import numpy as np
from impala.model import ImpalaModel, Learner, Actor
from impala.replay_buffer import UniformBuffer, PrioritizedBuffer
from common.logger import TensorBoardWriter, CSVWriter, CombinedWriter
import utils as U
from impala.py_process import PyProcessHook
def get_default_parameters():
    """Return the default IMPALA training hyper-parameters as a dict."""
    defaults = dict(
        batch_size=2,
        entropy_scale=0.1,
        horizon=256,
        learning_rate=2.0e-4,
        max_steps=50000,
        rho_clip=2.0,
        sequence_length=128,
    )
    return defaults
def parameter_grid_search():
    """Yield one hyper-parameter dict per point of the search grid.

    The Cartesian product of the candidate lists below is iterated in the
    same order as the original nested loops (batch_size outermost,
    learning_rate innermost / fastest varying).

    Usage:
        for param_kws in parameter_grid_search():
            print(param_kws)

    :return: generator of dicts with keys batch_size, entropy_scale,
             horizon, learning_rate, sequence_length
    """
    from itertools import product  # local import keeps the module import block unchanged
    horizons = [256]
    batch_sizes = [16]
    sequence_lengths = [16]
    learning_rates = [2.0e-4, 4.0e-4]
    entropy_scales = [1e-1, 1e-2, 1e-3]
    # itertools.product replaces five nested for-loops; the rightmost
    # iterable varies fastest, exactly like the original nesting.
    for bs, hor, seq_len, es, lr in product(batch_sizes, horizons,
                                            sequence_lengths, entropy_scales,
                                            learning_rates):
        yield dict(batch_size=bs, entropy_scale=es, horizon=hor,
                   learning_rate=lr, sequence_length=seq_len)
class NormalizeObservationsRewards:
    """Normalize observations and rewards with running mean/std statistics.

    Statistics live in ``U.TfRunningMeanStd`` TF variables so they can be
    shared across distributed workers; normalized values are clipped to
    [-clip_value, clip_value].  (Refactor: the whitening expression was
    duplicated four times and ``setup`` duplicated
    ``get_values_from_tf_graph`` verbatim -- both factored out, behavior
    unchanged.)
    """
    def __init__(self, observation_space, clip_value=10.0, epsilon=1e-8):
        self.obs_shape = observation_space.shape
        self.ob_rms = U.TfRunningMeanStd(shape=observation_space.shape, scope='RunningMeanStd/Obs')
        self.ret_rms = U.TfRunningMeanStd(shape=(), scope='RunningMeanStd/Rew')
        self.clip = clip_value  # normalized values are clipped to +/- clip
        self.epsilon = epsilon  # numerical floor under the variance
    def _whiten(self, values, rms):
        """Whiten ``values`` with the running stats ``rms`` and clip."""
        return np.clip((values - rms.mean) / np.sqrt(rms.var + self.epsilon), -self.clip, self.clip)
    def normalize_and_update(self, obs, rewards):
        """ normalize inputs and update internal running mean/std parameters """
        return self._ob_filter(obs), self._reward_filter(rewards.flatten())
    def normalize(self, obs, rewards, update_internal_with_session=None):
        """ only normalize inputs and rewards (optionally refreshing the local
        statistics from the TF graph first) """
        if update_internal_with_session:
            self.get_values_from_tf_graph(session=update_internal_with_session)
        flatten_obs = obs.reshape((-1, *self.obs_shape))
        return self._whiten(flatten_obs, self.ob_rms), self._whiten(rewards.flatten(), self.ret_rms)
    def normalize_observation(self, obs):
        """ only normalize inputs"""
        return self._whiten(obs, self.ob_rms)
    def _ob_filter(self, obs):
        # flatten observations for calculating mean and std along axis=0
        flatten_obs = obs.reshape((-1, *self.obs_shape))
        self.ob_rms.update(flatten_obs)
        return self._whiten(flatten_obs, self.ob_rms)
    def _reward_filter(self, rewards):
        self.ret_rms.update(rewards)
        return self._whiten(rewards, self.ret_rms)
    def setup(self, session=None):
        """ get_values_from_tf_graph and copy into classes (alias kept for callers) """
        self.get_values_from_tf_graph(session=session)
    def get_values_from_tf_graph(self, session=None):
        """ fetch running mean/std values from the TF graph into the local copies """
        self.ob_rms.get_values_from_tf_graph(session)
        self.ret_rms.get_values_from_tf_graph(session)
    def update_ops(self, obs, rewards, session):
        """ run the TF update ops for both running statistics """
        flatten_obs = obs.reshape((-1, *self.obs_shape))
        if not session.should_stop():
            self.ob_rms.update(flatten_obs, session=session)
            self.ret_rms.update(rewards.flatten(), session=session)
def training(cluster,
             job_name,
             task_index,
             queue,
             kwargs,
             horizon,
             sequence_length,
             learning_rate,
             entropy_scale,
             max_steps,
             batch_size):
    """
    Trains the architecture
    learner updates the parameters of the NN according to Actor-Critic
    actors roll-out policy and put trajectories into queue
    :param cluster: tf.train.ClusterSpec listing the 'learner' and 'worker' hosts
    :param job_name: either 'learner' (the chief) or 'worker' (an actor)
    :param task_index: index of this task within its job
    :param queue: multiprocessing.Queue used by actors to ship trajectories to the learner
    :param kwargs: configuration dict forwarded to U.make_env / Learner / Actor
    :param horizon: rollout length of one trajectory
    :param sequence_length: length of the sequences sampled for a learner update
    :param learning_rate: optimizer learning rate
    :param entropy_scale: weight of the entropy bonus in the loss
    :param max_steps: global step at which the StopAtStepHook terminates training
    :param batch_size: number of sequences per learner batch
    :return: None (blocks until training finishes, then joins the server)
    """
    print('==============================')
    print('Parsed Parameters for Training')
    print('============================== \n')
    print('Learning Rate: {}'.format(learning_rate))
    print('Horizon: {}'.format(horizon))
    print('Sequence Length: {}'.format(sequence_length))
    print('Entropy Scale: {}'.format(entropy_scale))
    print('Maximum Steps: {}'.format(max_steps))
    print('\n==============================')
    print('Started server: job_name={}, task_index={}'.format(job_name, task_index))
    date_string = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M")
    log_directory = os.path.join('/tmp/impala', date_string)
    is_chief = (job_name == 'learner')
    server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)
    device_name = '/job:{}/task:{}'.format(job_name, task_index)
    print('Place on tf model on device:', device_name)
    with tf.device(tf.train.replica_device_setter(worker_device=device_name, cluster=cluster)):
        # create model ...
        env = U.make_env(**kwargs) # share running mean ops across devices
        normalizer = NormalizeObservationsRewards(observation_space=env.observation_space)
        if is_chief: # only learners needs to build loss
            with tf.device('/gpu'):
                model = ImpalaModel(observation_shape=env.observation_space.shape,
                                    n_actions=env.action_space.n,
                                    learning_rate=learning_rate,
                                    entropy_scale=entropy_scale)
                model.build_loss()
                model.build_trainer()
            trajectory_buffer = PrioritizedBuffer(obs_shape=env.observation_space.shape,
                                                  batch_size=batch_size,
                                                  horizon=horizon,
                                                  sequence_length=sequence_length,
                                                  size=1000)
            logs = CombinedWriter(dir=log_directory)
            print('Logging to', log_directory)
            U.dump_dict_as_json(kwargs, directory=log_directory, file_name='configuration')
            worker = Learner(model=model, queue=queue, buffer=trajectory_buffer, logger=logs, norm=normalizer, **kwargs)
        else:
            with tf.device('/cpu'): # pin workers to CPU
                model = ImpalaModel(observation_shape=env.observation_space.shape,
                                    n_actions=env.action_space.n,
                                    learning_rate=learning_rate,
                                    entropy_scale=entropy_scale)
            worker = Actor(model=model, env=env, queue=queue, normalizer=normalizer, **kwargs)
    # The StopAtStepHook handles stopping after running given steps.
    # max_steps = 10000
    hooks = [tf.train.StopAtStepHook(last_step=max_steps)] # , PyProcessHook()]
    # TODO adjust tf.Config for flexible node placement on GPUs
    tf_config = tf.ConfigProto(allow_soft_placement=True, # soft placement to allow flexible training on CPU/GPU
                               intra_op_parallelism_threads=1, # speed up training time
                               inter_op_parallelism_threads=1) # number of physical cores
    # The MonitoredTrainingSession takes care of session initialization,
    # restoring from a checkpoint, saving to a checkpoint, and closing when done
    # or an error occurs.
    with tf.train.MonitoredTrainingSession(master=server.target,
                                           is_chief=is_chief,
                                           checkpoint_dir=os.path.join("/tmp/impala/", date_string),
                                           config=tf_config,
                                           save_checkpoint_secs=120,
                                           hooks=hooks) as mon_sess:
        normalizer.setup(session=mon_sess)
        while not mon_sess.should_stop():
            # learner batches from experience buffer and updates policy network
            # actors only enqueue trajectories into the FIFO queue
            worker.work(session=mon_sess)
    print('{}:{} wants to join ... Training finished!'.format(job_name, task_index))
    if is_chief:
        logs.close()
    server.join()
def play(args, **kwargs):
    """Run a trained policy: restore the latest checkpoint from ``args.dir``
    and roll out one rendered episode in the normalized environment."""
    print(args.dir)
    assert args.dir, 'Please provide directory where checkpoint file is located'
    kwargs['normalize'] = True
    # env.setup() is invoked after session creation so the stored obs/reward
    # running statistics can be restored into the wrappers.
    env = U.make_env(**kwargs)
    model = ImpalaModel(observation_shape=env.observation_space.shape,
                        n_actions=env.action_space.n,
                        learning_rate=0.01,
                        entropy_scale=0.0)
    print('Restore from:', args.dir)
    with tf.train.SingularMonitoredSession(checkpoint_dir=args.dir) as sess:
        env.setup(session=sess)  # restore values for running mean/std
        print('Restored from global step:', sess.run(model.global_step))
        try:
            obs = env.reset()
            print(obs)
            done = False
            while not done:
                env.render()
                action, _ = model.get_action_and_prob(session=sess, observation=obs)
                obs, reward, done, info = env.step(action)
        except KeyboardInterrupt:
            print('got KeyboardInterrupt')
        finally:
            pass
def main(args, **kwargs):
    """Spawn one learner process and ``num_actors`` actor processes that
    together form a local IMPALA cluster, then wait for them to finish."""
    print('--> Using the following configuration:')
    print(kwargs)
    num_actors = 2
    cluster = tf.train.ClusterSpec({
        "worker": ['localhost:{}'.format(8000 + i) for i in range(num_actors)],
        "learner": ["localhost:9000"]
    })
    queue = Queue(maxsize=100)
    # Hyper-parameters forwarded positionally to every training process.
    hyper_params = (kwargs['horizon'], kwargs['sequence_length'],
                    kwargs['learning_rate'], kwargs['entropy_scale'],
                    kwargs['max_steps'], kwargs['batch_size'])
    # Learner first, then the workers (same start order as before).
    jobs = [('learner', 0)] + [('worker', actor_id) for actor_id in range(num_actors)]
    processes = []
    for job_name, task_id in jobs:
        proc_args = (cluster, job_name, task_id, queue, kwargs) + hyper_params
        proc = Process(target=training, args=proc_args)
        # Daemonize so children terminate when the parent crashes.
        proc.daemon = True
        proc.start()
        processes.append(proc)
    print('ALL PROCESSES STARTED')
    # time.sleep(5)
    for proc in processes:
        proc.join()
    print('ALL JOINED')
if __name__ == '__main__':
    shared_job_device = '/job:learner/task:0'
    # NOTE(review): main(args, **kwargs) is called here with only the device
    # string and no hyper-parameter kwargs, yet main() reads kwargs['batch_size']
    # etc. -- as written this entry point would raise KeyError; confirm the
    # intended configuration source.
    main(shared_job_device)
| [
"impala.model.ImpalaModel",
"tensorflow.ConfigProto",
"multiprocessing.Queue",
"utils.dump_dict_as_json",
"os.path.join",
"tensorflow.train.Server",
"impala.replay_buffer.PrioritizedBuffer",
"tensorflow.train.StopAtStepHook",
"impala.model.Actor",
"datetime.datetime.now",
"common.logger.Combined... | [((5313, 5353), 'os.path.join', 'os.path.join', (['"""/tmp/impala"""', 'date_string'], {}), "('/tmp/impala', date_string)\n", (5325, 5353), False, 'import os\n'), ((5407, 5473), 'tensorflow.train.Server', 'tf.train.Server', (['cluster'], {'job_name': 'job_name', 'task_index': 'task_index'}), '(cluster, job_name=job_name, task_index=task_index)\n', (5422, 5473), True, 'import tensorflow as tf\n'), ((7748, 7857), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(allow_soft_placement=True, intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n', (7762, 7857), True, 'import tensorflow as tf\n'), ((9271, 9291), 'utils.make_env', 'U.make_env', ([], {}), '(**kwargs)\n', (9281, 9291), True, 'import utils as U\n'), ((9384, 9530), 'impala.model.ImpalaModel', 'ImpalaModel', ([], {'observation_shape': 'normed_env.observation_space.shape', 'n_actions': 'normed_env.action_space.n', 'learning_rate': '(0.01)', 'entropy_scale': '(0.0)'}), '(observation_shape=normed_env.observation_space.shape, n_actions\n =normed_env.action_space.n, learning_rate=0.01, entropy_scale=0.0)\n', (9395, 9530), False, 'from impala.model import ImpalaModel, Learner, Actor\n'), ((10645, 10663), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(100)'}), '(maxsize=100)\n', (10650, 10663), False, 'from multiprocessing import Process, Queue\n'), ((11070, 11107), 'multiprocessing.Process', 'Process', ([], {'target': 'training', 'args': 'params'}), '(target=training, args=params)\n', (11077, 11107), False, 'from multiprocessing import Process, Queue\n'), ((1498, 1575), 'utils.TfRunningMeanStd', 'U.TfRunningMeanStd', ([], {'shape': 'observation_space.shape', 'scope': '"""RunningMeanStd/Obs"""'}), "(shape=observation_space.shape, scope='RunningMeanStd/Obs')\n", (1516, 1575), True, 'import utils as U\n'), ((1599, 1655), 'utils.TfRunningMeanStd', 
'U.TfRunningMeanStd', ([], {'shape': '()', 'scope': '"""RunningMeanStd/Rew"""'}), "(shape=(), scope='RunningMeanStd/Rew')\n", (1617, 1655), True, 'import utils as U\n'), ((5732, 5752), 'utils.make_env', 'U.make_env', ([], {}), '(**kwargs)\n', (5742, 5752), True, 'import utils as U\n'), ((7599, 7643), 'tensorflow.train.StopAtStepHook', 'tf.train.StopAtStepHook', ([], {'last_step': 'max_steps'}), '(last_step=max_steps)\n', (7622, 7643), True, 'import tensorflow as tf\n'), ((9705, 9763), 'tensorflow.train.SingularMonitoredSession', 'tf.train.SingularMonitoredSession', ([], {'checkpoint_dir': 'args.dir'}), '(checkpoint_dir=args.dir)\n', (9738, 9763), True, 'import tensorflow as tf\n'), ((11348, 11385), 'multiprocessing.Process', 'Process', ([], {'target': 'training', 'args': 'params'}), '(target=training, args=params)\n', (11355, 11385), False, 'from multiprocessing import Process, Queue\n'), ((5241, 5264), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5262, 5264), False, 'import datetime\n'), ((5614, 5688), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'device_name', 'cluster': 'cluster'}), '(worker_device=device_name, cluster=cluster)\n', (5644, 5688), True, 'import tensorflow as tf\n'), ((6946, 7049), 'impala.model.Learner', 'Learner', ([], {'model': 'model', 'queue': 'queue', 'buffer': 'trajectory_buffer', 'logger': 'logs', 'norm': 'normalizer'}), '(model=model, queue=queue, buffer=trajectory_buffer, logger=logs,\n norm=normalizer, **kwargs)\n', (6953, 7049), False, 'from impala.model import ImpalaModel, Learner, Actor\n'), ((7418, 7491), 'impala.model.Actor', 'Actor', ([], {'model': 'model', 'env': 'env', 'queue': 'queue', 'normalizer': 'normalizer'}), '(model=model, env=env, queue=queue, normalizer=normalizer, **kwargs)\n', (7423, 7491), False, 'from impala.model import ImpalaModel, Learner, Actor\n'), ((2286, 2325), 'numpy.sqrt', 'np.sqrt', (['(self.ob_rms.var + self.epsilon)'], {}), 
'(self.ob_rms.var + self.epsilon)\n', (2293, 2325), True, 'import numpy as np\n'), ((2414, 2454), 'numpy.sqrt', 'np.sqrt', (['(self.ret_rms.var + self.epsilon)'], {}), '(self.ret_rms.var + self.epsilon)\n', (2421, 2454), True, 'import numpy as np\n'), ((2632, 2671), 'numpy.sqrt', 'np.sqrt', (['(self.ob_rms.var + self.epsilon)'], {}), '(self.ob_rms.var + self.epsilon)\n', (2639, 2671), True, 'import numpy as np\n'), ((2968, 3007), 'numpy.sqrt', 'np.sqrt', (['(self.ob_rms.var + self.epsilon)'], {}), '(self.ob_rms.var + self.epsilon)\n', (2975, 3007), True, 'import numpy as np\n'), ((3194, 3234), 'numpy.sqrt', 'np.sqrt', (['(self.ret_rms.var + self.epsilon)'], {}), '(self.ret_rms.var + self.epsilon)\n', (3201, 3234), True, 'import numpy as np\n'), ((5961, 5978), 'tensorflow.device', 'tf.device', (['"""/gpu"""'], {}), "('/gpu')\n", (5970, 5978), True, 'import tensorflow as tf\n'), ((6004, 6155), 'impala.model.ImpalaModel', 'ImpalaModel', ([], {'observation_shape': 'env.observation_space.shape', 'n_actions': 'env.action_space.n', 'learning_rate': 'learning_rate', 'entropy_scale': 'entropy_scale'}), '(observation_shape=env.observation_space.shape, n_actions=env.\n action_space.n, learning_rate=learning_rate, entropy_scale=entropy_scale)\n', (6015, 6155), False, 'from impala.model import ImpalaModel, Learner, Actor\n'), ((6368, 6513), 'impala.replay_buffer.PrioritizedBuffer', 'PrioritizedBuffer', ([], {'obs_shape': 'env.observation_space.shape', 'batch_size': 'batch_size', 'horizon': 'horizon', 'sequence_length': 'sequence_length', 'size': '(1000)'}), '(obs_shape=env.observation_space.shape, batch_size=\n batch_size, horizon=horizon, sequence_length=sequence_length, size=1000)\n', (6385, 6513), False, 'from impala.replay_buffer import UniformBuffer, PrioritizedBuffer\n'), ((6744, 6777), 'common.logger.CombinedWriter', 'CombinedWriter', ([], {'dir': 'log_directory'}), '(dir=log_directory)\n', (6758, 6777), False, 'from common.logger import TensorBoardWriter, CSVWriter, 
CombinedWriter\n'), ((6845, 6924), 'utils.dump_dict_as_json', 'U.dump_dict_as_json', (['kwargs'], {'directory': 'log_directory', 'file_name': '"""configuration"""'}), "(kwargs, directory=log_directory, file_name='configuration')\n", (6864, 6924), True, 'import utils as U\n'), ((7077, 7094), 'tensorflow.device', 'tf.device', (['"""/cpu"""'], {}), "('/cpu')\n", (7086, 7094), True, 'import tensorflow as tf\n'), ((7142, 7293), 'impala.model.ImpalaModel', 'ImpalaModel', ([], {'observation_shape': 'env.observation_space.shape', 'n_actions': 'env.action_space.n', 'learning_rate': 'learning_rate', 'entropy_scale': 'entropy_scale'}), '(observation_shape=env.observation_space.shape, n_actions=env.\n action_space.n, learning_rate=learning_rate, entropy_scale=entropy_scale)\n', (7153, 7293), False, 'from impala.model import ImpalaModel, Learner, Actor\n'), ((8391, 8432), 'os.path.join', 'os.path.join', (['"""/tmp/impala/"""', 'date_string'], {}), "('/tmp/impala/', date_string)\n", (8403, 8432), False, 'import os\n')] |
# --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by <NAME> (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
from pycocotools.coco import COCO
import cv2
import numpy as np
# One random BGR colour per COCO category id (ids are 1-based).
color_palette = np.random.randint(0, 255, (90, 3))
draw_boxes = True
data_root = '.'
split_name = 'val2017'
ann_file = '{}/annotations/instances_{}.json'.format(data_root, split_name)
coco_api = COCO(ann_file)
for image_id in coco_api.imgs:
    image_meta = coco_api.loadImgs(image_id)[0]
    ann_ids = coco_api.getAnnIds(imgIds=image_meta['id'], iscrowd=None)
    frame = cv2.imread('{}/{}/{}'.format(data_root, split_name, image_meta['file_name']))
    # Draw every annotated bounding box onto the frame.
    for ann in coco_api.loadAnns(ann_ids):
        bbox = ann['bbox']
        category = ann['category_id']
        if draw_boxes:
            top_left = (int(bbox[0]), int(bbox[1]))
            bottom_right = (int(bbox[0] + bbox[2] - 1), int(bbox[1] + bbox[3] - 1))
            cv2.rectangle(frame, top_left, bottom_right,
                          color_palette[category - 1].tolist(), 3)
    cv2.imshow('img', frame)
    cv2.waitKey(200)
print('done')
| [
"cv2.waitKey",
"pycocotools.coco.COCO",
"numpy.random.randint",
"cv2.imshow"
] | [((287, 321), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(90, 3)'], {}), '(0, 255, (90, 3))\n', (304, 321), True, 'import numpy as np\n'), ((450, 463), 'pycocotools.coco.COCO', 'COCO', (['annFile'], {}), '(annFile)\n', (454, 463), False, 'from pycocotools.coco import COCO\n'), ((978, 999), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'im'], {}), "('img', im)\n", (988, 999), False, 'import cv2\n'), ((1004, 1020), 'cv2.waitKey', 'cv2.waitKey', (['(200)'], {}), '(200)\n', (1015, 1020), False, 'import cv2\n')] |
#!/usr/bin/env python3
import sys
import math
import numpy as np
import glob
import scipy.optimize as optimization
import os
from itertools import islice
import re
#Angle Types:
# 1-16 -> SPS
# 17-20 -> SBB
# 21-36 -> 3PSB5
# 37-52 -> 5PSB3
# Watson-Crick complement for each base; used to derive strand-2 sequences.
WC = {
    'A': 'T',
    'C': 'G',
    'G': 'C',
    'T': 'A'
}
# Map 'angleKind/baseStep' -> numeric angle type id.
# Ranges match the header comment: SPS 1-16, SBB 17-20, 3PSB5 21-36, 5PSB3 37-52.
typeAngle = {
    'SPS/AA': 1,
    'SPS/AC': 2,
    'SPS/AG': 3,
    'SPS/AT': 4,
    'SPS/CA': 5,
    'SPS/CC': 6,
    'SPS/CG': 7,
    'SPS/CT': 8,
    'SPS/GA': 9,
    'SPS/GC': 10,
    'SPS/GG': 11,
    'SPS/GT': 12,
    'SPS/TA': 13,
    'SPS/TC': 14,
    'SPS/TG': 15,
    'SPS/TT': 16,
    'SBB/AT': 17,
    'SBB/CG': 18,
    'SBB/GC': 19,
    'SBB/TA': 20,
    '3PSB5/AA': 21,
    '3PSB5/AC': 22,
    '3PSB5/AG': 23,
    '3PSB5/AT': 24,
    '3PSB5/CA': 25,
    '3PSB5/CC': 26,
    '3PSB5/CG': 27,
    '3PSB5/CT': 28,
    '3PSB5/GA': 29,
    '3PSB5/GC': 30,
    '3PSB5/GG': 31,
    '3PSB5/GT': 32,
    '3PSB5/TA': 33,
    '3PSB5/TC': 34,
    '3PSB5/TG': 35,
    '3PSB5/TT': 36,
    '5PSB3/AA': 37,
    '5PSB3/AC': 38,
    '5PSB3/AG': 39,
    '5PSB3/AT': 40,
    '5PSB3/CA': 41,
    '5PSB3/CC': 42,
    '5PSB3/CG': 43,
    '5PSB3/CT': 44,
    '5PSB3/GA': 45,
    '5PSB3/GC': 46,
    '5PSB3/GG': 47,
    '5PSB3/GT': 48,
    '5PSB3/TA': 49,
    '5PSB3/TC': 50,
    '5PSB3/TG': 51,
    '5PSB3/TT': 52
}
# Harmonic force constant per angle type id (units not stated here -- confirm
# against the force-field definition).
kangle = {
    1: 52.297400,
    2: 55.454400,
    3: 47.686800,
    4: 66.077800,
    5: 45.379000,
    6: 64.938800,
    7: 43.603800,
    8: 76.369000,
    9: 45.815600,
    10: 50.919600,
    11: 68.554600,
    12: 57.550000,
    13: 52.805600,
    14: 68.361800,
    15: 59.717400,
    16: 93.246000,
    17: 103.155400,
    18: 107.712400,
    19: 104.304600,
    20: 99.360400,
    21: 64.937000,
    22: 81.014400,
    23: 71.817000,
    24: 87.157400,
    25: 76.118000,
    26: 102.752800,
    27: 75.546800,
    28: 102.550000,
    29: 58.532800,
    30: 66.371800,
    31: 105.403600,
    32: 74.350600,
    33: 81.635800,
    34: 97.176400,
    35: 85.435000,
    36: 115.830400,
    37: 12.424360,
    38: 41.473600,
    39: 4.879880,
    40: 40.484000,
    41: 26.925600,
    42: 38.385200,
    43: 16.429200,
    44: 8.518380,
    45: 3.122240,
    46: 20.988400,
    47: 22.892600,
    48: 52.965400,
    49: 17.571740,
    50: 6.500380,
    51: 30.029600,
    52: 26.309600
}
# Equilibrium angle per angle type id (values in the ~1.6-2.8 range,
# consistent with radians -- confirm).
theta0angle = {
    1: 1.642672,
    2: 1.602450,
    3: 1.642474,
    4: 1.620228,
    5: 1.679752,
    6: 1.649765,
    7: 1.666130,
    8: 1.620831,
    9: 1.660466,
    10: 1.617227,
    11: 1.645032,
    12: 1.648561,
    13: 1.647350,
    14: 1.637375,
    15: 1.625242,
    16: 1.617213,
    17: 2.693183,
    18: 2.414680,
    19: 2.770780,
    20: 2.317780,
    21: 2.012836,
    22: 2.026240,
    23: 2.011928,
    24: 2.002451,
    25: 2.080747,
    26: 2.051792,
    27: 2.103087,
    28: 2.045683,
    29: 1.950824,
    30: 1.953687,
    31: 1.913579,
    32: 1.942115,
    33: 2.087745,
    34: 2.112005,
    35: 2.095948,
    36: 2.092440,
    37: 1.975940,
    38: 1.907418,
    39: 2.081096,
    40: 1.846366,
    41: 1.928449,
    42: 1.943127,
    43: 1.978174,
    44: 1.862109,
    45: 1.890907,
    46: 1.861935,
    47: 2.128202,
    48: 1.830554,
    49: 1.994266,
    50: 1.875985,
    51: 2.070519,
    52: 1.831810
}
def create_angles(seqstring):
    """Build the angle topology for a double-stranded DNA coarse-grained model.

    Beads are numbered 1-based.  On strand 1, nucleotide i occupies sugar
    bead 3i-2, base bead 3i-1 and phosphate bead 3i (this matches the
    firstS1/firstB1/firstP1 offsets of the original layout); strand-2 beads
    start right after the last strand-1 base bead.  For every angle the
    three bead indices, the equilibrium angle and the force constant are
    looked up from the module-level tables (typeAngle, kangle, theta0angle).

    Parameters
    ----------
    seqstring : str
        Strand-1 sequence over the alphabet A/C/G/T.

    Returns
    -------
    tuple of np.ndarray
        (i1list, i2list, i3list, listtheta0angle, listkangle), each of
        length 8*len(seqstring) - 5; entry 0 is unused padding so angles
        are indexed from 1.
    """
    lseq = len(seqstring)
    # 2*(lseq-1) SPS + 2*lseq SBB + 2*(lseq-1) 3PSB5 + 2*(lseq-1) 5PSB3
    nangles = 8 * lseq - 6
    angletype = np.zeros(nangles + 1, dtype=int)
    i1list = np.zeros(nangles + 1, dtype=int)
    i2list = np.zeros(nangles + 1, dtype=int)
    i3list = np.zeros(nangles + 1, dtype=int)
    listkangle = np.zeros(nangles + 1, dtype=float)
    listtheta0angle = np.zeros(nangles + 1, dtype=float)
    # Index of the last base bead on each strand (strand-2 beads start at lastB1 + 1).
    lastB1 = 2 + (lseq - 1) * 3
    lastB2 = (lastB1 + 2) + (lseq - 1) * 3
    iangle = 1

    def _add(i1, i2, i3, key):
        # Record one angle term: three bead indices plus tabulated parameters.
        nonlocal iangle
        atype = typeAngle[key]
        i1list[iangle] = i1
        i2list[iangle] = i2
        i3list[iangle] = i3
        angletype[iangle] = atype
        listkangle[iangle] = kangle[atype]
        listtheta0angle[iangle] = theta0angle[atype]
        iangle += 1

    #### SPS: sugar-phosphate-sugar along each backbone
    for i in range(1, lseq):
        i1 = 3 * i - 2
        _add(i1, i1 + 2, i1 + 3, 'SPS/' + seqstring[i - 1] + seqstring[i])
    for i in range(1, lseq):
        i1 = lastB1 + 3 * i - 2
        # Strand 2 runs antiparallel: read the complement of strand 1 backwards.
        _add(i1, i1 + 2, i1 + 3,
             'SPS/' + WC[seqstring[lseq - i]] + WC[seqstring[lseq - 1 - i]])
    #### SBB: sugar-base-base across the Watson-Crick pair
    for i in range(1, lseq + 1):
        i1 = 3 * i - 2
        # lastB2 + 2 - base_index maps a base bead to its paired base on the other strand.
        _add(i1, i1 + 1, lastB2 + 2 - (i1 + 1),
             'SBB/' + seqstring[i - 1] + WC[seqstring[i - 1]])
    for i in range(1, lseq + 1):
        i1 = lastB1 + 3 * i - 2
        _add(i1, i1 + 1, lastB2 + 2 - (i1 + 1),
             'SBB/' + WC[seqstring[lseq - i]] + seqstring[lseq - i])
    #### 3PSB5: phosphate-sugar-base triples (type ids 21-36)
    for i in range(1, lseq):
        i1 = 3 * i
        _add(i1, i1 - 2, i1 - 1, '3PSB5/' + seqstring[i - 1] + seqstring[i])
    for i in range(1, lseq):
        i1 = lastB1 + 3 * i
        _add(i1, i1 - 2, i1 - 1,
             '3PSB5/' + WC[seqstring[lseq - i]] + WC[seqstring[lseq - 1 - i]])
    #### 5PSB3: phosphate-sugar-base triples (type ids 37-52)
    for i in range(1, lseq):
        i1 = 3 * i
        _add(i1, i1 + 1, i1 + 2, '5PSB3/' + seqstring[i - 1] + seqstring[i])
    for i in range(1, lseq):
        i1 = lastB1 + 3 * i
        _add(i1, i1 + 1, i1 + 2,
             '5PSB3/' + WC[seqstring[lseq - i]] + WC[seqstring[lseq - 1 - i]])
    return i1list, i2list, i3list, listtheta0angle, listkangle
| [
"numpy.zeros"
] | [((3348, 3380), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'int'}), '(nangles + 1, dtype=int)\n', (3356, 3380), True, 'import numpy as np\n'), ((3391, 3423), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'int'}), '(nangles + 1, dtype=int)\n', (3399, 3423), True, 'import numpy as np\n'), ((3434, 3466), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'int'}), '(nangles + 1, dtype=int)\n', (3442, 3466), True, 'import numpy as np\n'), ((3477, 3509), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'int'}), '(nangles + 1, dtype=int)\n', (3485, 3509), True, 'import numpy as np\n'), ((3524, 3558), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'float'}), '(nangles + 1, dtype=float)\n', (3532, 3558), True, 'import numpy as np\n'), ((3578, 3612), 'numpy.zeros', 'np.zeros', (['(nangles + 1)'], {'dtype': 'float'}), '(nangles + 1, dtype=float)\n', (3586, 3612), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import copy
import numpy as np
from . import util as util
__all__ = ['JParameter','JTensor']
##########################################################
### Auto-Jacobian
##########################################################
class JParameter(nn.Parameter):
    r"""Special type of Parameter to compute Jacobian.

    Wraps ``nn.Parameter`` with an explicit Jacobian accumulator that is
    filled during a manual backward pass (see :meth:`differentiate`).  The
    accumulator may live either in torch (``backend='pytorch'``) or numpy,
    mirroring whatever gradient arrays the caller feeds in.
    """
    def __new__(cls, data=None, requires_grad=True):
        # nn.Parameter subclassing goes through _make_subclass; an empty
        # tensor is used when no data is supplied.
        if data is None:
            data = torch.Tensor()
        return torch.Tensor._make_subclass(cls, data, requires_grad)
    def __init__(self,**kwargs):
        # Lazily allocated Jacobian buffer; None until the first update.
        self._jacobian = None
        # Name-mangled flag: True once update_jacobian_ has run since the
        # last zero_jacobian_ call.
        self.__jacobian_ready = False
        # Flattened number of scalar elements in this parameter.
        self._size_flat = util.flat_dim(self.data.shape)
    @property
    def size_flat(self):
        """Flattened number of scalar elements in the parameter."""
        return self._size_flat
    def __repr__(self):
        return 'JParameter containing:\n' + super(JParameter, self).__repr__()
    def zero_jacobian_(self,backend='pytorch'):
        """Reset the Jacobian accumulator in place for the given backend.

        The buffer is kept (and zeroed) when it already matches the
        requested backend; otherwise it is dropped and re-allocated on the
        next ``update_jacobian_`` call.
        """
        self.__jacobian_ready = False
        if self._jacobian is not None:
            # self._jacobian = self._jacobian.detach()
            # self._jacobian.requires_grad_(False)
            if backend == 'pytorch':
                if isinstance(self._jacobian,torch.Tensor):
                    self._jacobian.zero_()
                    # Detach so stale autograd history is not kept alive.
                    self._jacobian = self._jacobian.detach()
                else:
                    self._jacobian = None
            else:
                if isinstance(self._jacobian,np.ndarray):
                    self._jacobian.fill(0.)
                else:
                    self._jacobian = None
    def update_jacobian_(self,D,mode):
        """Accumulate gradient block ``D`` into the Jacobian buffer.

        ``mode='batch'`` keeps the leading batch dimension of ``D``;
        ``mode='sum'`` sums over it before accumulating.  On the first
        update after a reset the buffer is (re)allocated to match ``D``.
        """
        # check if its already been updated at least once
        if not self.__jacobian_ready:
            shape = D.shape if mode=='batch' else D.shape[1:] if mode=='sum' else None
            if not (self._jacobian is not None and self._jacobian.shape == shape):
                # print('Mode: %s. D shape: %s. J shape: %s' %(mode,str(D.shape),str(shape)))
                if isinstance(D,torch.Tensor):
                    self._jacobian = torch.zeros(shape)
                    self._jacobian.requires_grad_(False)
                else:
                    self._jacobian = np.zeros(shape,dtype=np.float32)
        if isinstance(D,torch.Tensor):
            D = D.detach()
            if mode == 'batch':
                pass
            elif mode == 'sum':
                D = torch.sum(D,dim=0)
            else:
                raise RuntimeError('Undefined Behavior')
            self._jacobian.add_(D.data) # need to make sure not to tie up computation graph
        else:
            if mode == 'batch':
                pass
            elif mode == 'sum':
                D = np.sum(D,axis=0)
            else:
                raise RuntimeError('Undefined Behavior')
            self._jacobian += D
        self.__jacobian_ready = True
    @property
    def jacobian_ready(self):
        """True once the Jacobian has been updated since the last reset."""
        return self.__jacobian_ready
    @property
    def jacobian(self):
        """Raw accumulator: ``torch.Tensor``, ``np.ndarray`` or ``None``."""
        return self._jacobian
    @property
    def jacobian_numpy(self):
        """Deep-copied numpy view of the Jacobian, or ``None`` if unset."""
        if self._jacobian is not None:
            return copy.deepcopy(self._jacobian.numpy()) if isinstance(self._jacobian,torch.Tensor) else copy.deepcopy(self._jacobian)
        return None
    def parameters(self):
        # Mimic nn.Module.parameters() so a lone JParameter can be iterated.
        return [self]
    def differentiate(self,in_grad = None,mode='sum',backend=None):
        """Entry point of the auto-Jacobian pass: fold ``in_grad`` in."""
        # import pdb; pdb.set_trace()
        self.update_jacobian_(in_grad,mode)
class JTensor(object):
    """Tensor wrapper that remembers the module which produced it so a
    Jacobian can be propagated back through that creator exactly once."""

    def __init__(self, data, creator, jacobian_info):
        self.data = data
        self.ndata = data.detach().numpy()
        # Bookkeeping needed to route the backward Jacobian pass.
        self._creator = creator
        self._jacobian_info = jacobian_info

    def __repr__(self):
        return 'JTensor containing:\n' + repr(self.data)

    def differentiate(self, in_grad=None, mode='sum', backend=None):
        """Push ``in_grad`` through the creator, then drop the references."""
        if in_grad is None:
            # Default seed: identity per batch element.
            batch, dim = self.data.shape[0], self.data.shape[1]
            in_grad = torch.eye(dim).unsqueeze(0).repeat(batch, 1, 1)
        is_tensor = isinstance(in_grad, torch.Tensor)
        if backend is None:
            backend = 'pytorch' if is_tensor else 'numpy'
        # Convert the incoming gradient to the requested backend if needed.
        if backend == 'pytorch' and not is_tensor:
            in_grad = torch.from_numpy(in_grad)
        elif backend == 'numpy' and is_tensor:
            in_grad = in_grad.detach().numpy()
        self._creator._compute_jacobian(in_grad, self._jacobian_info, mode)
        # WARNING: one-shot -- release references once consumed.
        self._jacobian_info = None
        self._creator = None
| [
"copy.deepcopy",
"numpy.sum",
"torch.eye",
"numpy.zeros",
"torch.Tensor",
"torch.zeros",
"torch.sum",
"torch.Tensor._make_subclass",
"torch.from_numpy"
] | [((483, 536), 'torch.Tensor._make_subclass', 'torch.Tensor._make_subclass', (['cls', 'data', 'requires_grad'], {}), '(cls, data, requires_grad)\n', (510, 536), False, 'import torch\n'), ((453, 467), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (465, 467), False, 'import torch\n'), ((3194, 3223), 'copy.deepcopy', 'copy.deepcopy', (['self._jacobian'], {}), '(self._jacobian)\n', (3207, 3223), False, 'import copy\n'), ((4266, 4291), 'torch.from_numpy', 'torch.from_numpy', (['in_grad'], {}), '(in_grad)\n', (4282, 4291), False, 'import torch\n'), ((2041, 2059), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (2052, 2059), False, 'import torch\n'), ((2176, 2209), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2184, 2209), True, 'import numpy as np\n'), ((2381, 2400), 'torch.sum', 'torch.sum', (['D'], {'dim': '(0)'}), '(D, dim=0)\n', (2390, 2400), False, 'import torch\n'), ((2686, 2703), 'numpy.sum', 'np.sum', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (2692, 2703), True, 'import numpy as np\n'), ((4012, 4024), 'torch.eye', 'torch.eye', (['d'], {}), '(d)\n', (4021, 4024), False, 'import torch\n')] |
# Code adapted from https://github.com/liucong3/camelyon17
# and https://github.com/cv-lee/Camelyon17
import openslide
import cv2
import numpy as np
import pandas as pd
import os
import csv
import argparse
from tqdm import tqdm
from xml.etree.ElementTree import parse
from PIL import Image
PATCH_LEVEL = 2
MASK_LEVEL = 4
CENTER_SIZE = 32
def _read_xml(xml_path, mask_level):
"""
Read an XML file with annotations and return coordinates of tumor and normal areas
"""
xml = parse(xml_path).getroot()
tumor_coord_list = []
normal_coord_list = []
for annotation in xml.iter('Annotation'):
annotation_type = annotation.get('PartOfGroup')
assert annotation_type in ['metastases', 'normal', 'None']
if annotation_type == 'metastases':
coord_list = tumor_coord_list
elif annotation_type == 'normal':
coord_list = normal_coord_list
elif annotation_type == 'None':
continue
for region_idx, region in enumerate(annotation.iter('Coordinates')):
assert region_idx == 0
coords = []
for coord in region:
coords.append([round(float(coord.get('X'))/(2**mask_level)),
round(float(coord.get('Y'))/(2**mask_level))])
coord_list.append(coords)
return tumor_coord_list, normal_coord_list
def _make_masks(slide_path, xml_path, mask_level, make_map, **args):
    '''
    Return a slide with annotated tumor, normal, and tissue masks using an Otsu threshold.

    Reads the slide and its XML annotations, then builds (all at mask_level
    resolution): an optional visualization map with annotation boundaries, a
    filled tumor mask, a tissue mask from Otsu thresholding of the saturation
    channel, and a normal mask (tissue minus tumor).
    '''
    print('_make_masks(%s)' % slide_path)
    #slide loading
    slide = openslide.OpenSlide(slide_path)
    # xml loading
    tumor_coord_list, normal_coord_list = _read_xml(xml_path, mask_level)
    if make_map:
        slide_map = np.array(slide.get_thumbnail(slide.level_dimensions[mask_level]))
        # draw boundary of tumor in map (thickness 1; tumor=255, normal=127)
        for coords in tumor_coord_list:
            cv2.drawContours(slide_map, np.array([coords]), -1, 255, 1)
        for coords in normal_coord_list:
            cv2.drawContours(slide_map, np.array([coords]), -1, 127, 1)
    else:
        slide_map = None
    # draw tumor mask
    # first fill up tumors, then draw normal boundaries and fill those up with 0
    # (order matters: normal regions punch holes into the tumor fill)
    tumor_mask = np.zeros(slide.level_dimensions[mask_level][::-1])
    for coords in tumor_coord_list:
        cv2.drawContours(tumor_mask, np.array([coords]), -1, 255, -1)
    for coords in normal_coord_list:
        cv2.drawContours(tumor_mask, np.array([coords]), -1, 0, -1)
    # draw tissue mask
    slide_lv = slide.read_region((0, 0), mask_level, slide.level_dimensions[mask_level])
    slide_lv = cv2.cvtColor(np.array(slide_lv), cv2.COLOR_RGBA2RGB)
    # NOTE(review): the image is RGB but converted with a BGR flag; hue would
    # be wrong, but only channel 1 (saturation) is used and saturation is
    # invariant under channel permutation, so the threshold is unaffected.
    slide_lv = cv2.cvtColor(slide_lv, cv2.COLOR_BGR2HSV)
    slide_lv = slide_lv[:, :, 1]
    _, tissue_mask = cv2.threshold(slide_lv, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # check normal mask / draw normal mask: tissue that is not tumor
    normal_mask = np.array(tissue_mask).copy()
    normal_mask[tumor_mask > 127] = 0
    return slide, slide_map, tumor_mask, tissue_mask, normal_mask
def _write_masks(mask_folder_path, slide_map, tumor_mask, tissue_mask, normal_mask, **args):
    """Dump the annotation map and the three masks as PNGs for visual QA."""
    print('_write_masks')
    os.makedirs(mask_folder_path, exist_ok=True)
    outputs = [
        ('map.png', slide_map),
        ('tumor_mask.png', tumor_mask),
        ('tissue_mask.png', np.array(tissue_mask)),
        ('normal_mask.png', normal_mask),
    ]
    for file_name, image in outputs:
        cv2.imwrite(os.path.join(mask_folder_path, file_name), image)
def _record_patches(center_size,
slide, slide_map, patch_level,
mask_level, tumor_mask, tissue_mask, normal_mask,
tumor_threshold,
normal_threshold,
**args):
"""
Extract all tumor and non-tumor patches from a slide, using the given masks.
"""
# Patch size is 3*center_size by 3*center_size
# It is in terms of pixels of the final output
# So it's measured with respect to patch_level
patch_size = center_size * 3
# Extract normal, tumor patches using normal, tumor mask
width, height = np.array(slide.level_dimensions[patch_level]) // center_size
total = width * height
all_cnt = 0
t_cnt = 0
n_cnt = 0
print('_record_patches(w=%d,h=%d)' % (width,height))
margin = 5 #3
mask_max = 255
assert mask_level >= patch_level
width_mask_step = center_size * slide.level_dimensions[mask_level][0] / slide.level_dimensions[patch_level][0]
height_mask_step = center_size * slide.level_dimensions[mask_level][1] / slide.level_dimensions[patch_level][1]
patch_list = []
# These mark the coordinates of the central region of the patch
for i in range(margin, width-margin):
for j in range(margin, height-margin):
mask_i_start = round(width_mask_step * i)
mask_i_end = round(width_mask_step * (i+1))
mask_j_start = round(height_mask_step * j)
mask_j_end = round(height_mask_step * (j+1))
# Compute masks only over central region
tumor_mask_avg = tumor_mask[
mask_j_start : mask_j_end,
mask_i_start : mask_i_end].mean()
normal_mask_avg = normal_mask[
mask_j_start : mask_j_end,
mask_i_start : mask_i_end].mean()
tumor_area_ratio = tumor_mask_avg / mask_max
normal_area_ratio = normal_mask_avg / mask_max
# Extract patch coordinates
# Coords correspond just to the center, not the entire patch
if (tumor_area_ratio > tumor_threshold):
patch_list.append((center_size*i, center_size*j, 1))
cv2.rectangle(
slide_map,
(mask_i_start, mask_j_start),
(mask_i_end, mask_j_end),
(0,0,255),
1)
elif (normal_area_ratio > normal_threshold):
patch_list.append((center_size*i, center_size*j, 0))
cv2.rectangle(
slide_map,
(mask_i_start, mask_j_start),
(mask_i_end, mask_j_end),
(255,255,0),
1)
df = pd.DataFrame(patch_list,
columns=[
'x_coord',
'y_coord',
'tumor'
])
return df
def generate_file(patient, node, xml_path, slide_path, folder_path):
    """Process one slide: build its masks, record patch coordinates tagged
    with patient/node, and dump the masks to ``folder_path``."""
    config = {
        'slide_path': slide_path,
        'xml_path': xml_path,
        'patch_level': PATCH_LEVEL,
        'mask_level': MASK_LEVEL,
        'center_size': CENTER_SIZE,
        'tumor_threshold': 0,
        'normal_threshold': 0.2,
        'mask_folder_path': folder_path,
        'make_map': True,
    }
    (config['slide'], config['slide_map'], config['tumor_mask'],
     config['tissue_mask'], config['normal_mask']) = _make_masks(**config)
    patch_df = _record_patches(**config)
    patch_df['patient'] = patient
    patch_df['node'] = node
    _write_masks(**config)
    return patch_df
def generate_files(slide_root, output_root):
    """Walk the lesion-annotation XMLs under ``slide_root`` and extract patch
    coordinates for every matching slide.

    The per-slide DataFrames are collected in a list and concatenated once
    (the previous version re-concatenated inside the loop, which is
    quadratic in the number of slides).  Slides that fail to open are
    logged and skipped.  The combined table is written to
    ``<output_root>/all_patch_coords.csv`` and returned.
    """
    # Empty seed frame keeps the column set even when no slide matches.
    seed = pd.DataFrame(
        columns=[
            'patient',
            'node',
            'x_coord',
            'y_coord',
            'tumor'
        ])
    frames = [seed]
    for root, dirs, files in os.walk(os.path.join(slide_root, 'lesion_annotations')):
        for file in files:
            # Skip non-XML files and macOS resource-fork artifacts ("._*").
            if not (file.endswith('.xml') and not file.startswith('._')):
                continue
            prefix = file.split('.xml')[0]
            try:
                # Expected name shape: patient_<id>_node_<id>.xml
                assert len(prefix.split('_')) == 4
                df = generate_file(
                    patient=prefix.split('_')[1],
                    node=prefix.split('_')[3],
                    xml_path=os.path.join(root, file),
                    slide_path=os.path.join(slide_root, 'tif', f'{prefix}.tif'),
                    folder_path=os.path.join(output_root, 'masks', prefix))
                frames.append(df)
            except openslide.OpenSlideError as err:
                print(err)
                continue
    aggregate_df = pd.concat(frames).reset_index(drop=True)
    aggregate_df.to_csv(os.path.join(output_root, 'all_patch_coords.csv'))
    return aggregate_df
if __name__ == '__main__':
    # Command-line entry point: extract patch coordinates for every annotated
    # slide under --slide_root and write outputs under --output_root.
    parser = argparse.ArgumentParser()
    parser.add_argument('--slide_root', required=True)
    parser.add_argument('--output_root', required=True)
    args = parser.parse_args()
    generate_files(
        slide_root=args.slide_root,
        output_root=args.output_root)
| [
"pandas.DataFrame",
"openslide.OpenSlide",
"xml.etree.ElementTree.parse",
"os.makedirs",
"argparse.ArgumentParser",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.threshold",
"numpy.zeros",
"numpy.array",
"cv2.rectangle",
"os.path.join",
"pandas.concat"
] | [((1690, 1721), 'openslide.OpenSlide', 'openslide.OpenSlide', (['slide_path'], {}), '(slide_path)\n', (1709, 1721), False, 'import openslide\n'), ((2355, 2405), 'numpy.zeros', 'np.zeros', (['slide.level_dimensions[mask_level][::-1]'], {}), '(slide.level_dimensions[mask_level][::-1])\n', (2363, 2405), True, 'import numpy as np\n'), ((2822, 2863), 'cv2.cvtColor', 'cv2.cvtColor', (['slide_lv', 'cv2.COLOR_BGR2HSV'], {}), '(slide_lv, cv2.COLOR_BGR2HSV)\n', (2834, 2863), False, 'import cv2\n'), ((2920, 2988), 'cv2.threshold', 'cv2.threshold', (['slide_lv', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(slide_lv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2933, 2988), False, 'import cv2\n'), ((3411, 3455), 'os.makedirs', 'os.makedirs', (['mask_folder_path'], {'exist_ok': '(True)'}), '(mask_folder_path, exist_ok=True)\n', (3422, 3455), False, 'import os\n'), ((3472, 3513), 'os.path.join', 'os.path.join', (['mask_folder_path', '"""map.png"""'], {}), "(mask_folder_path, 'map.png')\n", (3484, 3513), False, 'import os\n'), ((3519, 3551), 'cv2.imwrite', 'cv2.imwrite', (['map_path', 'slide_map'], {}), '(map_path, slide_map)\n', (3530, 3551), False, 'import cv2\n'), ((3575, 3623), 'os.path.join', 'os.path.join', (['mask_folder_path', '"""tumor_mask.png"""'], {}), "(mask_folder_path, 'tumor_mask.png')\n", (3587, 3623), False, 'import os\n'), ((3629, 3669), 'cv2.imwrite', 'cv2.imwrite', (['tumor_mask_path', 'tumor_mask'], {}), '(tumor_mask_path, tumor_mask)\n', (3640, 3669), False, 'import cv2\n'), ((3704, 3753), 'os.path.join', 'os.path.join', (['mask_folder_path', '"""tissue_mask.png"""'], {}), "(mask_folder_path, 'tissue_mask.png')\n", (3716, 3753), False, 'import os\n'), ((3836, 3885), 'os.path.join', 'os.path.join', (['mask_folder_path', '"""normal_mask.png"""'], {}), "(mask_folder_path, 'normal_mask.png')\n", (3848, 3885), False, 'import os\n'), ((3891, 3933), 'cv2.imwrite', 'cv2.imwrite', (['normal_mask_path', 'normal_mask'], {}), 
'(normal_mask_path, normal_mask)\n', (3902, 3933), False, 'import cv2\n'), ((6791, 6856), 'pandas.DataFrame', 'pd.DataFrame', (['patch_list'], {'columns': "['x_coord', 'y_coord', 'tumor']"}), "(patch_list, columns=['x_coord', 'y_coord', 'tumor'])\n", (6803, 6856), True, 'import pandas as pd\n'), ((7663, 7735), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['patient', 'node', 'x_coord', 'y_coord', 'tumor']"}), "(columns=['patient', 'node', 'x_coord', 'y_coord', 'tumor'])\n", (7675, 7735), True, 'import pandas as pd\n'), ((8901, 8926), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8924, 8926), False, 'import argparse\n'), ((2766, 2784), 'numpy.array', 'np.array', (['slide_lv'], {}), '(slide_lv)\n', (2774, 2784), True, 'import numpy as np\n'), ((3789, 3810), 'numpy.array', 'np.array', (['tissue_mask'], {}), '(tissue_mask)\n', (3797, 3810), True, 'import numpy as np\n'), ((4599, 4644), 'numpy.array', 'np.array', (['slide.level_dimensions[patch_level]'], {}), '(slide.level_dimensions[patch_level])\n', (4607, 4644), True, 'import numpy as np\n'), ((7862, 7908), 'os.path.join', 'os.path.join', (['slide_root', '"""lesion_annotations"""'], {}), "(slide_root, 'lesion_annotations')\n", (7874, 7908), False, 'import os\n'), ((8779, 8828), 'os.path.join', 'os.path.join', (['output_root', '"""all_patch_coords.csv"""'], {}), "(output_root, 'all_patch_coords.csv')\n", (8791, 8828), False, 'import os\n'), ((517, 532), 'xml.etree.ElementTree.parse', 'parse', (['xml_path'], {}), '(xml_path)\n', (522, 532), False, 'from xml.etree.ElementTree import parse\n'), ((2481, 2499), 'numpy.array', 'np.array', (['[coords]'], {}), '([coords])\n', (2489, 2499), True, 'import numpy as np\n'), ((2590, 2608), 'numpy.array', 'np.array', (['[coords]'], {}), '([coords])\n', (2598, 2608), True, 'import numpy as np\n'), ((3052, 3073), 'numpy.array', 'np.array', (['tissue_mask'], {}), '(tissue_mask)\n', (3060, 3073), True, 'import numpy as np\n'), ((2046, 2064), 
'numpy.array', 'np.array', (['[coords]'], {}), '([coords])\n', (2054, 2064), True, 'import numpy as np\n'), ((2161, 2179), 'numpy.array', 'np.array', (['[coords]'], {}), '([coords])\n', (2169, 2179), True, 'import numpy as np\n'), ((6228, 6328), 'cv2.rectangle', 'cv2.rectangle', (['slide_map', '(mask_i_start, mask_j_start)', '(mask_i_end, mask_j_end)', '(0, 0, 255)', '(1)'], {}), '(slide_map, (mask_i_start, mask_j_start), (mask_i_end,\n mask_j_end), (0, 0, 255), 1)\n', (6241, 6328), False, 'import cv2\n'), ((6576, 6678), 'cv2.rectangle', 'cv2.rectangle', (['slide_map', '(mask_i_start, mask_j_start)', '(mask_i_end, mask_j_end)', '(255, 255, 0)', '(1)'], {}), '(slide_map, (mask_i_start, mask_j_start), (mask_i_end,\n mask_j_end), (255, 255, 0), 1)\n', (6589, 6678), False, 'import cv2\n'), ((8545, 8574), 'pandas.concat', 'pd.concat', (['[aggregate_df, df]'], {}), '([aggregate_df, df])\n', (8554, 8574), True, 'import pandas as pd\n'), ((8316, 8340), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8328, 8340), False, 'import os\n'), ((8378, 8426), 'os.path.join', 'os.path.join', (['slide_root', '"""tif"""', 'f"""{prefix}.tif"""'], {}), "(slide_root, 'tif', f'{prefix}.tif')\n", (8390, 8426), False, 'import os\n'), ((8465, 8507), 'os.path.join', 'os.path.join', (['output_root', '"""masks"""', 'prefix'], {}), "(output_root, 'masks', prefix)\n", (8477, 8507), False, 'import os\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class OuterMetricModel(object):
    """Metric model operating on the outer product of two embeddings.

    The batched outer product embed_a ⊗ embed_b is treated as a single-channel
    image and reduced to one scalar logit per example by a small CNN.
    """

    def __init__(self, encoder_dim):
        # NOTE(review): encoder_dim is unused here; the conv stack adapts to
        # the embedding size when first called.
        filters = 16
        kernel_size = (5, 5)
        pool_size = (3, 3)
        self.conv_layers = [
            tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size,
                                   activation=tf.nn.tanh, name='conv1'),
            tf.keras.layers.MaxPooling2D(pool_size=pool_size, name='pool1'),
            tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size,
                                   activation=tf.nn.tanh, name='conv2'),
            tf.keras.layers.MaxPooling2D(pool_size=pool_size, name='pool2'),
            tf.keras.layers.Flatten(name='flatten')
        ]
        self.fc = tf.keras.layers.Dense(1, name='linear')

    def forward(self, embed_a, embed_b):
        """Return a flat (batch,) tensor of logits for the embedding pair."""
        # Batched outer product: (B, e, 1) x (B, 1, e) -> (B, e, e).
        x = tf.matmul(tf.expand_dims(embed_a, 2), tf.expand_dims(embed_b, 1))
        x = tf.expand_dims(x, 3)  # add channel dimension -> (B, e, e, 1)
        for layer in self.conv_layers:
            # Layer.apply() is deprecated in TF2; invoke the layer directly.
            x = layer(x)
        logits = self.fc(x)
        return tf.reshape(logits, [-1])
def _ortho_weight(shape, dtype=tf.float32):
    """Return a random orthogonal weight matrix as a TF tensor.

    Draws a Gaussian matrix of the requested shape and keeps the left
    singular vectors of its SVD, which form an orthonormal basis.
    """
    gaussian = np.random.normal(size=shape)
    u, _, _ = np.linalg.svd(gaussian)  # only the orthogonal factor is needed
    return tf.convert_to_tensor(u.astype('float32'), dtype)
class BilinearMetricModel(object):
    """Bilinear similarity metric: logit = a^T W b, with input dropout."""

    def __init__(self, encoder_dim):
        shape = (encoder_dim, encoder_dim)
        # Orthogonal initialization keeps the bilinear form well-conditioned.
        self.weight = tf.Variable(initial_value=_ortho_weight(shape),
                                  name="bilinear_kernel", shape=shape)
        self.dropout = tf.keras.layers.Dropout(0.2)

    def forward(self, embed_a, embed_b, training):
        """Return per-example bilinear scores embed_a^T W embed_b."""
        dropped_a = self.dropout(embed_a, training=training)
        dropped_b = self.dropout(embed_b, training=training)
        weighted = tf.matmul(dropped_a, self.weight)  # B x e
        return tf.reduce_sum(tf.multiply(weighted, dropped_b), axis=1)
class LinearMetricModel(object):
    """Dot-product metric after (optionally tied) linear projections."""

    def __init__(self, encoder_dim, tie=False):
        self.fc = tf.keras.layers.Dense(encoder_dim, name='linear',
                                   activation=None)
        # With tie=True both inputs share one projection; otherwise a second
        # independent projection is used for the right-hand embedding.
        if tie:
            self.fc2 = self.fc
        else:
            self.fc2 = tf.keras.layers.Dense(
                encoder_dim, name='linear2', activation=None)
        self.dropout = tf.keras.layers.Dropout(0.2)

    def forward(self, embed_a, embed_b, training):
        """Score a pair of embeddings by the dot product of their projections."""
        proj_a = self.dropout(self.fc(embed_a), training=training)
        proj_b = self.dropout(self.fc2(embed_b), training=training)
        scores = tf.reduce_sum(tf.multiply(proj_a, proj_b), axis=1)
        return tf.reshape(scores, [-1])
class DeepSetModel(object):
    """Deep-Sets style metric over the elementwise product of two embeddings."""

    def __init__(self, set_dim):
        self.upsample = tf.keras.layers.Conv1D(filters=128, kernel_size=3,
                                           strides=1, name='upsample')
        # phi: per-element feature extractor; rho: set-level readout.
        phi_layers = [
            tf.keras.layers.Dense(set_dim, name='phi1', activation=tf.nn.elu),
            tf.keras.layers.Dense(set_dim, name='phi2', activation=None),
        ]
        self.phi = tf.keras.models.Sequential(phi_layers)
        rho_layers = [
            tf.keras.layers.Dense(set_dim, name='rho1', activation=tf.nn.elu),
            tf.keras.layers.Dense(1, name='rho2', activation=None),
        ]
        self.rho = tf.keras.models.Sequential(rho_layers)
        self.dropout = tf.keras.layers.Dropout(0.2)

    def forward(self, embed_a, embed_b, training):
        """Return (batch,) logits via sum-pooled phi features and rho readout."""
        features = tf.expand_dims(tf.multiply(embed_a, embed_b), 2)
        features = self.dropout(self.upsample(features), training=training)
        phi_feats = self.dropout(self.phi(features), training=training)
        pooled = tf.reduce_sum(phi_feats, axis=1)  # permutation-invariant pooling
        return tf.reshape(self.rho(pooled), [-1])
class RBFMetricModel(object):
    """Dot-product metric with an optional shared bias-free projection."""

    def __init__(self, encoder_dim):
        # encoder_dim <= 0 disables the projection (identity mapping).
        self.fc = (tf.keras.layers.Dense(encoder_dim, name='linear',
                                         use_bias=False)
                   if encoder_dim > 0 else (lambda x: x))

    def forward(self, embed_a, embed_b):
        """Score both embeddings by the dot product of their projections."""
        proj_a, proj_b = self.fc(embed_a), self.fc(embed_b)
        scores = tf.reduce_sum(tf.multiply(proj_a, proj_b), axis=1)
        return tf.reshape(scores, [-1])
class CovMetricModel(object):
    """Metric based on the Frobenius norm of the cross-covariance of two
    projected embeddings."""

    def __init__(self, encoder_dim):
        # Project both embeddings into a space of half the encoder dimension.
        self.fc = tf.keras.layers.Dense(encoder_dim // 2, name='linear',
                                   use_bias=False)
        self.encoder_dim = encoder_dim // 2

    def forward(self, embed_a, embed_b):
        """Return per-example logits ||cov(proj_a, proj_b)||_F."""
        # Layer.apply() is deprecated in TF2; invoke the layer directly.
        a = self.fc(embed_a)
        b = self.fc(embed_b)
        # Cross-covariance of the mean-centered projections:
        # (B, d, 1) x (B, 1, d) -> (B, d, d).
        covar = tf.matmul(
            tf.expand_dims(a - tf.reduce_mean(a, axis=1, keepdims=True), 2),
            tf.expand_dims(b - tf.reduce_mean(b, axis=1, keepdims=True), 1))
        covar /= tf.constant(self.encoder_dim, dtype=tf.float32)
        logits = tf.norm(covar, ord='fro', axis=[-2, -1])
        return logits
class DistanceMetricModel(object):
    """Negative-Euclidean-distance metric between projected embeddings."""

    def __init__(self, encoder_dim):
        self.fc = tf.keras.layers.Dense(encoder_dim // 2, name='linear')

    def forward(self, embed_a, embed_b):
        """Return (batch,) logits -||proj(a) - proj(b)||_2 (larger = closer)."""
        # Layer.apply() is deprecated in TF2; invoke the layer directly.
        a = self.fc(embed_a)
        b = self.fc(embed_b)
        logits = -tf.norm(a - b, axis=1)
        return tf.reshape(logits, [-1])
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.matmul",
"te... | [((1735, 1763), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'shape'}), '(size=shape)\n', (1751, 1763), True, 'import numpy as np\n'), ((1776, 1792), 'numpy.linalg.svd', 'np.linalg.svd', (['W'], {}), '(W)\n', (1789, 1792), True, 'import numpy as np\n'), ((1372, 1411), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'name': '"""linear"""'}), "(1, name='linear')\n", (1393, 1411), True, 'import tensorflow as tf\n'), ((1534, 1554), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(3)'], {}), '(x, 3)\n', (1548, 1554), True, 'import tensorflow as tf\n'), ((1658, 1682), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1]'], {}), '(logits, [-1])\n', (1668, 1682), True, 'import tensorflow as tf\n'), ((2114, 2142), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2137, 2142), True, 'import tensorflow as tf\n'), ((2312, 2343), 'tensorflow.matmul', 'tf.matmul', (['embed_a', 'self.weight'], {}), '(embed_a, self.weight)\n', (2321, 2343), True, 'import tensorflow as tf\n'), ((2527, 2593), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['encoder_dim'], {'name': '"""linear"""', 'activation': 'None'}), "(encoder_dim, name='linear', activation=None)\n", (2548, 2593), True, 'import tensorflow as tf\n'), ((2762, 2790), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2785, 2790), True, 'import tensorflow as tf\n'), ((3045, 3069), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1]'], {}), '(logits, [-1])\n', (3055, 3069), True, 'import tensorflow as tf\n'), ((3151, 3229), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(1)', 'name': '"""upsample"""'}), "(filters=128, kernel_size=3, strides=1, name='upsample')\n", (3173, 3229), True, 'import tensorflow as tf\n'), ((3672, 3700), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (3695, 
3700), True, 'import tensorflow as tf\n'), ((3759, 3788), 'tensorflow.multiply', 'tf.multiply', (['embed_a', 'embed_b'], {}), '(embed_a, embed_b)\n', (3770, 3788), True, 'import tensorflow as tf\n'), ((3797, 3817), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(2)'], {}), '(x, 2)\n', (3811, 3817), True, 'import tensorflow as tf\n'), ((3975, 4003), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['phi_x'], {'axis': '(1)'}), '(phi_x, axis=1)\n', (3988, 4003), True, 'import tensorflow as tf\n'), ((4044, 4068), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1]'], {}), '(logits, [-1])\n', (4054, 4068), True, 'import tensorflow as tf\n'), ((4473, 4497), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1]'], {}), '(logits, [-1])\n', (4483, 4497), True, 'import tensorflow as tf\n'), ((4579, 4649), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(encoder_dim // 2)'], {'name': '"""linear"""', 'use_bias': '(False)'}), "(encoder_dim // 2, name='linear', use_bias=False)\n", (4600, 4649), True, 'import tensorflow as tf\n'), ((5008, 5055), 'tensorflow.constant', 'tf.constant', (['self.encoder_dim'], {'dtype': 'tf.float32'}), '(self.encoder_dim, dtype=tf.float32)\n', (5019, 5055), True, 'import tensorflow as tf\n'), ((5069, 5109), 'tensorflow.norm', 'tf.norm', (['covar'], {'ord': '"""fro"""', 'axis': '[-2, -1]'}), "(covar, ord='fro', axis=[-2, -1])\n", (5076, 5109), True, 'import tensorflow as tf\n'), ((5214, 5268), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(encoder_dim // 2)'], {'name': '"""linear"""'}), "(encoder_dim // 2, name='linear')\n", (5235, 5268), True, 'import tensorflow as tf\n'), ((5420, 5444), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1]'], {}), '(logits, [-1])\n', (5430, 5444), True, 'import tensorflow as tf\n'), ((894, 1000), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'activation': 'tf.nn.tanh', 'name': '"""conv1"""'}), "(filters=filters, 
kernel_size=kernel_size, activation\n =tf.nn.tanh, name='conv1')\n", (916, 1000), True, 'import tensorflow as tf\n'), ((1032, 1095), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': 'pool_size', 'name': '"""pool1"""'}), "(pool_size=pool_size, name='pool1')\n", (1060, 1095), True, 'import tensorflow as tf\n'), ((1103, 1209), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'activation': 'tf.nn.tanh', 'name': '"""conv2"""'}), "(filters=filters, kernel_size=kernel_size, activation\n =tf.nn.tanh, name='conv2')\n", (1125, 1209), True, 'import tensorflow as tf\n'), ((1241, 1304), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': 'pool_size', 'name': '"""pool2"""'}), "(pool_size=pool_size, name='pool2')\n", (1269, 1304), True, 'import tensorflow as tf\n'), ((1312, 1351), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (1335, 1351), True, 'import tensorflow as tf\n'), ((1470, 1496), 'tensorflow.expand_dims', 'tf.expand_dims', (['embed_a', '(2)'], {}), '(embed_a, 2)\n', (1484, 1496), True, 'import tensorflow as tf\n'), ((1498, 1524), 'tensorflow.expand_dims', 'tf.expand_dims', (['embed_b', '(1)'], {}), '(embed_b, 1)\n', (1512, 1524), True, 'import tensorflow as tf\n'), ((2380, 2404), 'tensorflow.multiply', 'tf.multiply', (['aw', 'embed_b'], {}), '(aw, embed_b)\n', (2391, 2404), True, 'import tensorflow as tf\n'), ((2665, 2732), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['encoder_dim'], {'name': '"""linear2"""', 'activation': 'None'}), "(encoder_dim, name='linear2', activation=None)\n", (2686, 2732), True, 'import tensorflow as tf\n'), ((3007, 3024), 'tensorflow.multiply', 'tf.multiply', (['a', 'b'], {}), '(a, b)\n', (3018, 3024), True, 'import tensorflow as tf\n'), ((4176, 4241), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['encoder_dim'], {'name': '"""linear"""', 'use_bias': '(False)'}), "(encoder_dim, name='linear', use_bias=False)\n", (4197, 4241), True, 'import tensorflow as tf\n'), ((4435, 4452), 'tensorflow.multiply', 'tf.multiply', (['a', 'b'], {}), '(a, b)\n', (4446, 4452), True, 'import tensorflow as tf\n'), ((5386, 5408), 'tensorflow.norm', 'tf.norm', (['(a - b)'], {'axis': '(1)'}), '(a - b, axis=1)\n', (5393, 5408), True, 'import tensorflow as tf\n'), ((3324, 3389), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['set_dim'], {'name': '"""phi1"""', 'activation': 'tf.nn.elu'}), "(set_dim, name='phi1', activation=tf.nn.elu)\n", (3345, 3389), True, 'import tensorflow as tf\n'), ((3397, 3457), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['set_dim'], {'name': '"""phi2"""', 'activation': 'None'}), "(set_dim, name='phi2', activation=None)\n", (3418, 3457), True, 'import tensorflow as tf\n'), ((3517, 3582), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['set_dim'], {'name': '"""rho1"""', 'activation': 'tf.nn.elu'}), "(set_dim, name='rho1', activation=tf.nn.elu)\n", (3538, 3582), True, 'import tensorflow as tf\n'), ((3590, 3644), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'name': '"""rho2"""', 'activation': 'None'}), "(1, name='rho2', activation=None)\n", (3611, 3644), True, 'import tensorflow as tf\n'), ((4877, 4917), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['a'], {'axis': '(1)', 'keepdims': '(True)'}), '(a, axis=1, keepdims=True)\n', (4891, 4917), True, 'import tensorflow as tf\n'), ((4948, 4988), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['b'], {'axis': '(1)', 'keepdims': '(True)'}), '(b, axis=1, keepdims=True)\n', (4962, 4988), True, 'import tensorflow as tf\n')] |
"""
Author: <NAME>
Date: 21.04.2020
Brief: Script to test the optimal convex data sampling alogrithm
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
import tensorflow as tf
from src.networks.configmodel import init_neural_closure
from src import utils
from src import math
def quad_func(x):
    """Quadratic test function f(x) = x^2."""
    return x ** 2
def quad_func_grad(x):
    """Derivative of quad_func: f'(x) = 2x."""
    return x + x
def exp_func(x):
    """Exponential test function f(x) = exp(x)."""
    return np.exp(x)
def get_errors(s_x, function, grad, tol):
eL = []
tL = []
for i in range(len(s_x) - 1):
t1 = 1 / (grad(s_x[i + 1]) - grad(s_x[i])) * (function(s_x[i]) - s_x[i] * grad(s_x[i]) - (
function(s_x[i + 1]) - s_x[i + 1] * grad(s_x[i + 1])))
e1 = function(t1) - (function(s_x[i]) + (t1 - s_x[i]) * grad(s_x[i]))
# error in case that the approximation overshoots
tmp1 = function(s_x[i + 1])
tmp2 = function(s_x[i])
grad_intermediate = (tmp1 - tmp2) / (s_x[i + 1] - s_x[i])
def F(x):
return 0.5 * (grad_intermediate - grad(x)) * (grad_intermediate - grad(x))
result = scipy.optimize.minimize_scalar(method="bounded", fun=F, bounds=[s_x[i], s_x[i + 1]])
if not result.success:
ValueError("Optimization unsuccessful!")
print(grad_intermediate)
print((s_x[i + 1] - s_x[i]) / 2)
print("----")
t2 = float(result.x)
e2 = function(s_x[i]) + grad_intermediate * (t2 - s_x[i]) - function(t2)
if e1 > e2:
eL.append(e1)
tL.append(t1)
else:
eL.append(e2)
tL.append(t2)
return [eL, tL]
def sample_data_entropy_M1(u, alpha, h, tol):
    """
    Performs the smart (adaptive) sampling algorithm on the M1 entropy
    closure problem.

    Each interval between neighboring samples is checked for the linear
    interpolation error of h(u); wherever the error exceeds ``tol`` a new
    sample is inserted, until all intervals satisfy the tolerance.

    params: u     - list of moment tensors, shape (2,) each
            alpha - list of Lagrange multiplier tensors, shape (2,) each
            h     - list of entropy value tensors, shape (1,) each
            tol   - maximal admissible interpolation error
    returns: [u_tensor, alpha_tensor, h_tensor] - refined samples stacked
             into tensors with first dimension len(u)
    """
    entropy_tools = math.EntropyTools(1)  # Tools for moment/multiplier reconstruction (M1, N=1)
    e_max = tol
    j = 0
    # Refine until no interval error exceeds the tolerance.
    while e_max >= tol:
        e_max = 0
        i = 0
        # [eL, tL] = get_errors(s_x, function, grad, 0.0001)
        # plt.semilogy(tL, eL, '*')
        # plt.savefig("figures/errors_" + str(j).zfill(3) + ".png")
        # plt.clf()
        while i < len(u) - 1:
            '''
            # error, in case that the approximation undershoots
            t1 = 1 / (grad(s_x[i + 1]) - grad(s_x[i])) * (function(s_x[i]) - s_x[i] * grad(s_x[i]) - (
                    function(s_x[i + 1]) - s_x[i + 1] * grad(s_x[i + 1])))
            e1 = function(t1) - (function(s_x[i]) + (t1 - s_x[i]) * grad(s_x[i]))
            '''
            # error in case that the approximation overshoots:
            # candidate multiplier from the secant slope of h over u_1.
            # NOTE(review): the following three variables are leftover debug
            # values and are not used below.
            test = h[i + 1] - h[i]
            test2 = u[i + 1][1]
            test3 = u[i + 1][1] - u[i][1]
            alpha_o_1 = tf.reshape(((h[i + 1] - h[i]) / (u[i + 1][1] - u[i][1])), (1, 1))  # reshape
            alpha_o = entropy_tools.reconstruct_alpha(alpha_o_1)
            u_o = entropy_tools.reconstruct_u(alpha_o)
            h_o = entropy_tools.compute_h(u_o, alpha_o)
            e_o = h[i] + alpha_o_1 * (u_o[0, 1] - u[i][1]) - h_o
            # error in case that the approximation undershoots:
            # intersection of the tangents at both interval ends.
            u_u = 1 / (alpha[i + 1][1] - alpha[i][1]) * (
                    h[i] - u[i][1] * alpha[i][1] - (h[i + 1] - u[i + 1][1] * alpha[i + 1][1]))
            # compute starting point for the entropy minimization
            alpha_1_start = tf.reshape((alpha[i + 1][1] + alpha[i][1]) / 2, shape=(1, 1))
            alpha_start = entropy_tools.reconstruct_alpha(alpha_1_start)
            u_0 = tf.constant([1.0], dtype=tf.float32)
            u_sol = tf.reshape(tf.concat([u_0, u_u], axis=0), shape=(1, 2))
            """
            alpha_t = entropy_tools.reconstruct_alpha(tf.constant([0.01], shape=(1, 1)))
            u_sol2 = entropy_tools.reconstruct_u(alpha_t)
            """
            alpha_u = entropy_tools.minimize_entropy(u_sol, alpha_start)
            # make realizable
            u_u = entropy_tools.reconstruct_u(alpha_u)
            h_u = entropy_tools.compute_h(u_u, alpha_u)
            e_u = h_u - (h[i] + (u_u[0, 1] - u[i][1]) * alpha[i][1])
            # Keep the candidate with the larger error as the new sample.
            if e_u > e_o:
                u_new = tf.reshape(u_u, shape=(2,))
                alpha_new = tf.reshape(alpha_u, shape=(2,))
                h_new = tf.reshape(h_u, shape=(1,))
                e = e_u
                # print("undershoot worse")
            else:
                u_new = tf.reshape(u_o, shape=(2,))
                alpha_new = tf.reshape(alpha_o, shape=(2,))
                h_new = tf.reshape(h_o, shape=(1,))
                e = e_o
                # print("overshoot worse")
            if e > e_max:
                e_max = e
            if e > tol:
                # Insert the new sample and skip over it.
                u.insert(i + 1, u_new)
                alpha.insert(i + 1, alpha_new)
                h.insert(i + 1, h_new)
                i = i + 1
            i = i + 1
        j = j + 1
    # convert to tensor with first dim = len(u)
    u_tensor = tf.convert_to_tensor(u)
    alpha_tensor = tf.convert_to_tensor(alpha)
    h_tensor = tf.convert_to_tensor(h)
    return [u_tensor, alpha_tensor, h_tensor]
def main():
    """Compare alpha-, u-, and adaptively (smart) sampled training data for
    the M1 entropy closure, plot the samplings, and train one network per
    sampling strategy."""
    # x = sample_data(-10, 10, 0.0001, quad_func, quad_func_grad)
    ### Create alpha sampled values (uniform grid in multiplier space)
    batchSize = 13
    N = 1
    entropy_tools = math.EntropyTools(N)
    alpha_1 = np.linspace(-50, 50, batchSize)
    alpha_1 = alpha_1.reshape((batchSize, N))
    alpha_1 = entropy_tools.convert_to_tensor_float(alpha_1)
    alpha = entropy_tools.reconstruct_alpha(alpha_1)
    u = entropy_tools.reconstruct_u(alpha)
    h = entropy_tools.compute_h(u, alpha)
    # utils.plot1D([u[:, 1]], [alpha[:, 1], h], ['alpha', 'h'], 'sanity_check', log=False, folder_name="figures")
    ### Create u sampled values (uniform grid in moment space)
    u_0 = np.ones((13, 1))
    u_1 = np.reshape(np.linspace(-0.98, 0.98, 13), (13, 1))
    u_ = np.concatenate((u_0, u_1), axis=1)
    u_in = tf.constant(u_, shape=(13, 2))
    alpha_ = []
    h_ = []
    u_ = []
    # Solve the entropy minimization for every moment sample.
    for i in range(13):
        alpha_curr = entropy_tools.minimize_entropy(tf.reshape(u_in[i], shape=(1, 2)),
                                                      tf.reshape(u_in[i], shape=(1, 2)))
        u_curr = entropy_tools.reconstruct_u(alpha_curr)
        h_curr = entropy_tools.compute_h(u_curr, alpha_curr)
        u_.append(u_curr[0])
        h_.append(h_curr[0])
        alpha_.append(alpha_curr[0])
    u_ = tf.stack(u_)
    h_ = tf.stack(h_)
    alpha_ = tf.stack(alpha_)
    # utils.plot1D([u_[:, 1]], [alpha_[:, 1], h_], ['alpha', 'h'], 'sanity_check2', log=False, folder_name="figures")
    ### Create smart sampled values (adaptive refinement between two seeds)
    tolerance = 0.1
    alpha_1 = entropy_tools.convert_to_tensor_float(np.asarray([-50, 50]).reshape((2, N)))
    alpha_ini = entropy_tools.reconstruct_alpha(alpha_1)
    u_ini = entropy_tools.reconstruct_u(alpha_ini)
    h_ini = entropy_tools.compute_h(u_ini, alpha_ini)
    alpha_list = [alpha_ini[0, :], alpha_ini[1, :]]
    u_list = [u_ini[0, :], u_ini[1, :]]
    h_list = [h_ini[0, :], h_ini[1, :]]
    [u_train, alpha_train, h_train] = sample_data_entropy_M1(u_list, alpha_list, h_list, tolerance)
    utils.plot_1d(xs=[u_train[:, 1]], ys=[alpha_train[:, 1], h_train], labels=['alpha', 'h'], linetypes=['+', '*'],
                  name='smart_sampled_entropy', log=False, folder_name="figures")
    utils.plot_1d(xs=[u_train[:, 1], u[:, 1], u_[:, 1]], ys=[alpha_train[:, 1], alpha[:, 1], alpha_[:, 1]],
                  labels=['alpha_smart', 'alpha_alpha_sampled', 'alpha_u_sampled'],
                  linetypes=['*', '2', '3'],
                  name='alha_sampling_strategies', log=False, folder_name="figures")
    utils.plot_1d(xs=[u_train[:, 1], u[:, 1], u_[:, 1]], ys=[h_train, h, h_],
                  labels=['h_smart', 'h_alpha_sampled', 'h_u_sampled'],
                  linetypes=['*', '2', '3'], name='h_sampling_strategies', log=False, folder_name="figures",
                  show_fig=False)
    ### Compare networks with different samplings
    model_smart = init_neural_closure(network_mk=11, poly_degree=1, spatial_dim=1, folder_name="testFolder",
                                      nw_width=15, nw_depth=5, normalized=True, loss_combination=1)
    model_u = init_neural_closure(network_mk=11, poly_degree=1, spatial_dim=1, folder_name="testFolder",
                                  nw_width=15, nw_depth=5, normalized=True, loss_combination=1)
    model_alpha = init_neural_closure(network_mk=11, poly_degree=1, spatial_dim=1, folder_name="testFolder",
                                      nw_width=15, nw_depth=5, normalized=True, loss_combination=1)
    # Checkpoint + CSV logging callbacks, one pair per sampling strategy.
    mc_best_smart = tf.keras.callbacks.ModelCheckpoint('model_smart/best_model.h5', monitor='loss', mode='min',
                                               save_best_only=True, verbose=0)
    csv_logger_smart = tf.keras.callbacks.CSVLogger('model_smart/history.csv')
    mc_best_uniform = tf.keras.callbacks.ModelCheckpoint('model_u/best_model.h5', monitor='loss', mode='min',
                                                save_best_only=True, verbose=0)
    csv_logger_uniform = tf.keras.callbacks.CSVLogger('model_u/history.csv')
    mc_best_alpha = tf.keras.callbacks.ModelCheckpoint('model_alpha/best_model.h5', monitor='loss', mode='min',
                                               save_best_only=True, verbose=0)
    csv_logger_alpha = tf.keras.callbacks.CSVLogger('model_alpha/history.csv')
    # initialize both models with the same weight
    model_smart.model.load_weights('best_model.h5')
    model_u.model.load_weights('best_model.h5')
    model_alpha.model.load_weights('best_model.h5')
    # some params
    epochs = 200000
    batch = 16
    print("training starts")
    # model_smart.model.fit(x=u_train[:, 1], y=[h_train, alpha_train[:, 1]], validation_split=0.0, epochs=epochs,
    #                      batch_size=batch, verbose=0, callbacks=[mc_best_smart, csv_logger_smart])
    print("trained smart model")
    # model_u.model.fit(x=u[:, 1], y=[h, alpha[:, 1]], validation_split=0.0, epochs=epochs, batch_size=batch,
    #                  verbose=0, callbacks=[mc_best_uniform, csv_logger_uniform])
    print("trained u model")
    model_alpha.model.fit(x=u_[:, 1], y=[h_, alpha_[:, 1]], validation_split=0.0, epochs=epochs, batch_size=batch,
                          verbose=0, callbacks=[mc_best_alpha, csv_logger_alpha])
    print("trained alpha model")
    return 0
def pointwiseDiff(trueSamples, predSamples):
    """
    brief: computes the pointwise absolute difference between two sample arrays
    input: trueSamples, dim = (ns,N)
           predSamples, dim = (ns,N)
    returns: abs(trueSamples-predSamples) dim = (ns,N)
    """
    # Vectorized replacement of the former per-row Python loop; note the old
    # docstring claimed "mse" but the computation has always been |true - pred|.
    # np.asarray also accepts plain sequences, generalizing the old
    # ndarray-only behavior.
    return np.abs(np.asarray(trueSamples) - np.asarray(predSamples))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"src.networks.configmodel.init_neural_closure",
"numpy.abs",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.constant",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.stack",
"tensorflow.keras.callbacks.CSVLogger",
"tensorflow.concat",
... | [((413, 422), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (419, 422), True, 'import numpy as np\n'), ((1810, 1830), 'src.math.EntropyTools', 'math.EntropyTools', (['(1)'], {}), '(1)\n', (1827, 1830), False, 'from src import math\n'), ((4882, 4905), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['u'], {}), '(u)\n', (4902, 4905), True, 'import tensorflow as tf\n'), ((4925, 4952), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['alpha'], {}), '(alpha)\n', (4945, 4952), True, 'import tensorflow as tf\n'), ((4968, 4991), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['h'], {}), '(h)\n', (4988, 4991), True, 'import tensorflow as tf\n'), ((5204, 5224), 'src.math.EntropyTools', 'math.EntropyTools', (['N'], {}), '(N)\n', (5221, 5224), False, 'from src import math\n'), ((5240, 5271), 'numpy.linspace', 'np.linspace', (['(-50)', '(50)', 'batchSize'], {}), '(-50, 50, batchSize)\n', (5251, 5271), True, 'import numpy as np\n'), ((5675, 5691), 'numpy.ones', 'np.ones', (['(13, 1)'], {}), '((13, 1))\n', (5682, 5691), True, 'import numpy as np\n'), ((5761, 5795), 'numpy.concatenate', 'np.concatenate', (['(u_0, u_1)'], {'axis': '(1)'}), '((u_0, u_1), axis=1)\n', (5775, 5795), True, 'import numpy as np\n'), ((5807, 5837), 'tensorflow.constant', 'tf.constant', (['u_'], {'shape': '(13, 2)'}), '(u_, shape=(13, 2))\n', (5818, 5837), True, 'import tensorflow as tf\n'), ((6299, 6311), 'tensorflow.stack', 'tf.stack', (['u_'], {}), '(u_)\n', (6307, 6311), True, 'import tensorflow as tf\n'), ((6321, 6333), 'tensorflow.stack', 'tf.stack', (['h_'], {}), '(h_)\n', (6329, 6333), True, 'import tensorflow as tf\n'), ((6347, 6363), 'tensorflow.stack', 'tf.stack', (['alpha_'], {}), '(alpha_)\n', (6355, 6363), True, 'import tensorflow as tf\n'), ((7031, 7216), 'src.utils.plot_1d', 'utils.plot_1d', ([], {'xs': '[u_train[:, 1]]', 'ys': '[alpha_train[:, 1], h_train]', 'labels': "['alpha', 'h']", 'linetypes': "['+', '*']", 'name': '"""smart_sampled_entropy"""', 'log': 
'(False)', 'folder_name': '"""figures"""'}), "(xs=[u_train[:, 1]], ys=[alpha_train[:, 1], h_train], labels=[\n 'alpha', 'h'], linetypes=['+', '*'], name='smart_sampled_entropy', log=\n False, folder_name='figures')\n", (7044, 7216), False, 'from src import utils\n'), ((7230, 7505), 'src.utils.plot_1d', 'utils.plot_1d', ([], {'xs': '[u_train[:, 1], u[:, 1], u_[:, 1]]', 'ys': '[alpha_train[:, 1], alpha[:, 1], alpha_[:, 1]]', 'labels': "['alpha_smart', 'alpha_alpha_sampled', 'alpha_u_sampled']", 'linetypes': "['*', '2', '3']", 'name': '"""alha_sampling_strategies"""', 'log': '(False)', 'folder_name': '"""figures"""'}), "(xs=[u_train[:, 1], u[:, 1], u_[:, 1]], ys=[alpha_train[:, 1],\n alpha[:, 1], alpha_[:, 1]], labels=['alpha_smart',\n 'alpha_alpha_sampled', 'alpha_u_sampled'], linetypes=['*', '2', '3'],\n name='alha_sampling_strategies', log=False, folder_name='figures')\n", (7243, 7505), False, 'from src import utils\n'), ((7552, 7799), 'src.utils.plot_1d', 'utils.plot_1d', ([], {'xs': '[u_train[:, 1], u[:, 1], u_[:, 1]]', 'ys': '[h_train, h, h_]', 'labels': "['h_smart', 'h_alpha_sampled', 'h_u_sampled']", 'linetypes': "['*', '2', '3']", 'name': '"""h_sampling_strategies"""', 'log': '(False)', 'folder_name': '"""figures"""', 'show_fig': '(False)'}), "(xs=[u_train[:, 1], u[:, 1], u_[:, 1]], ys=[h_train, h, h_],\n labels=['h_smart', 'h_alpha_sampled', 'h_u_sampled'], linetypes=['*',\n '2', '3'], name='h_sampling_strategies', log=False, folder_name=\n 'figures', show_fig=False)\n", (7565, 7799), False, 'from src import utils\n'), ((7911, 8071), 'src.networks.configmodel.init_neural_closure', 'init_neural_closure', ([], {'network_mk': '(11)', 'poly_degree': '(1)', 'spatial_dim': '(1)', 'folder_name': '"""testFolder"""', 'nw_width': '(15)', 'nw_depth': '(5)', 'normalized': '(True)', 'loss_combination': '(1)'}), "(network_mk=11, poly_degree=1, spatial_dim=1,\n folder_name='testFolder', nw_width=15, nw_depth=5, normalized=True,\n loss_combination=1)\n", (7930, 8071), 
False, 'from src.networks.configmodel import init_neural_closure\n'), ((8116, 8276), 'src.networks.configmodel.init_neural_closure', 'init_neural_closure', ([], {'network_mk': '(11)', 'poly_degree': '(1)', 'spatial_dim': '(1)', 'folder_name': '"""testFolder"""', 'nw_width': '(15)', 'nw_depth': '(5)', 'normalized': '(True)', 'loss_combination': '(1)'}), "(network_mk=11, poly_degree=1, spatial_dim=1,\n folder_name='testFolder', nw_width=15, nw_depth=5, normalized=True,\n loss_combination=1)\n", (8135, 8276), False, 'from src.networks.configmodel import init_neural_closure\n'), ((8321, 8481), 'src.networks.configmodel.init_neural_closure', 'init_neural_closure', ([], {'network_mk': '(11)', 'poly_degree': '(1)', 'spatial_dim': '(1)', 'folder_name': '"""testFolder"""', 'nw_width': '(15)', 'nw_depth': '(5)', 'normalized': '(True)', 'loss_combination': '(1)'}), "(network_mk=11, poly_degree=1, spatial_dim=1,\n folder_name='testFolder', nw_width=15, nw_depth=5, normalized=True,\n loss_combination=1)\n", (8340, 8481), False, 'from src.networks.configmodel import init_neural_closure\n'), ((8533, 8661), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""model_smart/best_model.h5"""'], {'monitor': '"""loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(0)'}), "('model_smart/best_model.h5', monitor=\n 'loss', mode='min', save_best_only=True, verbose=0)\n", (8567, 8661), True, 'import tensorflow as tf\n'), ((8735, 8790), 'tensorflow.keras.callbacks.CSVLogger', 'tf.keras.callbacks.CSVLogger', (['"""model_smart/history.csv"""'], {}), "('model_smart/history.csv')\n", (8763, 8790), True, 'import tensorflow as tf\n'), ((8813, 8936), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""model_u/best_model.h5"""'], {'monitor': '"""loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(0)'}), "('model_u/best_model.h5', monitor='loss',\n mode='min', save_best_only=True, 
verbose=0)\n", (8847, 8936), True, 'import tensorflow as tf\n'), ((9015, 9066), 'tensorflow.keras.callbacks.CSVLogger', 'tf.keras.callbacks.CSVLogger', (['"""model_u/history.csv"""'], {}), "('model_u/history.csv')\n", (9043, 9066), True, 'import tensorflow as tf\n'), ((9087, 9215), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""model_alpha/best_model.h5"""'], {'monitor': '"""loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(0)'}), "('model_alpha/best_model.h5', monitor=\n 'loss', mode='min', save_best_only=True, verbose=0)\n", (9121, 9215), True, 'import tensorflow as tf\n'), ((9289, 9344), 'tensorflow.keras.callbacks.CSVLogger', 'tf.keras.callbacks.CSVLogger', (['"""model_alpha/history.csv"""'], {}), "('model_alpha/history.csv')\n", (9317, 9344), True, 'import tensorflow as tf\n'), ((10728, 10743), 'numpy.asarray', 'np.asarray', (['err'], {}), '(err)\n', (10738, 10743), True, 'import numpy as np\n'), ((5713, 5741), 'numpy.linspace', 'np.linspace', (['(-0.98)', '(0.98)', '(13)'], {}), '(-0.98, 0.98, 13)\n', (5724, 5741), True, 'import numpy as np\n'), ((2713, 2776), 'tensorflow.reshape', 'tf.reshape', (['((h[i + 1] - h[i]) / (u[i + 1][1] - u[i][1]))', '(1, 1)'], {}), '((h[i + 1] - h[i]) / (u[i + 1][1] - u[i][1]), (1, 1))\n', (2723, 2776), True, 'import tensorflow as tf\n'), ((3313, 3374), 'tensorflow.reshape', 'tf.reshape', (['((alpha[i + 1][1] + alpha[i][1]) / 2)'], {'shape': '(1, 1)'}), '((alpha[i + 1][1] + alpha[i][1]) / 2, shape=(1, 1))\n', (3323, 3374), True, 'import tensorflow as tf\n'), ((3466, 3502), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float32'}), '([1.0], dtype=tf.float32)\n', (3477, 3502), True, 'import tensorflow as tf\n'), ((5954, 5987), 'tensorflow.reshape', 'tf.reshape', (['u_in[i]'], {'shape': '(1, 2)'}), '(u_in[i], shape=(1, 2))\n', (5964, 5987), True, 'import tensorflow as tf\n'), ((6041, 6074), 'tensorflow.reshape', 'tf.reshape', (['u_in[i]'], {'shape': 
'(1, 2)'}), '(u_in[i], shape=(1, 2))\n', (6051, 6074), True, 'import tensorflow as tf\n'), ((10672, 10711), 'numpy.abs', 'np.abs', (['(trueSamples[i] - predSamples[i])'], {}), '(trueSamples[i] - predSamples[i])\n', (10678, 10711), True, 'import numpy as np\n'), ((3534, 3563), 'tensorflow.concat', 'tf.concat', (['[u_0, u_u]'], {'axis': '(0)'}), '([u_0, u_u], axis=0)\n', (3543, 3563), True, 'import tensorflow as tf\n'), ((4092, 4119), 'tensorflow.reshape', 'tf.reshape', (['u_u'], {'shape': '(2,)'}), '(u_u, shape=(2,))\n', (4102, 4119), True, 'import tensorflow as tf\n'), ((4148, 4179), 'tensorflow.reshape', 'tf.reshape', (['alpha_u'], {'shape': '(2,)'}), '(alpha_u, shape=(2,))\n', (4158, 4179), True, 'import tensorflow as tf\n'), ((4204, 4231), 'tensorflow.reshape', 'tf.reshape', (['h_u'], {'shape': '(1,)'}), '(h_u, shape=(1,))\n', (4214, 4231), True, 'import tensorflow as tf\n'), ((4342, 4369), 'tensorflow.reshape', 'tf.reshape', (['u_o'], {'shape': '(2,)'}), '(u_o, shape=(2,))\n', (4352, 4369), True, 'import tensorflow as tf\n'), ((4398, 4429), 'tensorflow.reshape', 'tf.reshape', (['alpha_o'], {'shape': '(2,)'}), '(alpha_o, shape=(2,))\n', (4408, 4429), True, 'import tensorflow as tf\n'), ((4454, 4481), 'tensorflow.reshape', 'tf.reshape', (['h_o'], {'shape': '(1,)'}), '(h_o, shape=(1,))\n', (4464, 4481), True, 'import tensorflow as tf\n'), ((6593, 6614), 'numpy.asarray', 'np.asarray', (['[-50, 50]'], {}), '([-50, 50])\n', (6603, 6614), True, 'import numpy as np\n')] |
import xml.etree.ElementTree as ElementTree
import tensorflow as tf
import numpy as np
class Force:
    """Base class for all force terms.

    Subclasses override get_weighting() to report their relative weight
    (used when normalizing total energy).
    """

    def get_weighting(self):
        # Default: an unweighted force term contributes with unit weight.
        return 1
class HarmonicBondForce(Force):
    """Harmonic (spring) bond between two atoms: E = k * (|r1 - r2| - length)^2."""

    def __init__(self, atom1, atom2, length, k):
        self.atom1 = atom1
        self.atom2 = atom2
        self.length = tf.constant(length)
        self.k = tf.constant(k)

    def get_weighting(self):
        # The spring constant doubles as this term's weight.
        return self.k

    def __call__(self):
        return HarmonicBondForce._call(self.atom1.pos, self.atom2.pos, self.length, self.k)

    @tf.function
    def _call(pos1, pos2, length, k):
        # Deviation of the current bond length from the equilibrium length.
        stretch = tf.norm(pos1 - pos2) - length
        return k * stretch ** 2

    def __repr__(self):
        return f'HarmonicBondForce(between {self.atom1.name} and {self.atom2.name}, length is {self.length})'
class HarmonicAngleForce(Force):
    """Harmonic bending term for the angle at atom2 between atom1 and atom3: E = k * (theta - target)^2."""
    def __init__(self, atom1, atom2, atom3, angle, k):
        self.atom1 = atom1
        self.atom2 = atom2
        self.atom3 = atom3
        # use angle - pi as the actual target angle such that angle=0 is straight and angle=pi is right angle
        self.angle = tf.constant(angle) - np.pi
        # Fold the shifted target to a non-negative value (sign flip keeps magnitude).
        self.angle *= np.sign(self.angle)
        self.k = tf.constant(k)
    def get_weighting(self):
        # The spring constant doubles as this term's weight.
        return self.k
    def __call__(self):
        return HarmonicAngleForce._call(self.atom1.pos, self.atom2.pos, self.atom3.pos, self.angle, self.k)
    @tf.function
    def _call(pos1, pos2, pos3, angle, k):
        # Bond vectors from the middle atom; the angle between them is theta.
        side1 = pos1 - pos2
        side2 = pos3 - pos2
        cosine_angle = tf.tensordot(side1, side2, 1) / (tf.norm(side1) * tf.norm(side2))
        # NOTE(review): a Python-level `if` on a tensor inside tf.function relies on
        # eager execution / autograph conversion -- confirm this traces as intended.
        if cosine_angle >= 1:
            # acos(x) is not defined for x>1 and gradient is -inf for x=1 (returning 0 here destroys the gradient for this force)
            return tf.constant(0, dtype=tf.float32)
        ang = tf.math.acos(cosine_angle)
        return k * (ang - angle) ** 2
    def __repr__(self):
        return f'HarmonicAngleForce(between {self.atom1.name}, {self.atom2.name} and {self.atom3.name}, angle is {self.angle})'
class NonBondedForce(Force):
    """Lennard-Jones 12-6 plus Coulomb interaction between two non-bonded atoms."""
    def __init__(self, atom1, atom2):
        self.atom1 = atom1
        self.atom2 = atom2
        # Geometric-mean mixing of the per-atom epsilons; the factor 4 comes from the LJ form.
        self.epsilon = tf.constant(4 * tf.math.sqrt(self.atom1.epsilon * self.atom2.epsilon), dtype=tf.float32)
        # Arithmetic-mean (Lorentz) mixing of sigmas; cache sigma^6 and sigma^12 once.
        sigma = (self.atom1.sigma + self.atom2.sigma) / 2
        self.sigma6 = tf.constant(sigma ** 6, dtype=tf.float32)
        self.sigma12 = tf.constant(sigma ** 12, dtype=tf.float32)
        self.charge = tf.constant(self.atom1.charge * self.atom2.charge)
    def get_weighting(self):
        # LJ well depth is used as this term's weight.
        return self.epsilon
    def __call__(self):
        return NonBondedForce._call(self.atom1.pos, self.atom2.pos, self.epsilon, self.sigma6, self.sigma12, self.charge)
    @tf.function
    def _call(pos1, pos2, epsilon, sigma6, sigma12, charge, cutoff=0.6):
        # calculation of r should probably not just use the positions but also the contact radii
        r_sq = tf.reduce_sum((pos2 - pos1) ** 2)
        # NOTE(review): Python-level `if` on a tensor inside tf.function relies on
        # eager execution / autograph -- confirm this traces as intended.
        if r_sq > cutoff ** 2:
            # Pairs beyond the cutoff contribute nothing.
            return tf.constant(0, dtype=tf.float32)
        # LJ expressed in r^2 (avoids a sqrt for the LJ part) plus the Coulomb term.
        return epsilon * (sigma12 / r_sq ** 6 - sigma6 / r_sq ** 3) + charge / tf.math.sqrt(r_sq)
class Atom:
    """A single atom: forcefield parameters plus a trainable 3D position variable."""

    def __init__(self, name, element, atom_class, type_id, mass, charge, sigma, epsilon, pos=None):
        self.name = name
        self.element = element
        self.atom_class = atom_class
        self.type_id = type_id
        self.mass = mass
        self.charge = charge
        self.sigma = sigma
        self.epsilon = epsilon
        # Resolve the initial coordinates, then wrap them in a single tf.Variable.
        if pos is None:
            pos = tf.random.uniform(shape=(3,))
        elif type(pos) in (float, int):
            # A scalar offsets the random unit cube by that amount.
            pos = tf.random.uniform(minval=pos, maxval=pos + 1, shape=(3,))
        self.pos = tf.Variable(pos)

    def __repr__(self):
        return f'Atom({self.name}: element {self.element} with mass {self.mass})'
class Residue:
    """One amino-acid residue parameterized from an AMBER forcefield XML.

    Loads the residue's atoms, intra-residue bonds, harmonic bond/angle
    force terms and (optionally) pairwise non-bonded terms.

    Fix: the unsupported-child branch previously printed ``obj.type``,
    which does not exist on ``xml.etree`` Elements and would raise
    AttributeError; it now reports ``obj.tag``.
    """

    def __init__(self, name, forcefield='forcefields/amber99sb.xml', add_hydrogens=False, add_oxygen=False, his_replacement='HID', add_non_bonded=True, index=0):
        self.index = index
        # parse a mapping from single letter amino acid codes to three letter abbreviations
        mappings = ('ala:A|arg:R|asn:N|asp:D|cys:C|gln:Q|glu:E|gly:G|his:H|ile:I|'
                    + 'leu:L|lys:K|met:M|phe:F|pro:P|ser:S|thr:T|trp:W|tyr:Y|val:V').upper().split('|')
        letter2aa = dict([m.split(':')[::-1] for m in mappings])
        # figure out the 3 letter amino acid abbreviation from the name parameter
        if len(name) == 1:
            self.name = letter2aa[name]
        else:
            self.name = name
        if add_hydrogens and add_oxygen:
            # theoretically it's possible (I think) but the AMBER forcefield doesn't list this directly
            raise ValueError('Can\'t add hydrogens and oxygen to the same residue')
        # Histidine (HIS, H) is either one of HID, HIE or HIP in AMBER
        if self.name == 'HIS':
            self.name = his_replacement
        # N-/C-terminal variants carry an N/C prefix in the AMBER naming scheme.
        if add_hydrogens:
            self.name = 'N' + self.name
        if add_oxygen:
            self.name = 'C' + self.name
        # load the forcefield xml and store the root element
        if type(forcefield) == str:
            self.forcefield = ElementTree.parse(forcefield).getroot()
        elif type(forcefield) == ElementTree.ElementTree:
            self.forcefield = forcefield.getroot()
        elif type(forcefield) == ElementTree.Element:
            self.forcefield = forcefield
        else:
            raise ValueError(f'Forcefield type {type(forcefield)} not supported')
        self.atoms = []
        self.bonds = []
        self.external_bond_indices = []
        # load all atomic attributes for this residue from the forcefield and store atomic bonds
        for obj in self.forcefield.find(f'Residues/Residue[@name=\'{self.name}\']'):
            if obj.tag == 'Atom':
                self.atoms.append(self._get_atom(obj))
            elif obj.tag == 'Bond':
                self.bonds.append(self._get_bond(obj))
            elif obj.tag == 'ExternalBond':
                self.external_bond_indices.append(self._get_external_bond(obj))
            else:
                # BUG FIX: Element has no .type attribute; report the unknown tag instead.
                print(f'Unsupported type {obj.tag}')
        # get the harmonic bond forces between atoms
        self.harmonic_bond_forces = []
        for bond in self.bonds:
            a1 = bond[0]
            a2 = bond[1]
            # The forcefield may list the class pair in either order.
            search_options = [(a1.atom_class, a2.atom_class),
                              (a2.atom_class, a1.atom_class)]
            for option in search_options:
                force = self._get_harmonic_bond_force(*option)
                if force is not None:
                    break
            if force is not None:
                self.harmonic_bond_forces.append(HarmonicBondForce(a1, a2, float(force.get('length')), float(force.get('k'))))
            else:
                print(f'No harmonic bond force found for {a1.name} and {a2.name}')
        # get the harmonic angle forces between atoms (all ordered atom triples i < j < k)
        self.harmonic_angle_forces = []
        for i, a1 in enumerate(self.atoms):
            for j, a2 in enumerate(self.atoms[i+1:]):
                for k, a3 in enumerate(self.atoms[i+j+2:]):
                    # The forcefield may list the triple forwards or backwards.
                    search_options = [(a1.atom_class, a2.atom_class, a3.atom_class),
                                      (a3.atom_class, a2.atom_class, a1.atom_class)]
                    for option in search_options:
                        force = self._get_harmonic_angle_force(*option)
                        if force is not None:
                            break
                    if force is not None:
                        self.harmonic_angle_forces.append(HarmonicAngleForce(a1, a2, a3, float(force.get('angle')), float(force.get('k'))))
        # get non-bonded forces for all atom pairs (skipped when the residue is part of a Chain,
        # which builds them across the whole chain instead)
        self.nonbonded_forces = []
        if add_non_bonded:
            for i, a1 in enumerate(self.atoms):
                for a2 in self.atoms[i+1:]:
                    self.nonbonded_forces.append(NonBondedForce(a1, a2))

    def _get_atom(self, xml_element):
        """Build an Atom from a forcefield <Atom> element plus its type tables."""
        name = xml_element.get('name')
        type_id = int(xml_element.get('type'))
        # NOTE(review): indices [0] (AtomTypes) and [5] (NonbondedForce) assume the
        # amber99sb.xml child ordering -- confirm against the forcefield layout.
        atom_traits = self.forcefield[0][type_id].attrib
        atom_class = atom_traits['class']
        element = atom_traits['element']
        mass = float(atom_traits['mass'])
        nonbonded_traits = self.forcefield[5][type_id].attrib
        charge = float(nonbonded_traits.get('charge'))
        sigma = float(nonbonded_traits.get('sigma'))
        epsilon = float(nonbonded_traits.get('epsilon'))
        return Atom(name, element, atom_class, type_id, mass, charge, sigma, epsilon, pos=self.index)

    def _get_bond(self, xml_element):
        """Resolve a <Bond from=... to=...> element to the two bonded Atom objects."""
        attribs = xml_element.attrib
        return [self.atoms[int(attribs['from'])], self.atoms[int(attribs['to'])]]

    def _get_external_bond(self, xml_element):
        """Return the index of an atom that bonds to a neighbouring residue."""
        return int(xml_element.attrib['from'])

    def _get_harmonic_bond_force(self, name1, name2):
        """Look up the harmonic bond parameters for an ordered class pair (or None)."""
        return self.forcefield.find(f'HarmonicBondForce/Bond[@class1=\'{name1}\'][@class2=\'{name2}\']')

    def _get_harmonic_angle_force(self, name1, name2, name3):
        """Look up the harmonic angle parameters for an ordered class triple (or None)."""
        return self.forcefield.find(f'HarmonicAngleForce/Angle[@class1=\'{name1}\'][@class2=\'{name2}\'][@class3=\'{name3}\']')

    def get_atom_count(self):
        return len(self.atoms)

    def get_bond_count(self):
        return len(self.bonds)

    def get_atoms(self):
        return self.atoms

    def get_forces(self):
        """All force terms owned by this residue."""
        return self.harmonic_bond_forces + self.harmonic_angle_forces + self.nonbonded_forces

    def get_variables(self):
        """The trainable position variables of all atoms."""
        return [atom.pos for atom in self.atoms]

    def get_energy(self, normalize=False):
        """Total potential energy; optionally normalized by the summed force weights."""
        forces = self.get_forces()
        if normalize:
            ks = sum([force.get_weighting() for force in forces])
        else:
            ks = 1
        return sum([force() for force in forces]) / ks

    def get_mass(self):
        return sum([atom.mass for atom in self.atoms])

    def __repr__(self):
        return f'Residue({self.name}: {self.get_atom_count()} atoms, {self.get_bond_count()} bonds)'
class Chain:
    """A peptide chain: a sequence of Residues plus the forces that span them.

    Fix: the length guard now rejects the empty sequence as well
    (previously only ``len(sequence) == 1`` raised, so ``Chain('')``
    silently produced a degenerate, residue-less chain).
    """

    def __init__(self, sequence, forcefield='forcefields/amber99sb.xml'):
        if len(sequence) < 2:
            raise ValueError('Must have at least two amino acids to form a chain')
        self.sequence = sequence
        self.residues = []
        # generate residues from the amino acid sequence; the first residue gets the
        # N-terminal hydrogens, the last the C-terminal oxygen
        for i, aa in enumerate(self.sequence):
            self.residues.append(Residue(aa, forcefield, add_non_bonded=False, add_hydrogens=(i == 0), add_oxygen=(i == len(self.sequence) - 1), index=i))
        self.external_bonds = []
        # store the atoms which have external bonds, reaching from one residue to another
        for i in range(1, len(self.residues)):
            idx1 = self.residues[i-1].external_bond_indices[-1]
            idx2 = self.residues[i].external_bond_indices[0]
            self.external_bonds.append([self.residues[i-1].atoms[idx1], self.residues[i].atoms[idx2]])
        self.external_harmonic_bond_forces = []
        # get the harmonic bond forces between atoms with external bonds
        for bond in self.external_bonds:
            a1 = bond[0]
            a2 = bond[1]
            # The forcefield may list the class pair in either order.
            search_options = [(a1.atom_class, a2.atom_class),
                              (a2.atom_class, a1.atom_class)]
            for option in search_options:
                force = self.residues[0]._get_harmonic_bond_force(*option)
                if force is not None:
                    break
            if force is not None:
                self.external_harmonic_bond_forces.append(HarmonicBondForce(a1, a2, float(force.get('length')), float(force.get('k'))))
        # get non-bonded forces for all pairs of atoms across the whole chain
        self.nonbonded_forces = []
        atoms = self.get_atoms()
        for i, a1 in enumerate(atoms):
            for a2 in atoms[i+1:]:
                self.nonbonded_forces.append(NonBondedForce(a1, a2))

    def get_atom_count(self):
        return sum([res.get_atom_count() for res in self.residues])

    def get_bond_count(self):
        return sum([res.get_bond_count() for res in self.residues]) + len(self.external_bonds)

    def get_atoms(self):
        return sum([res.atoms for res in self.residues], [])

    def get_bonds(self):
        return sum([res.bonds for res in self.residues], []) + self.external_bonds

    def get_forces(self):
        """All per-residue forces plus inter-residue bond and non-bonded forces."""
        return sum([res.get_forces() for res in self.residues], []) + self.external_harmonic_bond_forces + self.nonbonded_forces

    def get_energy(self, normalize=False):
        """Total potential energy; optionally normalized by the summed force weights."""
        forces = self.get_forces()
        if normalize:
            ks = sum([force.get_weighting() for force in forces])
        else:
            ks = 1
        return sum([force() for force in forces]) / ks

    def get_variables(self):
        return sum([res.get_variables() for res in self.residues], [])

    def get_mass(self):
        return sum([res.get_mass() for res in self.residues])

    def __repr__(self):
        return f'Chain({len(self.residues)} residues, {self.get_atom_count()} atoms, {self.get_bond_count()} bonds)'
if __name__ == '__main__':
    # Smoke test: build a short Gln-Glu-Asp chain and report its raw energy.
    demo = Chain('QED')
    print(demo)
    print(demo.get_energy())
| [
"xml.etree.ElementTree.parse",
"tensorflow.reduce_sum",
"tensorflow.random.uniform",
"tensorflow.math.acos",
"tensorflow.constant",
"tensorflow.math.sqrt",
"tensorflow.Variable",
"tensorflow.tensordot",
"numpy.sign",
"tensorflow.norm"
] | [((275, 294), 'tensorflow.constant', 'tf.constant', (['length'], {}), '(length)\n', (286, 294), True, 'import tensorflow as tf\n'), ((306, 320), 'tensorflow.constant', 'tf.constant', (['k'], {}), '(k)\n', (317, 320), True, 'import tensorflow as tf\n'), ((1009, 1028), 'numpy.sign', 'np.sign', (['self.angle'], {}), '(self.angle)\n', (1016, 1028), True, 'import numpy as np\n'), ((1041, 1055), 'tensorflow.constant', 'tf.constant', (['k'], {}), '(k)\n', (1052, 1055), True, 'import tensorflow as tf\n'), ((1603, 1629), 'tensorflow.math.acos', 'tf.math.acos', (['cosine_angle'], {}), '(cosine_angle)\n', (1615, 1629), True, 'import tensorflow as tf\n'), ((2087, 2128), 'tensorflow.constant', 'tf.constant', (['(sigma ** 6)'], {'dtype': 'tf.float32'}), '(sigma ** 6, dtype=tf.float32)\n', (2098, 2128), True, 'import tensorflow as tf\n'), ((2146, 2188), 'tensorflow.constant', 'tf.constant', (['(sigma ** 12)'], {'dtype': 'tf.float32'}), '(sigma ** 12, dtype=tf.float32)\n', (2157, 2188), True, 'import tensorflow as tf\n'), ((2205, 2255), 'tensorflow.constant', 'tf.constant', (['(self.atom1.charge * self.atom2.charge)'], {}), '(self.atom1.charge * self.atom2.charge)\n', (2216, 2255), True, 'import tensorflow as tf\n'), ((2628, 2661), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((pos2 - pos1) ** 2)'], {}), '((pos2 - pos1) ** 2)\n', (2641, 2661), True, 'import tensorflow as tf\n'), ((966, 984), 'tensorflow.constant', 'tf.constant', (['angle'], {}), '(angle)\n', (977, 984), True, 'import tensorflow as tf\n'), ((1339, 1368), 'tensorflow.tensordot', 'tf.tensordot', (['side1', 'side2', '(1)'], {}), '(side1, side2, 1)\n', (1351, 1368), True, 'import tensorflow as tf\n'), ((1561, 1593), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (1572, 1593), True, 'import tensorflow as tf\n'), ((2697, 2729), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (2708, 2729), True, 'import 
tensorflow as tf\n'), ((1372, 1386), 'tensorflow.norm', 'tf.norm', (['side1'], {}), '(side1)\n', (1379, 1386), True, 'import tensorflow as tf\n'), ((1389, 1403), 'tensorflow.norm', 'tf.norm', (['side2'], {}), '(side2)\n', (1396, 1403), True, 'import tensorflow as tf\n'), ((1946, 1999), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(self.atom1.epsilon * self.atom2.epsilon)'], {}), '(self.atom1.epsilon * self.atom2.epsilon)\n', (1958, 1999), True, 'import tensorflow as tf\n'), ((2803, 2821), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['r_sq'], {}), '(r_sq)\n', (2815, 2821), True, 'import tensorflow as tf\n'), ((3165, 3194), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (3182, 3194), True, 'import tensorflow as tf\n'), ((3354, 3370), 'tensorflow.Variable', 'tf.Variable', (['pos'], {}), '(pos)\n', (3365, 3370), True, 'import tensorflow as tf\n'), ((536, 556), 'tensorflow.norm', 'tf.norm', (['(pos1 - pos2)'], {}), '(pos1 - pos2)\n', (543, 556), True, 'import tensorflow as tf\n'), ((3269, 3330), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'minval': '(0 + pos)', 'maxval': '(1 + pos)', 'shape': '(3,)'}), '(minval=0 + pos, maxval=1 + pos, shape=(3,))\n', (3286, 3330), True, 'import tensorflow as tf\n'), ((4671, 4700), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['forcefield'], {}), '(forcefield)\n', (4688, 4700), True, 'import xml.etree.ElementTree as ElementTree\n')] |
import rospy
import numpy as np
#import tf
import argparse
import os
import yaml
from scipy.spatial.transform import Rotation as R
from nav_msgs.msg import Odometry
# Homogeneous 4x4 transform between the two frames; loaded from the calib YAML in main().
T_frame1_to_frame2 = None
# First transformed pose seen by the callback; used as the reference origin when --reset_first is set.
base_odom = None
# NOTE(review): the callback actually reads the module-global `args` assigned in the
# __main__ block, not `args_` -- confirm `args_` is vestigial before removing it.
args_ = None
def get_args():
    """Parse the relay's command-line options; unrecognized arguments are ignored."""
    parser = argparse.ArgumentParser()
    # Plain string options (topic names, node name).
    for opt in ('nodename', 'sub_topic', 'pub_topic'):
        parser.add_argument('--' + opt, dest=opt, action='store', type=str)
    parser.add_argument('--frame_id', dest='frame_id', action='store', type=str, default='odom')
    parser.add_argument('--child_frame_id', dest='child_frame_id', action='store', type=str)
    parser.add_argument('--calib_filename', dest='calib_filename', action='store', type=str)
    # Boolean flags.
    for opt in ('inverse', 'reset_first'):
        parser.add_argument('--' + opt, dest=opt, action='store_true')
    known, _unknown = parser.parse_known_args()
    return known
def odom_callback(msg):
    """Re-express an incoming Odometry pose via the calibrated transform and republish it.

    Reads module globals: T_frame1_to_frame2 (4x4 calibration matrix), `args`
    (parsed CLI options, assigned in the __main__ block), `base_odom`, `odom_pub`.
    Mutates `msg` in place before publishing.
    """
    global odom_pub, T_frame1_to_frame2, base_odom, args_
    quaternion = (
        msg.pose.pose.orientation.x,
        msg.pose.pose.orientation.y,
        msg.pose.pose.orientation.z,
        msg.pose.pose.orientation.w
    )
    translation = (
        msg.pose.pose.position.x,
        msg.pose.pose.position.y,
        msg.pose.pose.position.z
    )
    # BUG FIX: build the rotation via the documented from_quat() constructor
    # (scalar-last x, y, z, w); scipy discourages instantiating Rotation directly.
    rot = R.from_quat(quaternion)
    pose = np.identity(4)
    pose[:3, :3] = rot.as_matrix()
    pose[:3, 3] = translation
    # Similarity transform T * pose * T^-1 expresses the pose in the other frame.
    if args.inverse:
        T_frame2_to_frame1 = np.linalg.inv(T_frame1_to_frame2)
        pose = T_frame2_to_frame1.dot(pose).dot(np.linalg.inv(T_frame2_to_frame1))
    else:
        pose = T_frame1_to_frame2.dot(pose).dot(np.linalg.inv(T_frame1_to_frame2))
    if args.reset_first:
        # The first pose seen becomes the reference origin; later poses are relative to it.
        if base_odom is None:
            base_odom = np.copy(pose)
        pose = np.linalg.inv(base_odom).dot(pose)
    rot = R.from_matrix(pose[:3, :3])
    quaternion = rot.as_quat()
    translation = pose[:3, 3]
    msg.pose.pose.orientation.x = quaternion[0]
    msg.pose.pose.orientation.y = quaternion[1]
    msg.pose.pose.orientation.z = quaternion[2]
    msg.pose.pose.orientation.w = quaternion[3]
    msg.pose.pose.position.x = translation[0]
    msg.pose.pose.position.y = translation[1]
    msg.pose.pose.position.z = translation[2]
    msg.header.frame_id = args.frame_id
    msg.child_frame_id = args.child_frame_id
    odom_pub.publish(msg)
def main(args):
    """Load the frame1->frame2 calibration matrix and start the odometry relay node."""
    global T_frame1_to_frame2, odom_pub, odom_sub, args_
    # Read the 4x4 calibration transform from the YAML file.
    with open(args.calib_filename) as calib_file:
        calib = yaml.load(calib_file, Loader=yaml.FullLoader)
    T_frame1_to_frame2 = np.array(calib['T'])
    # Wire up ROS: subscribe to raw odometry, publish the transformed stream.
    rospy.init_node(args.nodename)
    odom_sub = rospy.Subscriber(args.sub_topic, Odometry, odom_callback)
    odom_pub = rospy.Publisher(args.pub_topic, Odometry, queue_size=10)
    rospy.spin()
if __name__ == '__main__':
    # `args` is deliberately assigned at module scope: odom_callback reads it as a global.
    args = get_args()
    main(args)
| [
"scipy.spatial.transform.Rotation",
"yaml.load",
"rospy.Subscriber",
"argparse.ArgumentParser",
"numpy.copy",
"numpy.identity",
"rospy.Publisher",
"numpy.linalg.inv",
"rospy.init_node",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.array",
"rospy.spin"
] | [((255, 280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (278, 280), False, 'import argparse\n'), ((1423, 1436), 'scipy.spatial.transform.Rotation', 'R', (['quaternion'], {}), '(quaternion)\n', (1424, 1436), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1448, 1462), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1459, 1462), True, 'import numpy as np\n'), ((1946, 1973), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['pose[:3, :3]'], {}), '(pose[:3, :3])\n', (1959, 1973), True, 'from scipy.spatial.transform import Rotation as R\n'), ((2791, 2821), 'rospy.init_node', 'rospy.init_node', (['args.nodename'], {}), '(args.nodename)\n', (2806, 2821), False, 'import rospy\n'), ((2837, 2894), 'rospy.Subscriber', 'rospy.Subscriber', (['args.sub_topic', 'Odometry', 'odom_callback'], {}), '(args.sub_topic, Odometry, odom_callback)\n', (2853, 2894), False, 'import rospy\n'), ((2910, 2966), 'rospy.Publisher', 'rospy.Publisher', (['args.pub_topic', 'Odometry'], {'queue_size': '(10)'}), '(args.pub_topic, Odometry, queue_size=10)\n', (2925, 2966), False, 'import rospy\n'), ((2971, 2983), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2981, 2983), False, 'import rospy\n'), ((1579, 1612), 'numpy.linalg.inv', 'np.linalg.inv', (['T_frame1_to_frame2'], {}), '(T_frame1_to_frame2)\n', (1592, 1612), True, 'import numpy as np\n'), ((2731, 2759), 'numpy.array', 'np.array', (['T_frame1_to_frame2'], {}), '(T_frame1_to_frame2)\n', (2739, 2759), True, 'import numpy as np\n'), ((1661, 1694), 'numpy.linalg.inv', 'np.linalg.inv', (['T_frame2_to_frame1'], {}), '(T_frame2_to_frame1)\n', (1674, 1694), True, 'import numpy as np\n'), ((1754, 1787), 'numpy.linalg.inv', 'np.linalg.inv', (['T_frame1_to_frame2'], {}), '(T_frame1_to_frame2)\n', (1767, 1787), True, 'import numpy as np\n'), ((1870, 1883), 'numpy.copy', 'np.copy', (['pose'], {}), '(pose)\n', (1877, 1883), True, 'import numpy as np\n'), ((2654, 2699), 'yaml.load', 
'yaml.load', (['calib_file'], {'Loader': 'yaml.FullLoader'}), '(calib_file, Loader=yaml.FullLoader)\n', (2663, 2699), False, 'import yaml\n'), ((1899, 1923), 'numpy.linalg.inv', 'np.linalg.inv', (['base_odom'], {}), '(base_odom)\n', (1912, 1923), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
import seaborn.apionly as sns
from scipy import stats
def fitMVN():
    """Fit and diagnose a multivariate normal model of monthly flow variability.

    NOTE(review): this function is mid-refactor and will raise NameError as
    written -- `Nyears`, `Months`, `StdMonthly` and `D2` are all referenced
    but their definitions are commented out below. Confirm intended values
    before running.
    """
    # set plotting style
    sns.set_style("darkgrid")
    # load PM dataset
    pm_daily = np.loadtxt('../data/epa_hourly/alabamatest.csv', delimiter=',')
    #Nyears = int(np.shape(pm_daily)[0] / 365*24)
    Nsites = np.shape(pm_daily)[1]
    # Months = ['May', 'June', 'July', 'August', 'September', 'October', 'November', 'December', 'January', 'February',
    #     'March', 'April']
    # calculate standard deviation in daily flows each month and squared Mahalanobis distances
    #StdMonthly = calc_monthly_std(pm_daily, Nyears, Nsites)
    #D2 = calcD2(Nyears, Nsites, np.log(pm_daily))
    #D2 = calcD2(Nsites, np.log(pm_daily))
    # calculate theoretical quantiles for a chi^2 distribution with dof = Nsites, and for the standard normal distribution
    # NOTE(review): `Nyears` is undefined here (definition commented out above).
    m = np.array(range(1, Nyears + 1))
    p = (m - 0.5) / Nyears
    chi2 = stats.chi2.ppf(p, Nsites)
    norm = stats.norm.ppf(p, 0, 1)
    # initialize matrices to store correlation coefficients and significance levels for marginal normal distributions and chi^2 distributions
    normCorr = np.zeros([Nsites, 12])
    norm_sigLevel = np.zeros([Nsites, 12])
    chi2Corr = np.zeros([12])
    chi2_sigLevel = np.zeros([12])
    # NOTE(review): `Months`, `StdMonthly` and `D2` below are also undefined.
    for i in range(len(Months)):
        # plot histograms of standard deviation of daily flows each month, and of their logs
        plotHistograms(Nsites, StdMonthly[:, :, i], 'Standard Deviation of Daily ' + Months[i] + ' Flows',
                       Months[i] + 'Hist.png')
        plotHistograms(Nsites, np.log(StdMonthly[:, :, i]), 'log(Standard Deviation of Daily ' + Months[i] + ' Flows)', \
                       'Log' + Months[i] + 'Hist.png')
        # plot QQ plots of standard deviation of daily flows each month, and of their logs
        plotNormQQ(Nsites, StdMonthly[:, :, i], norm, 'Standard Deviation of Daily ' + Months[i] + ' Flows',
                   Months[i] + 'QQ.png')
        normCorr[:, i] = plotNormQQ(Nsites, np.log(StdMonthly[:, :, i]), norm,
                                    'log(Standard Deviation of Daily ' + Months[i] + ' Flows)',
                                    'Log' + Months[i] + 'QQ.png')
        # plot QQ plot of Chi Squared distribution of log of standard deviation in daily flows each month
        chi2Corr[i] = plotChi2QQ(Nsites, D2[:, i], chi2,
                                  'D$\mathregular{^2}\!$ of log(Standard Deviation of Daily ' + Months[i] + ' Flows)', \
                                  'Log' + Months[i] + 'Chi2QQ.png')
        # find significance levels
        chi2_sigLevel[i] = chi2_MC(Nsites, Nyears, chi2, chi2Corr[i])
        norm_sigLevel[:, i] = norm_MC(Nsites, Nyears, norm, normCorr[:, i])
    np.savetxt('Norm_sigLevels.txt', np.transpose(norm_sigLevel))
    np.savetxt('Norm_corr.txt', np.transpose(normCorr))
    np.savetxt('Chi2_sigLevels.txt', chi2_sigLevel)
    np.savetxt('Chi2_corr.txt', chi2Corr)
    return None
def calc_monthly_std(Qdaily, Nyears, Nsites):
    """Standard deviation of daily flows within each month of each year.

    Qdaily is (days, sites) in flow units; totals are scaled by 86400
    (seconds per day). Returns an array shaped (Nsites, Nyears, 12).
    The water year starts in May (first entry of the month-length table).
    """
    Nmonths = 12
    # first month = May (1st month of water year)
    DaysPerMonth = np.array([31, 30, 31, 31, 30, 31, 30, 31, 31, 28, 31, 30])
    Qmonthly = np.zeros([Nsites, Nyears, Nmonths])
    StdMonthly = np.zeros([Nsites, Nyears, Nmonths])
    for yr in range(Nyears):
        for mo in range(Nmonths):
            first_day = yr * 365 + np.sum(DaysPerMonth[0:mo])
            ndays = DaysPerMonth[mo]
            # total flow per site for this month
            for site in range(Nsites):
                Qmonthly[site, yr, mo] = 86400 * np.sum(Qdaily[first_day:first_day + ndays, site])
            # sum of squared deviations of daily flows from the monthly mean
            for site in range(Nsites):
                mean_daily = Qmonthly[site, yr, mo] / ndays
                for d in range(ndays):
                    StdMonthly[site, yr, mo] += (86400 * Qdaily[first_day + d, site] - mean_daily) ** 2
                StdMonthly[site, yr, mo] = np.sqrt((1 / (ndays - 1)) * StdMonthly[site, yr, mo])
    return StdMonthly
def plotHistograms(Nsites, data, xlabel, filename):
    """Draw one histogram panel per site and save the figure under Hists/."""
    fig = plt.figure()
    for site in range(Nsites):
        panel = fig.add_subplot(1, Nsites, site + 1)
        panel.hist(data[site, :], bins=10, color='navy', alpha=0.8)
        panel.set_title('Site ' + str(site + 1), fontsize=16)
    # Shared axis labels across all panels.
    fig.text(0.1, 0.5, 'Frequency', va='center', rotation='vertical', fontsize=14)
    fig.text(0.5, 0.04, xlabel, ha='center', fontsize=14)
    fig.subplots_adjust(bottom=0.15)
    fig.set_size_inches([22.525, 4.825])
    fig.savefig('Hists/' + filename)
    fig.clf()
    return None
def plotNormQQ(Nsites, data, norm, title, filename):
    """Normal Q-Q plot per site; returns the per-site Q-Q correlation coefficients."""
    corr = np.zeros([Nsites])
    fig = plt.figure()
    for site in range(Nsites):
        sample = data[site, :]
        corr[site] = np.corrcoef(np.sort(sample), norm)[0, 1]
        # Standardize the sample before plotting against the theoretical quantiles.
        z = (sample - np.mean(sample)) / np.std(sample)
        panel = fig.add_subplot(1, Nsites, site + 1)
        panel.scatter(norm, np.sort(z))
        panel.plot([-3, 3], [-3, 3], c='r')
        panel.set_title('Site ' + str(site + 1), fontsize=16)
        panel.set_xlim([-3, 3])
        panel.set_ylim([-3, 3])
    fig.text(0.1, 0.5, 'Sample Quantiles', va='center', rotation='vertical', fontsize=14)
    fig.text(0.5, 0.04, 'Theoretical Quantiles', ha='center', fontsize=14)
    fig.suptitle('Normal Q-Q Plot of ' + title, fontsize=16)
    fig.subplots_adjust(bottom=0.15, top=0.85)
    fig.set_size_inches([22.525, 4.825])
    fig.savefig('QQplots/' + filename)
    fig.clf()
    return corr
def calcD2(Nsites, data):
    """Squared Mahalanobis distances of each year's monthly values.

    data is shaped (Nsites, Nyears, 12). For each month, builds the
    (Nyears x Nsites) data matrix, its sample covariance S, and each
    year's D^2 = (x - mean)' S^-1 (x - mean). Returns (Nyears, 12).

    Fix: `Nyears` was referenced without being defined (it is neither a
    parameter nor a global); it is now derived from data's second axis.
    """
    Nyears = np.shape(data)[1]
    D2 = np.zeros([Nyears, 12])
    X = np.zeros([Nyears, Nsites])
    for i in range(12):
        # fill data matrix, X, for ith month
        for j in range(Nsites):
            X[:, j] = data[j, :, i]
        # calculate covariance matrix, S, for ith month (centering via the averaging matrix)
        Xprime = X - (1 / Nyears) * np.dot(np.ones([Nyears, Nyears]), X)
        S = (1 / (Nyears - 1)) * np.dot(np.transpose(Xprime), Xprime)
        # calculate Mahalanobis distance, D2, for each year's ith month
        for j in range(Nyears):
            deviation = X[j, :] - np.mean(X, 0)
            D2[j, i] = np.dot(np.dot(deviation, np.linalg.inv(S)), np.transpose(deviation))
    return D2
def plotChi2QQ(Nsites, data, chi2, title, filename):
    """Chi-squared Q-Q plot of the D^2 values; returns the Q-Q correlation coefficient."""
    corr = np.corrcoef(np.sort(data), chi2)[0, 1]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(chi2, np.sort(data))
    # 1:1 reference line spanning the theoretical quantile range (with 10% headroom).
    upper = 1.1 * np.max(chi2)
    ax.plot([0, upper], [0, upper], c='r')
    ax.set_xlabel('Theoretical Quantiles', fontsize=16)
    ax.set_xlim([0, upper])
    ax.set_ylabel('Sample Quantiles', fontsize=16)
    ax.set_ylim([0, 1.1 * np.max(data)])
    ax.tick_params(axis='both', labelsize=14)
    ax.set_title(r'$\chi^2$' + ' Q-Q Plot of ' + title, fontsize=16)
    fig.savefig('QQplots/' + filename)
    fig.clf()
    return corr
def chi2_MC(Nsites, Nyears, theoretical, dataCorr, nsims=10000):
    """Monte-Carlo significance level of a chi^2 Q-Q correlation coefficient.

    Draws `nsims` samples of size `Nyears` from chi^2(Nsites), computes each
    sample's Q-Q correlation against `theoretical`, and returns the empirical
    significance level of `dataCorr` within that null distribution.

    Fixes: `sigLevel` is initialized so that a dataCorr below every simulated
    correlation returns 0.0 instead of raising UnboundLocalError; the number
    of simulations is now a parameter (default keeps the old behavior).
    """
    corr = np.zeros(nsims)
    for i in range(nsims):
        simulated = stats.chi2.rvs(Nsites, size=Nyears)
        corr[i] = np.corrcoef(np.sort(simulated), theoretical)[0, 1]
    # find significance level: fraction of simulated correlations below dataCorr
    corr = np.sort(corr)
    sigLevel = 0.0
    for i in range(nsims):
        if dataCorr > corr[i]:
            sigLevel = (i + 0.5) / nsims
    return sigLevel
def norm_MC(Nsites, Nyears, theoretical, dataCorr, nsims=10000):
    """Monte-Carlo significance levels of per-site normal Q-Q correlations.

    Draws `nsims` standard-normal samples of size `Nyears`, computes each
    sample's Q-Q correlation against `theoretical`, and returns one empirical
    significance level per site for the observed `dataCorr` values.

    Generalized (consistent with chi2_MC): the number of simulations is now a
    parameter whose default preserves the original hard-coded 10000.
    """
    sigLevel = np.zeros(Nsites)
    corr = np.zeros([nsims])
    for i in range(nsims):
        simulated = stats.norm.rvs(0, 1, size=Nyears)
        corr[i] = np.corrcoef(np.sort(simulated), theoretical)[0, 1]
    # find significance levels: fraction of simulated correlations below each site's value
    corr = np.sort(corr)
    for i in range(nsims):
        for j in range(Nsites):
            if dataCorr[j] > corr[i]:
                sigLevel[j] = (i + 0.5) / nsims
    return sigLevel
fitMVN() | [
"numpy.sum",
"scipy.stats.norm.rvs",
"numpy.ones",
"numpy.shape",
"scipy.stats.chi2.rvs",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.std",
"numpy.savetxt",
"numpy.transpose",
"numpy.max",
"numpy.loadtxt",
"seaborn.apionly.set_style",
"scipy.stats.norm.ppf",
"scipy.stats.chi2.ppf",
... | [((187, 212), 'seaborn.apionly.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (200, 212), True, 'import seaborn.apionly as sns\n'), ((251, 314), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/epa_hourly/alabamatest.csv"""'], {'delimiter': '""","""'}), "('../data/epa_hourly/alabamatest.csv', delimiter=',')\n", (261, 314), True, 'import numpy as np\n'), ((1007, 1032), 'scipy.stats.chi2.ppf', 'stats.chi2.ppf', (['p', 'Nsites'], {}), '(p, Nsites)\n', (1021, 1032), False, 'from scipy import stats\n'), ((1044, 1067), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['p', '(0)', '(1)'], {}), '(p, 0, 1)\n', (1058, 1067), False, 'from scipy import stats\n'), ((1226, 1248), 'numpy.zeros', 'np.zeros', (['[Nsites, 12]'], {}), '([Nsites, 12])\n', (1234, 1248), True, 'import numpy as np\n'), ((1269, 1291), 'numpy.zeros', 'np.zeros', (['[Nsites, 12]'], {}), '([Nsites, 12])\n', (1277, 1291), True, 'import numpy as np\n'), ((1307, 1321), 'numpy.zeros', 'np.zeros', (['[12]'], {}), '([12])\n', (1315, 1321), True, 'import numpy as np\n'), ((1342, 1356), 'numpy.zeros', 'np.zeros', (['[12]'], {}), '([12])\n', (1350, 1356), True, 'import numpy as np\n'), ((2958, 3005), 'numpy.savetxt', 'np.savetxt', (['"""Chi2_sigLevels.txt"""', 'chi2_sigLevel'], {}), "('Chi2_sigLevels.txt', chi2_sigLevel)\n", (2968, 3005), True, 'import numpy as np\n'), ((3010, 3047), 'numpy.savetxt', 'np.savetxt', (['"""Chi2_corr.txt"""', 'chi2Corr'], {}), "('Chi2_corr.txt', chi2Corr)\n", (3020, 3047), True, 'import numpy as np\n'), ((3199, 3257), 'numpy.array', 'np.array', (['[31, 30, 31, 31, 30, 31, 30, 31, 31, 28, 31, 30]'], {}), '([31, 30, 31, 31, 30, 31, 30, 31, 31, 28, 31, 30])\n', (3207, 3257), True, 'import numpy as np\n'), ((3274, 3309), 'numpy.zeros', 'np.zeros', (['[Nsites, Nyears, Nmonths]'], {}), '([Nsites, Nyears, Nmonths])\n', (3282, 3309), True, 'import numpy as np\n'), ((3327, 3362), 'numpy.zeros', 'np.zeros', (['[Nsites, Nyears, Nmonths]'], {}), '([Nsites, Nyears, Nmonths])\n', 
(3335, 3362), True, 'import numpy as np\n'), ((4355, 4367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4365, 4367), True, 'from matplotlib import pyplot as plt\n'), ((4915, 4933), 'numpy.zeros', 'np.zeros', (['[Nsites]'], {}), '([Nsites])\n', (4923, 4933), True, 'import numpy as np\n'), ((4944, 4956), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4954, 4956), True, 'from matplotlib import pyplot as plt\n'), ((5777, 5799), 'numpy.zeros', 'np.zeros', (['[Nyears, 12]'], {}), '([Nyears, 12])\n', (5785, 5799), True, 'import numpy as np\n'), ((5808, 5834), 'numpy.zeros', 'np.zeros', (['[Nyears, Nsites]'], {}), '([Nyears, Nsites])\n', (5816, 5834), True, 'import numpy as np\n'), ((5848, 5874), 'numpy.zeros', 'np.zeros', (['[Nyears, Nsites]'], {}), '([Nyears, Nsites])\n', (5856, 5874), True, 'import numpy as np\n'), ((5883, 5899), 'numpy.zeros', 'np.zeros', (['Nsites'], {}), '(Nsites)\n', (5891, 5899), True, 'import numpy as np\n'), ((6626, 6638), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6636, 6638), True, 'from matplotlib import pyplot as plt\n'), ((7217, 7232), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (7225, 7232), True, 'import numpy as np\n'), ((7453, 7466), 'numpy.sort', 'np.sort', (['corr'], {}), '(corr)\n', (7460, 7466), True, 'import numpy as np\n'), ((7656, 7672), 'numpy.zeros', 'np.zeros', (['Nsites'], {}), '(Nsites)\n', (7664, 7672), True, 'import numpy as np\n'), ((7684, 7701), 'numpy.zeros', 'np.zeros', (['[10000]'], {}), '([10000])\n', (7692, 7701), True, 'import numpy as np\n'), ((7920, 7933), 'numpy.sort', 'np.sort', (['corr'], {}), '(corr)\n', (7927, 7933), True, 'import numpy as np\n'), ((378, 396), 'numpy.shape', 'np.shape', (['pm_daily'], {}), '(pm_daily)\n', (386, 396), True, 'import numpy as np\n'), ((2869, 2896), 'numpy.transpose', 'np.transpose', (['norm_sigLevel'], {}), '(norm_sigLevel)\n', (2881, 2896), True, 'import numpy as np\n'), ((2930, 2952), 'numpy.transpose', 
'np.transpose', (['normCorr'], {}), '(normCorr)\n', (2942, 2952), True, 'import numpy as np\n'), ((6694, 6707), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (6701, 6707), True, 'import numpy as np\n'), ((7305, 7340), 'scipy.stats.chi2.rvs', 'stats.chi2.rvs', (['Nsites'], {'size': 'Nyears'}), '(Nsites, size=Nyears)\n', (7319, 7340), False, 'from scipy import stats\n'), ((7774, 7807), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(1)'], {'size': 'Nyears'}), '(0, 1, size=Nyears)\n', (7788, 7807), False, 'from scipy import stats\n'), ((1669, 1696), 'numpy.log', 'np.log', (['StdMonthly[:, :, i]'], {}), '(StdMonthly[:, :, i])\n', (1675, 1696), True, 'import numpy as np\n'), ((2101, 2128), 'numpy.log', 'np.log', (['StdMonthly[:, :, i]'], {}), '(StdMonthly[:, :, i])\n', (2107, 2128), True, 'import numpy as np\n'), ((5097, 5115), 'numpy.std', 'np.std', (['data[i, :]'], {}), '(data[i, :])\n', (5103, 5115), True, 'import numpy as np\n'), ((5188, 5198), 'numpy.sort', 'np.sort', (['z'], {}), '(z)\n', (5195, 5198), True, 'import numpy as np\n'), ((6589, 6602), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (6596, 6602), True, 'import numpy as np\n'), ((3464, 3493), 'numpy.sum', 'np.sum', (['DaysPerMonth[0:month]'], {}), '(DaysPerMonth[0:month])\n', (3470, 3493), True, 'import numpy as np\n'), ((4198, 4265), 'numpy.sqrt', 'np.sqrt', (['(1 / (DaysPerMonth[month] - 1) * StdMonthly[i, year, month])'], {}), '(1 / (DaysPerMonth[month] - 1) * StdMonthly[i, year, month])\n', (4205, 4265), True, 'import numpy as np\n'), ((5015, 5034), 'numpy.sort', 'np.sort', (['data[i, :]'], {}), '(data[i, :])\n', (5022, 5034), True, 'import numpy as np\n'), ((5074, 5093), 'numpy.mean', 'np.mean', (['data[i, :]'], {}), '(data[i, :])\n', (5081, 5093), True, 'import numpy as np\n'), ((6207, 6227), 'numpy.transpose', 'np.transpose', (['Xprime'], {}), '(Xprime)\n', (6219, 6227), True, 'import numpy as np\n'), ((6731, 6743), 'numpy.max', 'np.max', (['chi2'], {}), '(chi2)\n', (6737, 6743), 
True, 'import numpy as np\n'), ((6756, 6768), 'numpy.max', 'np.max', (['chi2'], {}), '(chi2)\n', (6762, 6768), True, 'import numpy as np\n'), ((6860, 6872), 'numpy.max', 'np.max', (['chi2'], {}), '(chi2)\n', (6866, 6872), True, 'import numpy as np\n'), ((6952, 6964), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (6958, 6964), True, 'import numpy as np\n'), ((7371, 7389), 'numpy.sort', 'np.sort', (['simulated'], {}), '(simulated)\n', (7378, 7389), True, 'import numpy as np\n'), ((7838, 7856), 'numpy.sort', 'np.sort', (['simulated'], {}), '(simulated)\n', (7845, 7856), True, 'import numpy as np\n'), ((3627, 3679), 'numpy.sum', 'np.sum', (['Qdaily[start:start + DaysPerMonth[month], i]'], {}), '(Qdaily[start:start + DaysPerMonth[month], i])\n', (3633, 3679), True, 'import numpy as np\n'), ((6137, 6162), 'numpy.ones', 'np.ones', (['[Nyears, Nyears]'], {}), '([Nyears, Nyears])\n', (6144, 6162), True, 'import numpy as np\n'), ((6406, 6422), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (6419, 6422), True, 'import numpy as np\n'), ((6390, 6403), 'numpy.mean', 'np.mean', (['X', '(0)'], {}), '(X, 0)\n', (6397, 6403), True, 'import numpy as np\n'), ((6479, 6492), 'numpy.mean', 'np.mean', (['X', '(0)'], {}), '(X, 0)\n', (6486, 6492), True, 'import numpy as np\n')] |
from pymorph import hmax
import numpy as np
def test_hmax():
    """hmax at the global maximum flattens everything to zero;
    hmax(a, i) lowers the peak of a by exactly i."""
    profile = np.array([1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 7, 6,
                        5, 4, 3, 2, 1, 1, 1, 1], np.uint8)
    peak = profile.max()
    assert np.all(hmax(profile, peak) == 0)
    for height in range(1, 5):
        assert hmax(profile, height).max() == peak - height
| [
"pymorph.hmax",
"numpy.array"
] | [((69, 159), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1]', 'np.uint8'], {}), '([1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1],\n np.uint8)\n', (77, 159), True, 'import numpy as np\n'), ((217, 227), 'pymorph.hmax', 'hmax', (['a', 'i'], {}), '(a, i)\n', (221, 227), False, 'from pymorph import hmax\n')] |
import numpy as np
import nltk
import string
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score,f1_score, roc_curve, confusion_matrix
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.model_selection import train_test_split, cross_val_predict
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
# read data
# Each CSV row is "klass|\-/|text": an integer class label and a byte
# string of at most 1000 characters.
CONST_WIKI_ALL = "data/wiki_3classes2.csv"
dataset = np.genfromtxt(CONST_WIKI_ALL, delimiter="|\-/|", skip_header=1,
               dtype={'names': ('klass', 'text'), 'formats': (np.int, '|S1000')})
# NOTE(review): np.int is deprecated and removed in NumPy >= 1.24; plain
# builtin int would be needed on modern NumPy.
docs = dataset['text']
labels = dataset['klass']
lemma = WordNetLemmatizer()
#fix the document
# Decode each byte string as cp1252 and replace non-ASCII runs with a space.
fixed = []
aux = 0  # NOTE(review): unused
for line in docs:
    line = line.strip()
    line = line.decode('cp1252')
    fixed.extend([nltk.re.sub(r'[^\x00-\x7F]+', ' ', line)])
doc_complete = fixed
# English stopwords and punctuation sets used by clean() below.
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
def clean(doc):
    """Lowercase a document, drop stopwords and punctuation, lemmatize.

    Uses the module-level `stop`, `exclude` and `lemma` objects.
    Returns a single space-separated string of lemmatized tokens.
    """
    without_stop = " ".join(w for w in doc.lower().split() if w not in stop)
    without_punc = ''.join(c for c in without_stop if c not in exclude)
    return " ".join(lemma.lemmatize(token) for token in without_punc.split())
# Tokenize every cleaned document into a list of words.
doc_clean = [clean(doc).split() for doc in doc_complete]

# Re-join each token list into one space-separated string for the TF-IDF
# vectorizer.  The original loop kept a counter `i` that was never
# incremented, so its "skip the separator after the last word" condition
# was dead and a trailing space was appended after every word; a plain
# " ".join produces the intended text.
doc_tfidf = [" ".join(tokens) for tokens in doc_clean]
# Keep only documents longer than two characters, and keep their labels
# aligned with the surviving documents.
doc_tfidf_clean = []
labels_clean = []
for idx, document in enumerate(doc_tfidf):
    if len(document) > 2:
        doc_tfidf_clean.append(document)
        labels_clean.append(labels[idx])
# Flat list of every token across all cleaned documents.
data_iter = []
aux = 0
for tokens in doc_clean:
    data_iter.extend(tokens)
# Vectorize the cleaned documents with TF-IDF and hold out 20% for testing.
feature_extraction = TfidfVectorizer()
X = feature_extraction.fit_transform(doc_tfidf_clean)
X_train, X_test, y_train, y_test = train_test_split(X, labels_clean, test_size=0.2, random_state=5)
# Baseline linear SVM trained with SGD (hinge loss + L2 penalty).
clf = SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, max_iter=5, random_state=42)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
# FIX: the value printed here is accuracy_score, not ROC-AUC; the label
# was misleading.
print('Accuracy yields ' + str(accuracy_score(y_test, predictions)))
print(clf.score(X_test, y_test))
# Grid-search over n-gram range, IDF usage and regularization strength
# for a CountVectorizer -> TF-IDF -> SGD pipeline.
parameters_svm = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False),
                  'clf-svm__alpha': (1e-2, 1e-3)}
text_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
                         ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42))])
#tune
from sklearn.model_selection import GridSearchCV
gs_clf = GridSearchCV(text_clf_svm, parameters_svm, n_jobs=-1)
# The pipeline vectorizes internally, so split the raw text documents here.
X_train, X_test, y_train, y_test = train_test_split(doc_tfidf_clean, labels_clean, test_size=0.2, random_state=5)
gs_clf = gs_clf.fit(X_train, y_train)
print(gs_clf.best_score_)
print(gs_clf.best_params_)
final_clf = gs_clf.best_estimator_.fit(X_train, y_train)
# joblib.dump(final_clf, 'tf-idf+svm.pkl', compress=9) # use to save model
predictions = final_clf.predict(X_test)
# FIX: same mislabeled metric as above — this is accuracy.
print('Accuracy yields ' + str(accuracy_score(y_test, predictions)))
print(final_clf.score(X_test, y_test))
y_train_pred = cross_val_predict(gs_clf, X_train, y_train, cv=3) | [
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.CountVectorizer",
"nltk.re.sub",
"sklearn.linear_model.SGDClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"numpy.genfromtxt",
"sklear... | [((529, 665), 'numpy.genfromtxt', 'np.genfromtxt', (['CONST_WIKI_ALL'], {'delimiter': '"""|\\\\-/|"""', 'skip_header': '(1)', 'dtype': "{'names': ('klass', 'text'), 'formats': (np.int, '|S1000')}"}), "(CONST_WIKI_ALL, delimiter='|\\\\-/|', skip_header=1, dtype={\n 'names': ('klass', 'text'), 'formats': (np.int, '|S1000')})\n", (542, 665), True, 'import numpy as np\n'), ((719, 738), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (736, 738), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((1822, 1839), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (1837, 1839), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n'), ((1930, 1994), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'labels_clean'], {'test_size': '(0.2)', 'random_state': '(5)'}), '(X, labels_clean, test_size=0.2, random_state=5)\n', (1946, 1994), False, 'from sklearn.model_selection import train_test_split, cross_val_predict\n'), ((2002, 2089), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""', 'penalty': '"""l2"""', 'alpha': '(0.001)', 'max_iter': '(5)', 'random_state': '(42)'}), "(loss='hinge', penalty='l2', alpha=0.001, max_iter=5,\n random_state=42)\n", (2015, 2089), False, 'from sklearn.linear_model import SGDClassifier\n'), ((2661, 2714), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['text_clf_svm', 'parameters_svm'], {'n_jobs': '(-1)'}), '(text_clf_svm, parameters_svm, n_jobs=-1)\n', (2673, 2714), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2750, 2828), 'sklearn.model_selection.train_test_split', 'train_test_split', (['doc_tfidf_clean', 'labels_clean'], {'test_size': '(0.2)', 'random_state': '(5)'}), '(doc_tfidf_clean, labels_clean, test_size=0.2, random_state=5)\n', (2766, 2828), False, 'from sklearn.model_selection import train_test_split, 
cross_val_predict\n'), ((3215, 3264), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['gs_clf', 'X_train', 'y_train'], {'cv': '(3)'}), '(gs_clf, X_train, y_train, cv=3)\n', (3232, 3264), False, 'from sklearn.model_selection import train_test_split, cross_val_predict\n'), ((947, 973), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (962, 973), False, 'from nltk.corpus import stopwords\n'), ((871, 912), 'nltk.re.sub', 'nltk.re.sub', (['"""[^\\\\x00-\\\\x7F]+"""', '""" """', 'line'], {}), "('[^\\\\x00-\\\\x7F]+', ' ', line)\n", (882, 912), False, 'import nltk\n'), ((2176, 2211), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2190, 2211), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_curve, confusion_matrix\n'), ((2423, 2440), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (2438, 2440), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n'), ((2453, 2471), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2469, 2471), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n'), ((2511, 2598), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""', 'penalty': '"""l2"""', 'alpha': '(0.001)', 'max_iter': '(5)', 'random_state': '(42)'}), "(loss='hinge', penalty='l2', alpha=0.001, max_iter=5,\n random_state=42)\n", (2524, 2598), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3122, 3157), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (3136, 3157), False, 'from sklearn.metrics import accuracy_score, f1_score, roc_curve, confusion_matrix\n')] |
import copy
import numpy as np
from ..numerics.fem import domain2d_fem as FEM
from ._mesh import Mesh
from meshpy import triangle
from scipy import sparse
from numba import jit
from numpy.linalg import det
class Domain2d(Mesh):
    """A triangulated two-dimensional domain for finite element computations.

    The domain is constructed either by meshing a set of boundary curves
    (via meshpy/triangle) or directly from a (triangles, vertices) pair.
    Element coordinate triplets, element sizes and the total area are
    computed lazily and cached; reset_data() invalidates the caches and
    bumps the timestamp inherited from Mesh.
    """

    def __init__(self, curves=None, triangles=None, vertices=None):
        """Create a domain from boundary curves or an explicit triangulation.

        Parameters
        ----------
        curves : optional
            Boundary curve object; when given it is meshed with self.mesh().
        triangles : (3,n) int array, optional
            Vertex indices of each triangle (used when curves is None).
        vertices : (2,m) float array, optional
            Vertex coordinates (used when curves is None).

        Raises
        ------
        ValueError
            If neither curves nor (triangles, vertices) is provided.
        """
        super(Domain2d, self).__init__()
        if curves is not None:
            triangles, vertices = self.mesh( curves )
        elif (triangles is None) and (vertices is None):
            raise ValueError("Either curves or (triangles,vertices) should be defined!")
        self._triangles = triangles
        self._vertices = vertices
        self.FEM = FEM
        self.ref_element = FEM.ReferenceElement()
        local_to_global = self.ref_element.local_to_global
        # Bind the reference element's local-to-global map to this domain.
        self.local_to_global = lambda s, mask, coarsened: \
                               local_to_global(s, self, mask, coarsened)
        self.reset_data()

    def copy(self):
        """Return an independent copy, carrying over any cached geometry."""
        new_domain = Domain2d( triangles = self._triangles.copy(),
                               vertices = self._vertices.copy() )
        if self._coords is not None:
            new_domain._coords = copy.deepcopy( self._coords )
        if self._el_sizes is not None:
            new_domain._el_sizes = self._el_sizes.copy()
        if self._area is not None:
            new_domain._area = self._area
        new_domain.timestamp = self.timestamp
        return new_domain

    def dim(self):
        """Topological dimension of the mesh."""
        return 2

    def dim_of_world(self):
        """Dimension of the embedding space."""
        return 2

    def reset_data(self):
        """Invalidate all cached geometric data and bump the timestamp."""
        self._coords = None
        self._el_sizes = None
        self._area = None
        self.timestamp += 1

    def size(self):
        """Number of triangles in the mesh."""
        return self._triangles.shape[1]

    def vertices(self):
        """The (2,m) vertex coordinate array."""
        return self._vertices

    def triangles(self):
        """The (triangles, vertices) pair describing the mesh."""
        return (self._triangles, self._vertices)

    def coords(self, mask=None):
        """Return the coordinate triplet (x0, x1, x2) of the elements.

        Each xi is a (2,k) array holding the coordinates of the i-th corner
        of every selected triangle.  With mask=None the full triplet is
        computed once and cached on the instance.
        """
        if mask is None:
            if self._coords is not None:
                return self._coords
        if mask is not None:
            x0 = self._vertices[ :, self._triangles[0,mask] ]
            x1 = self._vertices[ :, self._triangles[1,mask] ]
            x2 = self._vertices[ :, self._triangles[2,mask] ]
            return (x0, x1, x2)
        else: # mask is None, compute coordinate triplets for all elements.
            # The jit-compiled gather below replaced NumPy fancy indexing;
            # per the original author's tests it is 3X-10X faster.
            tri = self._triangles
            vtx = self._vertices
            n_tri = tri.shape[1]
            x0 = np.empty( (2,n_tri), dtype=float )
            x1 = np.empty( (2,n_tri), dtype=float )
            x2 = np.empty( (2,n_tri), dtype=float )
            Domain2d._retrieve_coords( vtx, tri, x0,x1,x2 )
            self._coords = (x0, x1, x2)
        return self._coords

    # Used as a static helper (always called through the class), hence no self.
    @jit( nopython = True )
    def _retrieve_coords(vtx, tri, x0, x1, x2):
        """Gather the corner coordinates of all triangles into x0,x1,x2 in place."""
        n_tri = tri.shape[1]
        for i in range(0, n_tri):
            x0[0,i] = vtx[ 0, tri[0,i] ]
            x1[0,i] = vtx[ 0, tri[1,i] ]
            x2[0,i] = vtx[ 0, tri[2,i] ]
            x0[1,i] = vtx[ 1, tri[0,i] ]
            x1[1,i] = vtx[ 1, tri[1,i] ]
            x2[1,i] = vtx[ 1, tri[2,i] ]

    def element_sizes(self, mask=None):
        """Return the signed element areas (shoelace formula; positive for
        counter-clockwise vertex order).

        NOTE(review): the cached branch returns the cached array itself
        while the freshly-computed branch returns a copy — callers that
        mutate the result get inconsistent aliasing; behavior kept as-is.
        """
        if mask is None:
            if self._el_sizes is not None:
                return self._el_sizes
        x0, x1, x2 = self.coords()
        if mask is not None:
            d = 0.5 * ( x0[0,mask] * (x1[1,mask] - x2[1,mask]) + \
                        x1[0,mask] * (x2[1,mask] - x0[1,mask]) + \
                        x2[0,mask] * (x0[1,mask] - x1[1,mask]) )
            return d
        else: # mask not given, compute element sizes for all elements.
            self._el_sizes = d = Domain2d._fast_element_sizes( x0, x1, x2 )
            return d.copy()

    # Used as a static helper (always called through the class), hence no self.
    @jit( nopython = True )
    def _fast_element_sizes(x0, x1, x2):
        """Signed triangle areas via the shoelace formula (jit-compiled)."""
        n = x0.shape[1]
        d = np.empty(n)
        for i in range(0,n):
            d[i] = 0.5 * ( x0[0,i] * (x1[1,i] - x2[1,i]) +
                           x1[0,i] * (x2[1,i] - x0[1,i]) +
                           x2[0,i] * (x0[1,i] - x1[1,i]) )
        return d

    def area(self):
        """Total area of the domain (cached sum of the element sizes)."""
        if self._area is not None:
            return self._area
        else:
            self._area = np.sum( self.element_sizes() )
            return self._area

    def volume(self):
        """Alias of area() for dimension-independent callers."""
        return self.area()

    def mesh(self, curves, options='p'):
        """Triangulate the region bounded by the given curves with meshpy.

        Parameters
        ----------
        curves
            Boundary curve object providing coords(), edges() and,
            optionally, hole_points().
        options : str, optional
            Currently unused; kept for interface compatibility.

        Returns
        -------
        (tri, vtx)
            Integer (3,n) triangle array and float (2,m) vertex array.
        """
        if curves.size() < 3:
            raise ValueError("Curve should have at least three vertices.")
        points = curves.coords().T
        segments = curves.edges().T
        info = triangle.MeshInfo()
        info.set_points( points )
        info.set_facets( segments )
        try:
            hole_pts = curves.hole_points().T
            if len(hole_pts) > 0: info.set_holes( hole_pts )
        except AttributeError: # a single curve object has no hole_points()
            pass
        triangulation = triangle.build(info)
        tri = np.array( triangulation.elements ).T
        vtx = np.array( triangulation.points ).T
        return (tri,vtx)

    def _compute_new_nodes(self, marked):
        """Computes new nodes & coordinates for marked edges.

        The marked edges will be split into two edges from the midpoint of
        the marked edge. This function only computes the new coordinates
        and the corresponding node ids for the new midpoint nodes.
        The actual splitting and changes to the triangulation data
        structures are done by the caller routine.

        Parameters
        ----------
        marked : tuple of NumPy arrays
            A pair of integer arrays (mark_i, mark_j), which store the
            indices of the marked edges of the elements to be refined.
            mark_i stores the edge number and mark_j stores the element
            index.

        Returns
        -------
        new_nodes : NumPy array
            Integer array, same shape as the element array, storing the
            node id numbers for the new nodes of the marked edges.
        new_coords : NumPy array
            A (2,N) float array storing the coordinates of the new nodes.
        """
        elem = self._triangles
        n_elem = elem.shape[1]
        coords = self._vertices
        n = coords.shape[1]
        mark_i, mark_j = marked
        # Flattened (edge number, element index) key for each marked edge.
        original_edge_index = mark_i * n_elem + mark_j
        I = elem[ mark_i, mark_j ]
        J = elem[ (mark_i+1) % 3, mark_j ]
        # Canonical (min,max) ordering collapses (i,j)/(j,i) duplicates
        # into a single sparse-matrix entry.
        min_IJ = np.minimum(I,J)
        max_IJ = np.maximum(I,J)
        dummy = np.empty( len(I), dtype=int )
        new_node_id_mtx = sparse.csr_matrix( (dummy, (min_IJ,max_IJ)), (n,n) )
        n_new_nodes = new_node_id_mtx.getnnz()
        # Assign the new node ids n, n+1, ... to the unique edges.
        new_node_id_mtx.data = np.arange( n, n + n_new_nodes )
        I2, J2 = new_node_id_mtx.nonzero()
        # New nodes sit at the midpoints of the marked edges.
        new_coords = 0.5 * (coords[:,I2] + coords[:,J2])
        # Symmetrize so both (i,j) and (j,i) map to the same node id.
        new_node_id_mtx = new_node_id_mtx + new_node_id_mtx.T
        original_edges_mtx = sparse.csr_matrix( (original_edge_index,(I,J)), (n,n) )
        vec_len = len(original_edges_mtx.data)
        mask = sparse.csr_matrix( (np.ones(vec_len, dtype=int),
                                   original_edges_mtx.indices,
                                   original_edges_mtx.indptr ), (n,n) )
        new_node_ids = ( mask.multiply( new_node_id_mtx ) ).data
        edge_indices = original_edges_mtx.data
        # FIX: integer (floor) division — these decode array indices.
        # Under Python 3 the original "/" produced floats, which are not
        # valid NumPy indices.
        index0 = edge_indices // n_elem
        index1 = edge_indices % n_elem
        new_nodes = np.zeros_like( elem )
        new_nodes[ index0, index1 ] = new_node_ids
        return (new_nodes, new_coords)

    def refine_coarsen(self, markers, data_vectors=(), conforming=False):
        """Refine the marked elements by splitting each into four
        sub-triangles via its three edge midpoints.

        Neighboring unmarked elements are not split, so the result may be
        non-conforming (hanging nodes); conforming refinement is not
        implemented.

        Parameters
        ----------
        markers : bool array or int array
            Boolean mask over elements, or array of element indices.
        data_vectors : iterable of arrays, optional
            Per-element data arrays (last axis indexes elements); each is
            copied into an enlarged array with NaN at the new slots.
        conforming : bool, optional
            Must be False (raises otherwise).

        Returns
        -------
        (new_vectors, new_indices)
            The enlarged data arrays and the indices of the elements that
            were added or overwritten.
        """
        if conforming:
            raise ValueError("Does not work for conforming refinement yet!")
        old_elem = self._triangles
        n_old_elem = old_elem.shape[1]
        if markers.dtype == bool:
            marked = markers.nonzero()[0]
        elif markers.dtype == int:
            marked = markers
        else:
            raise ValueError("markers should either be a boolean array or an integer array of element indices!")
        # Quadrisection: each marked element yields 3 extra elements.
        n_new_elem = 3*len(marked)
        # Mark all three edges of every marked triangle for splitting.
        n_marked = len(marked)
        mark_j = np.hstack(( marked, marked, marked ))
        mark_i = np.empty( 3*n_marked, dtype=int )
        mark_i[0:n_marked] = 0
        mark_i[n_marked:2*n_marked] = 1
        mark_i[2*n_marked:3*n_marked] = 2
        need_to_split = ( mark_i, mark_j ) # edges to be split
        # Compute the new nodes, their coords and assign to correct elems.
        new_nodes, new_coords = self._compute_new_nodes( need_to_split )
        new_coords = np.hstack( (self._vertices, new_coords) )
        # Create the new element array; old elements are copied first and
        # the marked locations are overwritten below.
        new_elem = np.empty( (3, n_old_elem + n_new_elem), dtype=int )
        new_elem[:,0:n_old_elem] = old_elem
        # FIX: integer division — inc is used as a slice bound below; the
        # original "/" produced a float under Python 3 and broke slicing.
        inc = n_new_elem // 3
        # Overwrite marked locations with the first of the four children.
        new_elem[:, marked] = \
            (new_nodes[0,marked], old_elem[2,marked], new_nodes[2,marked])
        # The seconds, thirds & fourths are added to the end.
        new_elem[:, n_old_elem:(n_old_elem + inc)] = \
            (old_elem[0,marked], new_nodes[0,marked], new_nodes[2,marked])
        new_elem[:, (n_old_elem + inc):(n_old_elem + 2*inc)] = \
            (new_nodes[0,marked], old_elem[1,marked], new_nodes[1,marked])
        new_elem[:, (n_old_elem + 2*inc):(n_old_elem + 3*inc)] = \
            (old_elem[2,marked], new_nodes[0,marked], new_nodes[1,marked])
        # Install the refined triangulation.
        self._triangles = new_elem
        self._vertices = new_coords
        self.timestamp += 1
        #-----------------------------------------------------------------
        # Indices of the elements added to (or overwritten in) the mesh.
        new_indices = np.hstack((marked,
                                 np.arange(n_old_elem, n_old_elem + n_new_elem)))
        # If self._coords and _el_sizes have been precomputed, update them.
        # !!! It is critical that the order of operations stays:     !!!
        # !!! (triangles, vertices), coords, el_sizes.               !!!
        if self._coords is not None:
            missing_coords = self.coords(new_indices)
            coords = []
            for i in range(3):
                coords.append( np.empty((2, new_elem.shape[1])) )
                coords[i][:,0:n_old_elem] = self._coords[i]
                coords[i][:,new_indices] = missing_coords[i]
            self._coords = tuple(coords)
        if self._el_sizes is not None:
            el_sizes = np.empty( new_elem.shape[1] )
            el_sizes[0:n_old_elem] = self._el_sizes
            el_sizes[new_indices] = self.element_sizes(new_indices)
            self._el_sizes = el_sizes
        #--------------------------------------------------------------
        # Data vectors store data associated with elements.  Create new
        # vectors preserving the data on unchanged elements; the slots of
        # new/overwritten elements are initialized to NaN.
        new_vectors = []
        for old_vec in data_vectors:
            new_vec_size = old_vec.shape[0:-1] + (new_elem.shape[1],)
            new_vec = np.empty( new_vec_size )
            if new_vec.ndim == 1:
                new_vec[0:n_old_elem] = old_vec
                new_vec[new_indices] = np.nan
            elif new_vec.ndim == 2:
                new_vec[:,0:n_old_elem] = old_vec
                new_vec[:,new_indices] = np.nan
            elif new_vec.ndim == 3:
                new_vec[:,:,0:n_old_elem] = old_vec
                new_vec[:,:,new_indices] = np.nan
            else:
                raise ValueError("Data_vector.ndim > 3 not allowed!")
            new_vectors.append( new_vec )
        return (new_vectors, new_indices)

    def mark_grid(self, grid, origin=(0.0,0.0), size=(1.0,1.0), value=1):
        """Rasterize the domain: set grid[i,j] = value for every grid point
        that lies inside some triangle of the mesh."""
        x0, x1, x2 = self.coords()
        Domain2d._mark_grid_loop(grid, x0,x1,x2, origin, size, value)

    # Used as a static helper (always called through the class), hence no self.
    @jit( nopython = True )
    def _mark_grid_loop(grid, x0,x1,x2, origin, size, value):
        """Point-in-triangle rasterization loop (jit-compiled)."""
        n_tri = x0.shape[1]
        nx, ny = grid.shape
        size_x, size_y = size
        origin_x, origin_y = origin
        inc_x = size_x / (nx - 1.0)
        inc_y = size_y / (ny - 1.0)
        offset_x = origin_x / inc_x
        offset_y = origin_y / inc_y
        domain_min_x = origin_x
        domain_max_x = origin_x + size_x
        domain_min_y = origin_y
        domain_max_y = origin_y + size_y
        for k in range(0,n_tri):
            # Bounding box of triangle k; skip triangles outside the grid.
            min_x = min( min(x0[0,k], x1[0,k]), x2[0,k] )
            max_x = max( max(x0[0,k], x1[0,k]), x2[0,k] )
            min_y = min( min(x0[1,k], x1[1,k]), x2[1,k] )
            max_y = max( max(x0[1,k], x1[1,k]), x2[1,k] )
            if max_x < domain_min_x: continue
            if max_y < domain_min_y: continue
            if min_x > domain_max_x: continue
            if min_y > domain_max_y: continue
            v0_x = x0[0,k]; v0_y = x0[1,k]
            v1_x = x1[0,k] - v0_x; v1_y = x1[1,k] - v0_y
            v2_x = x2[0,k] - v0_x; v2_y = x2[1,k] - v0_y
            det_v0_v1 = v0_x*v1_y - v0_y*v1_x
            det_v0_v2 = v0_x*v2_y - v0_y*v2_x
            det_v1_v2 = v1_x*v2_y - v1_y*v2_x
            # FIX: cast to int — range() requires integer bounds, while
            # np.ceil/np.floor return floats.
            min_i = int( max( 0, np.ceil( min_x/inc_x - offset_x ) ) )
            max_i = int( min( nx-1, np.floor( max_x/inc_x - offset_x ) ) )
            min_j = int( max( 0, np.ceil( min_y/inc_y - offset_y ) ) )
            max_j = int( min( ny-1, np.floor( max_y/inc_y - offset_y ) ) )
            # NOTE(review): range upper bounds are exclusive, so the last
            # row/column of the bounding box is never tested — confirm
            # this is intended.
            min_x = domain_min_x + inc_x * min_i
            max_x = domain_min_x + inc_x * max_i
            min_y = domain_min_y + inc_y * min_j
            max_y = domain_min_y + inc_y * max_j
            px = min_x
            for i in range(min_i, max_i):
                py = min_y
                for j in range(min_j, max_j):
                    det_p_v1 = px*v1_y - py*v1_x
                    det_p_v2 = px*v2_y - py*v2_x
                    # Barycentric coordinates of (px,py) w.r.t. triangle k.
                    a = (det_p_v2 - det_v0_v2) / det_v1_v2
                    b = -(det_p_v1 - det_v0_v1) / det_v1_v2
                    if (a >= 0.0) and (b >= 0.0) and (a+b <= 1.0):
                        grid[i,j] = value
                    py += inc_y
                px += inc_x

    def show(self, tri=None, vtx=None, mask=None, format='k-', factor=1.0):
        """Plot the triangulation with matplotlib.

        Parameters
        ----------
        tri, vtx : optional
            Triangulation to plot; defaults to this domain's own mesh with
            the vertices scaled by `factor`.
        mask : int array, optional
            Indices of the elements to display; all others are hidden.
        format : str, optional
            Matplotlib line format string for the edges.
        factor : float, optional
            Scaling applied to the default vertex coordinates.
        """
        import matplotlib.pyplot as plt
        if (tri is None) or (vtx is None):
            tri = self._triangles
            vtx = factor*self._vertices
        if mask is None:
            plt.triplot( vtx[0], vtx[1], format, triangles=tri.T )
        else:
            # FIX: dtype must be the builtin bool — "boolean" was a NameError.
            mask2 = np.ones( tri.shape[1], dtype=bool ) # Set all to true.
            mask2[mask] = False # Those given by mask will be displayed.
            plt.triplot( vtx[0], vtx[1], format, triangles=tri.T, mask=mask2 )
        plt.axis([ vtx[0].min(), vtx[0].max(), vtx[1].min(), vtx[1].max() ])
        plt.show()
| [
"meshpy.triangle.build",
"copy.deepcopy",
"numpy.minimum",
"numpy.maximum",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"numpy.ceil",
"numpy.empty",
"numpy.floor",
"matplotlib.pyplot.triplot",
"numpy.ones",
"meshpy.triangle.MeshInfo",
"numpy.hstack",
"scipy.sparse.csr_matrix",
"numba.j... | [((3195, 3213), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3198, 3213), False, 'from numba import jit\n'), ((4374, 4392), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4377, 4392), False, 'from numba import jit\n'), ((21200, 21218), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (21203, 21218), False, 'from numba import jit\n'), ((4482, 4493), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (4490, 4493), True, 'import numpy as np\n'), ((5208, 5227), 'meshpy.triangle.MeshInfo', 'triangle.MeshInfo', ([], {}), '()\n', (5225, 5227), False, 'from meshpy import triangle\n'), ((5552, 5572), 'meshpy.triangle.build', 'triangle.build', (['info'], {}), '(info)\n', (5566, 5572), False, 'from meshpy import triangle\n'), ((15051, 15067), 'numpy.minimum', 'np.minimum', (['I', 'J'], {}), '(I, J)\n', (15061, 15067), True, 'import numpy as np\n'), ((15084, 15100), 'numpy.maximum', 'np.maximum', (['I', 'J'], {}), '(I, J)\n', (15094, 15100), True, 'import numpy as np\n'), ((15173, 15225), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(dummy, (min_IJ, max_IJ))', '(n, n)'], {}), '((dummy, (min_IJ, max_IJ)), (n, n))\n', (15190, 15225), False, 'from scipy import sparse\n'), ((15304, 15333), 'numpy.arange', 'np.arange', (['n', '(n + n_new_nodes)'], {}), '(n, n + n_new_nodes)\n', (15313, 15333), True, 'import numpy as np\n'), ((15529, 15585), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(original_edge_index, (I, J))', '(n, n)'], {}), '((original_edge_index, (I, J)), (n, n))\n', (15546, 15585), False, 'from scipy import sparse\n'), ((16042, 16061), 'numpy.zeros_like', 'np.zeros_like', (['elem'], {}), '(elem)\n', (16055, 16061), True, 'import numpy as np\n'), ((16925, 16960), 'numpy.hstack', 'np.hstack', (['(marked, marked, marked)'], {}), '((marked, marked, marked))\n', (16934, 16960), True, 'import numpy as np\n'), ((16980, 17013), 'numpy.empty', 'np.empty', (['(3 * n_marked)'], 
{'dtype': 'int'}), '(3 * n_marked, dtype=int)\n', (16988, 17013), True, 'import numpy as np\n'), ((17359, 17398), 'numpy.hstack', 'np.hstack', (['(self._vertices, new_coords)'], {}), '((self._vertices, new_coords))\n', (17368, 17398), True, 'import numpy as np\n'), ((17461, 17510), 'numpy.empty', 'np.empty', (['(3, n_old_elem + n_new_elem)'], {'dtype': 'int'}), '((3, n_old_elem + n_new_elem), dtype=int)\n', (17469, 17510), True, 'import numpy as np\n'), ((24109, 24119), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24117, 24119), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1189), 'copy.deepcopy', 'copy.deepcopy', (['self._coords'], {}), '(self._coords)\n', (1175, 1189), False, 'import copy\n'), ((2916, 2949), 'numpy.empty', 'np.empty', (['(2, n_tri)'], {'dtype': 'float'}), '((2, n_tri), dtype=float)\n', (2924, 2949), True, 'import numpy as np\n'), ((2968, 3001), 'numpy.empty', 'np.empty', (['(2, n_tri)'], {'dtype': 'float'}), '((2, n_tri), dtype=float)\n', (2976, 3001), True, 'import numpy as np\n'), ((3020, 3053), 'numpy.empty', 'np.empty', (['(2, n_tri)'], {'dtype': 'float'}), '((2, n_tri), dtype=float)\n', (3028, 3053), True, 'import numpy as np\n'), ((5588, 5620), 'numpy.array', 'np.array', (['triangulation.elements'], {}), '(triangulation.elements)\n', (5596, 5620), True, 'import numpy as np\n'), ((5639, 5669), 'numpy.array', 'np.array', (['triangulation.points'], {}), '(triangulation.points)\n', (5647, 5669), True, 'import numpy as np\n'), ((19695, 19722), 'numpy.empty', 'np.empty', (['new_elem.shape[1]'], {}), '(new_elem.shape[1])\n', (19703, 19722), True, 'import numpy as np\n'), ((20409, 20431), 'numpy.empty', 'np.empty', (['new_vec_size'], {}), '(new_vec_size)\n', (20417, 20431), True, 'import numpy as np\n'), ((23724, 23776), 'matplotlib.pyplot.triplot', 'plt.triplot', (['vtx[0]', 'vtx[1]', 'format'], {'triangles': 'tri.T'}), '(vtx[0], vtx[1], format, triangles=tri.T)\n', (23735, 23776), True, 'import matplotlib.pyplot as plt\n'), 
((23813, 23849), 'numpy.ones', 'np.ones', (['tri.shape[1]'], {'dtype': 'boolean'}), '(tri.shape[1], dtype=boolean)\n', (23820, 23849), True, 'import numpy as np\n'), ((23956, 24020), 'matplotlib.pyplot.triplot', 'plt.triplot', (['vtx[0]', 'vtx[1]', 'format'], {'triangles': 'tri.T', 'mask': 'mask2'}), '(vtx[0], vtx[1], format, triangles=tri.T, mask=mask2)\n', (23967, 24020), True, 'import matplotlib.pyplot as plt\n'), ((15667, 15694), 'numpy.ones', 'np.ones', (['vec_len'], {'dtype': 'int'}), '(vec_len, dtype=int)\n', (15674, 15694), True, 'import numpy as np\n'), ((19131, 19177), 'numpy.arange', 'np.arange', (['n_old_elem', '(n_old_elem + n_new_elem)'], {}), '(n_old_elem, n_old_elem + n_new_elem)\n', (19140, 19177), True, 'import numpy as np\n'), ((22486, 22519), 'numpy.ceil', 'np.ceil', (['(min_x / inc_x - offset_x)'], {}), '(min_x / inc_x - offset_x)\n', (22493, 22519), True, 'import numpy as np\n'), ((22554, 22588), 'numpy.floor', 'np.floor', (['(max_x / inc_x - offset_x)'], {}), '(max_x / inc_x - offset_x)\n', (22562, 22588), True, 'import numpy as np\n'), ((22622, 22655), 'numpy.ceil', 'np.ceil', (['(min_y / inc_y - offset_y)'], {}), '(min_y / inc_y - offset_y)\n', (22629, 22655), True, 'import numpy as np\n'), ((22690, 22724), 'numpy.floor', 'np.floor', (['(max_y / inc_y - offset_y)'], {}), '(max_y / inc_y - offset_y)\n', (22698, 22724), True, 'import numpy as np\n'), ((19435, 19467), 'numpy.empty', 'np.empty', (['(2, new_elem.shape[1])'], {}), '((2, new_elem.shape[1]))\n', (19443, 19467), True, 'import numpy as np\n')] |
# this file contains the integral kernel functions that are used for computing
# the elevation and velocity solutions
import numpy as np
from params import Nt,eps_shelf,delta,dt
from scipy.signal import fftconvolve
#---------------------------convolution operator--------------------------------
def conv(a, b):
    """Time-domain convolution of two kernels, scaled by the timestep.

    Uses SciPy's FFT-based convolution along axis 0 (time) and truncates
    the 'full' result to the first Nt samples, multiplying by dt to
    approximate the continuous convolution integral.
    """
    full = fftconvolve(a, b, mode='full', axes=0)
    return dt * full[0:Nt, :, :]
#-----------------------------indicator function--------------------------------
# used to trunctate the integral kernels at a small k=k_min
def ind(k, k_min):
    """Indicator used to truncate the integral kernels at a small cutoff.

    Returns 1 where k > k_min, 0 where k < k_min, and exactly 0.5 at
    k == k_min (since np.sign(0) == 0).
    """
    return (np.sign(k - k_min) + 1) / 2.0
#------------------------Functions relevant to kernels--------------------------
def R(k):
    """Relaxation response function for floating ice.

    Regularized by the module-level constant ``eps_shelf`` so the
    response stays bounded.
    """
    n = 2 * np.pi * k  # convert to SciPy's Fourier-transform convention
    relax = (1 / n) * (np.exp(4 * n) + (4 * n) * np.exp(2 * n) - 1)
    D = np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * np.exp(2 * n) + 1
    return 1 / (eps_shelf + D / relax)
def B(k):
    """Buoyancy transfer function for floating ice.

    Regularized by the module-level constant ``eps_shelf`` so the
    response stays bounded.
    """
    n = 2 * np.pi * k  # convert to SciPy's Fourier-transform convention
    transfer = (1 / n) * (2 * (n + 1) * np.exp(3 * n) + 2 * (n - 1) * np.exp(n))
    D = np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * np.exp(2 * n) + 1
    return 1 / (eps_shelf + D / transfer)
def Lamda_p(k, kx, u0):
    """Larger eigenvalue of the coupled surface-evolution problem.

    Combines the relaxation (R) and buoyancy (B) responses with an
    advective phase term from the background speed u0.
    """
    relax = R(k)
    buoy = B(k)
    chi = (1 - delta) * relax
    mu = np.sqrt(4 * delta * buoy ** 2 + chi ** 2)
    advection = 1j * (2 * np.pi * kx) * u0
    return -0.5 * (delta + 1) * relax + 0.5 * mu - advection
def Lamda_m(k, kx, u0):
    """Smaller eigenvalue of the coupled surface-evolution problem.

    Same ingredients as Lamda_p but with the opposite sign on the
    discriminant term mu.
    """
    relax = R(k)
    buoy = B(k)
    chi = (1 - delta) * relax
    mu = np.sqrt(4 * delta * buoy ** 2 + chi ** 2)
    advection = 1j * (2 * np.pi * kx) * u0
    return -advection - 0.5 * (delta + 1) * relax - 0.5 * mu
def Uh(k, z):
    """Horizontal-velocity response to the upper-surface elevation.

    z is the scaled depth coordinate (z=0 at the base, z=1 at the
    surface).
    """
    n = 2 * np.pi * k
    e2n = np.exp(2 * n)
    e2nz = np.exp(2 * n * z)
    numer = z * (2 * n * (e2n + e2nz)) \
        - (1 - z) * (np.exp(2 * n * (z + 1)) + e2n - e2nz - 1)
    denom = np.exp(n * z) * (np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * e2n + 1) / n
    return np.exp(n) * numer / denom
def Us(k, z):
    """Horizontal-velocity response to the lower-surface elevation.

    z is the scaled depth coordinate (z=0 at the base, z=1 at the
    surface).
    """
    n = 2 * np.pi * k
    e2n = np.exp(2 * n)
    e2nz = np.exp(2 * n * z)
    numer = z * (np.exp(4 * n) + np.exp(2 * n * (z + 1)) - e2n - e2nz) \
        - (1 - z) * 2 * n * e2n * (e2nz + 1)
    denom = np.exp(n * z) * (np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * e2n + 1) / n
    return numer / denom
def Wh(k, z):
    """Vertical-velocity response to the upper-surface elevation.

    z is the scaled depth coordinate (z=0 at the base, z=1 at the
    surface).
    """
    n = 2 * np.pi * k
    e2n = np.exp(2 * n)
    # Polynomial-in-z prefactor of exp(2*n*z) inside the numerator.
    inner = -n * z * (2 * n + e2n - 1) + n * e2n + n + e2n - 1
    numer = (n * z * (2 * n * e2n + e2n - 1) + n * e2n + n
             + inner * np.exp(2 * n * z) + e2n - 1) * np.exp(-n * (z - 1))
    denom = np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * e2n + 1
    return -(1 / n) * numer / denom
def Ws(k, z):
    """Vertical-velocity response to the lower-surface elevation.

    z is the scaled depth coordinate (z=0 at the base, z=1 at the
    surface).
    """
    n = 2 * np.pi * k
    e2n = np.exp(2 * n)
    term_a = (-2 * n ** 2 + n * z * (2 * n + e2n - 1) + 2 * n + e2n - 1) * e2n
    term_b = (2 * n ** 2 * e2n - n * z * (2 * n * e2n + e2n - 1)
              + 2 * n * e2n + e2n - 1) * np.exp(2 * n * z)
    numer = (term_a + term_b) * np.exp(-n * z)
    denom = np.exp(4 * n) - 2 * (1 + 2 * n ** 2) * e2n + 1
    return -(1 / n) * numer / denom
#------------------------------ Kernels-----------------------------------------
def ker_h(t, k, kx, u0):
    """Integral kernel for the upper-surface elevation under a
    time-dependent melt-rate forcing.

    Truncated below k = 1e-4 via ind() to avoid the long-wave limit.
    """
    buoy = B(k)
    chi = (1 - delta) * R(k)
    mu = np.sqrt(4 * delta * buoy ** 2 + chi ** 2)
    amp = delta * buoy / mu
    mode_p = amp * np.exp(Lamda_p(k, kx, u0) * t)
    mode_m = amp * np.exp(Lamda_m(k, kx, u0) * t)
    return (mode_m - mode_p) * ind(k, 1e-4)
def ker_s(t, k, kx, u0):
    """Integral kernel for the lower-surface elevation under a
    time-dependent melt-rate forcing.

    Truncated below k = 1e-4 via ind() to avoid the long-wave limit.
    """
    buoy = B(k)
    chi = (1 - delta) * R(k)
    mu = np.sqrt(4 * delta * buoy ** 2 + chi ** 2)
    mode_m = (1 / (2 * mu)) * (mu - chi) * np.exp(Lamda_m(k, kx, u0) * t)
    mode_p = (1 / (2 * mu)) * (mu + chi) * np.exp(Lamda_p(k, kx, u0) * t)
    return (mode_m + mode_p) * ind(k, 1e-4)
| [
"scipy.signal.fftconvolve",
"numpy.sign",
"numpy.exp",
"numpy.sqrt"
] | [((1396, 1435), 'numpy.sqrt', 'np.sqrt', (['(4 * delta * B_ ** 2 + chi ** 2)'], {}), '(4 * delta * B_ ** 2 + chi ** 2)\n', (1403, 1435), True, 'import numpy as np\n'), ((1639, 1678), 'numpy.sqrt', 'np.sqrt', (['(4 * delta * B_ ** 2 + chi ** 2)'], {}), '(4 * delta * B_ ** 2 + chi ** 2)\n', (1646, 1678), True, 'import numpy as np\n'), ((3662, 3701), 'numpy.sqrt', 'np.sqrt', (['(4 * delta * B_ ** 2 + chi ** 2)'], {}), '(4 * delta * B_ ** 2 + chi ** 2)\n', (3669, 3701), True, 'import numpy as np\n'), ((4057, 4096), 'numpy.sqrt', 'np.sqrt', (['(4 * delta * B_ ** 2 + chi ** 2)'], {}), '(4 * delta * B_ ** 2 + chi ** 2)\n', (4064, 4096), True, 'import numpy as np\n'), ((2840, 2860), 'numpy.exp', 'np.exp', (['(-n * (z - 1))'], {}), '(-n * (z - 1))\n', (2846, 2860), True, 'import numpy as np\n'), ((3307, 3321), 'numpy.exp', 'np.exp', (['(-n * z)'], {}), '(-n * z)\n', (3313, 3321), True, 'import numpy as np\n'), ((3774, 3788), 'numpy.exp', 'np.exp', (['(Lp * t)'], {}), '(Lp * t)\n', (3780, 3788), True, 'import numpy as np\n'), ((3812, 3826), 'numpy.exp', 'np.exp', (['(Lm * t)'], {}), '(Lm * t)\n', (3818, 3826), True, 'import numpy as np\n'), ((4175, 4189), 'numpy.exp', 'np.exp', (['(Lm * t)'], {}), '(Lm * t)\n', (4181, 4189), True, 'import numpy as np\n'), ((4219, 4233), 'numpy.exp', 'np.exp', (['(Lp * t)'], {}), '(Lp * t)\n', (4225, 4233), True, 'import numpy as np\n'), ((327, 365), 'scipy.signal.fftconvolve', 'fftconvolve', (['a', 'b'], {'mode': '"""full"""', 'axes': '(0)'}), "(a, b, mode='full', axes=0)\n", (338, 365), False, 'from scipy.signal import fftconvolve\n'), ((549, 567), 'numpy.sign', 'np.sign', (['(k - k_min)'], {}), '(k - k_min)\n', (556, 567), True, 'import numpy as np\n'), ((853, 866), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (859, 866), True, 'import numpy as np\n'), ((1161, 1174), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (1167, 1174), True, 'import numpy as np\n'), ((2025, 2038), 'numpy.exp', 'np.exp', (['(n * z)'], {}), '(n 
* z)\n', (2031, 2038), True, 'import numpy as np\n'), ((2089, 2098), 'numpy.exp', 'np.exp', (['n'], {}), '(n)\n', (2095, 2098), True, 'import numpy as np\n'), ((2410, 2423), 'numpy.exp', 'np.exp', (['(n * z)'], {}), '(n * z)\n', (2416, 2423), True, 'import numpy as np\n'), ((2867, 2880), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (2873, 2880), True, 'import numpy as np\n'), ((3328, 3341), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (3334, 3341), True, 'import numpy as np\n'), ((807, 820), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (813, 820), True, 'import numpy as np\n'), ((879, 892), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (885, 892), True, 'import numpy as np\n'), ((1120, 1133), 'numpy.exp', 'np.exp', (['(3 * n)'], {}), '(3 * n)\n', (1126, 1133), True, 'import numpy as np\n'), ((1142, 1151), 'numpy.exp', 'np.exp', (['n'], {}), '(n)\n', (1148, 1151), True, 'import numpy as np\n'), ((1187, 1200), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (1193, 1200), True, 'import numpy as np\n'), ((2347, 2364), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (2353, 2364), True, 'import numpy as np\n'), ((2372, 2385), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2378, 2385), True, 'import numpy as np\n'), ((2385, 2402), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (2391, 2402), True, 'import numpy as np\n'), ((2823, 2836), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2829, 2836), True, 'import numpy as np\n'), ((2893, 2906), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2899, 2906), True, 'import numpy as np\n'), ((3179, 3192), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3185, 3192), True, 'import numpy as np\n'), ((3292, 3309), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (3298, 3309), True, 'import numpy as np\n'), ((3354, 3367), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3360, 3367), True, 'import numpy as 
np\n'), ((827, 840), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (833, 840), True, 'import numpy as np\n'), ((1933, 1946), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (1939, 1946), True, 'import numpy as np\n'), ((1946, 1963), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (1952, 1963), True, 'import numpy as np\n'), ((2000, 2017), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (2006, 2017), True, 'import numpy as np\n'), ((2038, 2051), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (2044, 2051), True, 'import numpy as np\n'), ((2335, 2348), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2341, 2348), True, 'import numpy as np\n'), ((2423, 2436), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (2429, 2436), True, 'import numpy as np\n'), ((1969, 1992), 'numpy.exp', 'np.exp', (['(2 * n * (z + 1))'], {}), '(2 * n * (z + 1))\n', (1975, 1992), True, 'import numpy as np\n'), ((1988, 2001), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (1994, 2001), True, 'import numpy as np\n'), ((2063, 2076), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2069, 2076), True, 'import numpy as np\n'), ((2304, 2317), 'numpy.exp', 'np.exp', (['(4 * n)'], {}), '(4 * n)\n', (2310, 2317), True, 'import numpy as np\n'), ((2316, 2339), 'numpy.exp', 'np.exp', (['(2 * n * (z + 1))'], {}), '(2 * n * (z + 1))\n', (2322, 2339), True, 'import numpy as np\n'), ((2448, 2461), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2454, 2461), True, 'import numpy as np\n'), ((2807, 2824), 'numpy.exp', 'np.exp', (['(2 * n * z)'], {}), '(2 * n * z)\n', (2813, 2824), True, 'import numpy as np\n'), ((3162, 3175), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3168, 3175), True, 'import numpy as np\n'), ((3275, 3288), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3281, 3288), True, 'import numpy as np\n'), ((2720, 2733), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2726, 2733), True, 
'import numpy as np\n'), ((2790, 2803), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2796, 2803), True, 'import numpy as np\n'), ((3261, 3274), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3267, 3274), True, 'import numpy as np\n'), ((3201, 3214), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3207, 3214), True, 'import numpy as np\n'), ((2699, 2712), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2705, 2712), True, 'import numpy as np\n'), ((2685, 2698), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2691, 2698), True, 'import numpy as np\n'), ((2772, 2785), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2778, 2785), True, 'import numpy as np\n'), ((3137, 3150), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3143, 3150), True, 'import numpy as np\n'), ((3238, 3251), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3244, 3251), True, 'import numpy as np\n'), ((3224, 3237), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (3230, 3237), True, 'import numpy as np\n'), ((2751, 2764), 'numpy.exp', 'np.exp', (['(2 * n)'], {}), '(2 * n)\n', (2757, 2764), True, 'import numpy as np\n')] |
import numpy as np
from sklearntools.earth import Earth
import pandas
from sklearntools.sym.printers import model_to_code, exec_module
from sklearntools.sym.sym_predict import sym_predict
from numpy.ma.testutils import assert_array_almost_equal
import execjs
from sklearntools.calibration import LogTransformer,\
ResponseTransformingEstimator, CalibratedEstimatorCV, \
MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, \
PredictorTransformer, ProbaPredictingEstimator
from sklearntools.kfold import CrossValidatingEstimator
from sklearn.tree.tree import DecisionTreeRegressor
from nose.tools import assert_almost_equal
from sklearntools.sym.syms import syms
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier,\
LeastSquaresError, LeastAbsoluteError, HuberLossFunction,\
GradientBoostingRegressor
from sklearntools.sym.sym_predict_proba import sym_predict_proba
from sklearntools.gb import GradientBoostingEstimator
from nose import SkipTest
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model.logistic import LogisticRegression
from sympy.core.symbol import Symbol
def test_sklearn_gradient_boosting_classifier_export():
    """Generated numpy code for a stock sklearn GradientBoostingClassifier
    must reproduce its predict_proba output."""
    np.random.seed(1)
    # Build a binary classification data set with thresholded features.
    n_rows = 10000
    features = np.random.normal(size=(n_rows, 10))
    cutoffs = np.random.normal(size=10)
    gated = features * (features > cutoffs)
    weights = np.random.normal(size=10)
    labels = (np.dot(gated, weights) + np.random.normal(size=n_rows)) > 0
    # Train a gradient boosting classifier.
    model = GradientBoostingClassifier(max_depth=10, n_estimators=10)
    model.fit(features, labels)
    # Compare the exported numpy code against the live model's
    # positive-class probabilities over the whole data set.
    expected = model.predict_proba(features)[:, 1]
    frame = pandas.DataFrame(features, columns=[s.name for s in syms(model)])
    code = model_to_code(model, 'numpy', 'predict_proba', 'test_model')
    generated = exec_module('numpy_test_module', code)
    actual = generated.test_model(**frame)
    assert_array_almost_equal(np.ravel(actual), np.ravel(expected))
def test_least_squares_error_export():
    """Earth-based gradient boosting with a least-squares loss: both the
    sympy-expression export and the generated numpy code must reproduce
    model.predict."""
    np.random.seed(1)  # fixed seed -- everything below depends on the RNG stream
    # Create some data
    m = 10000
    X = np.random.normal(size=(m,10))
    beta = np.random.normal(size=10)
    # Noisy linear response: mean X.beta, unit variance.
    y = np.random.normal(np.dot(X, beta))
    # Fit a model
    model = GradientBoostingEstimator(Earth(max_terms=5), loss_function=LeastSquaresError(1))
    model.fit(X, y)
    # Export as sympy expression
    expr = sym_predict(model)
    # Check some values
    y_pred = model.predict(X)
    # Column names come from the model's symbol table so keyword calls line up.
    X_ = pandas.DataFrame(X, columns=[s.name for s in syms(model)])
    for i in range(10):
        row = dict(X_.iloc[i,:])
        # evalf(16, row): evaluate the expression to 16 digits for this row.
        assert_almost_equal(y_pred[i], expr.evalf(16, row))
    # Export python code and check output
    numpy_test_module = exec_module('numpy_test_module', model_to_code(model, 'numpy', 'predict', 'test_model'))
    y_pred_numpy = numpy_test_module.test_model(**X_)
    assert_array_almost_equal(np.ravel(y_pred_numpy), np.ravel(y_pred))
def test_least_absolute_error_export():
    """Earth-based gradient boosting with a least-absolute-error loss:
    both the sympy-expression export and the generated numpy code must
    reproduce model.predict."""
    np.random.seed(1)  # fixed seed -- everything below depends on the RNG stream
    # Create some data
    m = 10000
    X = np.random.normal(size=(m,10))
    beta = np.random.normal(size=10)
    # Noisy linear response: mean X.beta, unit variance.
    y = np.random.normal(np.dot(X, beta))
    # Fit a model
    model = GradientBoostingEstimator(Earth(max_terms=5), loss_function=LeastAbsoluteError(1))
    model.fit(X, y)
    # Export as sympy expression
    expr = sym_predict(model)
    # Check some values
    y_pred = model.predict(X)
    # Column names come from the model's symbol table so keyword calls line up.
    X_ = pandas.DataFrame(X, columns=[s.name for s in syms(model)])
    for i in range(10):
        row = dict(X_.iloc[i,:])
        # evalf(16, row): evaluate the expression to 16 digits for this row.
        assert_almost_equal(y_pred[i], expr.evalf(16, row))
    # Export python code and check output
    numpy_test_module = exec_module('numpy_test_module', model_to_code(model, 'numpy', 'predict', 'test_model'))
    y_pred_numpy = numpy_test_module.test_model(**X_)
    assert_array_almost_equal(np.ravel(y_pred_numpy), np.ravel(y_pred))
def test_huber_loss_export():
    """Earth-based gradient boosting with a Huber loss (and smoothed basis
    functions): both the sympy-expression export and the generated numpy
    code must reproduce model.predict."""
    np.random.seed(1)  # fixed seed -- everything below depends on the RNG stream
    # Create some data
    m = 10000
    X = np.random.normal(size=(m,10))
    beta = np.random.normal(size=10)
    # Noisy linear response: mean X.beta, unit variance.
    y = np.random.normal(np.dot(X, beta))
    # Fit a model
    model = GradientBoostingEstimator(Earth(max_terms=5, smooth=True), loss_function=HuberLossFunction(1))
    model.fit(X, y)
    # Export as sympy expression
    expr = sym_predict(model)
    # Check some values
    y_pred = model.predict(X)
    # Column names come from the model's symbol table so keyword calls line up.
    X_ = pandas.DataFrame(X, columns=[s.name for s in syms(model)])
    for i in range(10):
        row = dict(X_.iloc[i,:])
        # evalf(16, row): evaluate the expression to 16 digits for this row.
        assert_almost_equal(y_pred[i], expr.evalf(16, row))
    # Export python code and check output
    numpy_test_module = exec_module('numpy_test_module', model_to_code(model, 'numpy', 'predict', 'test_model'))
    y_pred_numpy = numpy_test_module.test_model(**X_)
    assert_array_almost_equal(np.ravel(y_pred_numpy), np.ravel(y_pred))
def test_gradient_boosting_regressor():
    """A stock sklearn GradientBoostingRegressor should export to a sympy
    expression and to generated numpy code that match its predictions."""
    np.random.seed(1)
    # Noisy linear regression data.
    n_rows = 10000
    features = np.random.normal(size=(n_rows, 10))
    weights = np.random.normal(size=10)
    target = np.random.normal(np.dot(features, weights))
    # Fit the model.
    model = GradientBoostingRegressor(n_estimators=10)
    model.fit(features, target)
    # Sympy export: spot-check the first ten rows against model.predict.
    expr = sym_predict(model)
    expected = model.predict(features)
    frame = pandas.DataFrame(features, columns=[s.name for s in syms(model)])
    for idx in range(10):
        row = dict(frame.iloc[idx, :])
        assert_almost_equal(expected[idx], expr.evalf(16, row))
    # Numpy-code export: compare over the full data set.
    generated = exec_module(
        'numpy_test_module',
        model_to_code(model, 'numpy', 'predict', 'test_model'))
    actual = generated.test_model(**frame)
    assert_array_almost_equal(np.ravel(actual), np.ravel(expected))
def test_decision_tree_export():
    """A sklearn DecisionTreeRegressor should export to a sympy expression
    and to generated numpy code that match its predictions."""
    np.random.seed(1)
    # Regression data with thresholded (gated) features.
    n_rows = 10000
    features = np.random.normal(size=(n_rows, 10))
    cutoffs = np.random.normal(size=10)
    gated = features * (features > cutoffs)
    weights = np.random.normal(size=10)
    target = np.dot(gated, weights) + np.random.normal(size=n_rows)
    # Train a decision tree regressor.
    model = DecisionTreeRegressor(max_depth=10)
    model.fit(features, target)
    # Sympy export: spot-check the first ten rows against model.predict.
    expr = sym_predict(model)
    expected = model.predict(features)
    frame = pandas.DataFrame(features, columns=[s.name for s in syms(model)])
    for idx in range(10):
        row = dict(frame.iloc[idx, :])
        assert_almost_equal(expected[idx], expr.evalf(16, row))
    # Numpy-code export: compare over the full data set.
    generated = exec_module(
        'numpy_test_module',
        model_to_code(model, 'numpy', 'predict', 'test_model'))
    actual = generated.test_model(**frame)
    assert_array_almost_equal(np.ravel(actual), np.ravel(expected))
def test_sympy_export():
    """End-to-end export of a composed estimator (response transform ->
    Earth -> calibration -> cross-validation) to generated numpy and
    python code; each export must reproduce model.predict.

    The javascript branch after the early ``return`` is intentionally
    skipped.
    """
    np.random.seed(1)
    m = 1000
    n = 10
    # Squared normals give non-negative features and coefficients.
    X = np.random.normal(scale=.5,size=(m,n))**2
    beta = np.random.normal(scale=1.5,size=n)**2
    eta = np.dot(X, beta)
    # Knock out roughly half the entries to exercise missing-data handling.
    missing = np.random.binomial(p=.5, n=1, size=(m,n)) == 1
    X[missing] = None
    X = pandas.DataFrame(X, columns=['col%d' % i for i in range(n)])
    y = np.random.exponential(eta)
    # y = np.random.binomial(1, 1. / (1. + np.exp(-eta)))
    model = ResponseTransformingEstimator(estimator = Earth(allow_missing=True, max_terms=10),
                                          transformer = LogTransformer())
    model >>= Earth(allow_missing=True, max_terms=10, verbose=False)
    model = CalibratedEstimatorCV(estimator=model,
                                  calibrator=MovingAverageSmoothingEstimator(estimator=Earth(), window_size=10))
    # ProbaPredictingEstimator(ThresholdClassifier(LogisticRegression()))
    model = CrossValidatingEstimator(estimator=model)
    model.fit(X, y)
    # print model_to_code(model, 'numpy', 'predict', 'test_model')
    numpy_test_module = exec_module('numpy_test_module', model_to_code(model, 'numpy', 'predict', 'test_model'))
    y_pred = numpy_test_module.test_model(**X)
    assert_array_almost_equal(np.ravel(y_pred), np.ravel(model.predict(X)))
    # The python export is evaluated row by row via keyword arguments.
    python_test_module = exec_module('python_test_module', model_to_code(model, 'python', 'predict', 'test_model'))
    y_pred = [python_test_module.test_model(**row) for i, row in X.iterrows()]
    assert_array_almost_equal(np.ravel(y_pred), np.ravel(model.predict(X)))
    # Skip the javascript part for now
    return
    js = execjs.get(execjs.runtime_names.PyV8)
    # print model_to_code(model, 'javascript', 'predict', 'test_model')
    context = js.compile(model_to_code(model, 'javascript', 'predict', 'test_model'))
    y_pred = [context.eval('test_model(col3=%s, col8=%s)' % (str(row['col3']) if not np.isnan(row['col3']) else 'NaN',
                                                             str(row['col8']) if not np.isnan(row['col8']) else 'NaN'))
              for i, row in X.iloc[:10,:].iterrows()]
    assert_array_almost_equal(np.ravel(y_pred), np.ravel(model.predict(X.iloc[:10,:])))
def test_more_sym_stuff():
    """Composite pipeline (selectors, log/interval transforms, parallel
    Earth models) should export to numpy code that reproduces
    model.predict.

    Bug fix: ``cols`` was built with ``map``, which under Python 3 is a
    one-shot iterator -- the DataFrame constructor exhausted it, so the
    later ``SelectorTransformer(cols)`` received an empty column list.
    Materializing a list makes ``cols`` safely reusable.
    """
    np.random.seed(1)
    m = 1000
    n = 10
    X = np.random.normal(size=(m,n))
    X_bin = X > 0
    X[:,1] = X[:,1] ** 2
    # NOTE(review): X_bin has boolean dtype, so this assignment casts the
    # log values back to bool -- looks intentional here, but worth confirming.
    X_bin[:,1] = np.log(X[:,1] + 1)
    beta = np.random.normal(size=n)
    eta = np.dot(X_bin, beta)
    y = eta + 0.1 * np.random.normal(size=m)
    # A list (not a map iterator): ``cols`` is consumed twice below.
    cols = ['x%d' % i for i in range(n)]
    X = pandas.DataFrame(X, columns=cols)
    model = (SelectorTransformer(['x1']) >> LogTransformer()) & (IntervalTransformer(lower=0., lower_closed=False) & SelectorTransformer(cols))
    model >>= (Earth() & Earth())
    model >>= Earth()
    model = PredictorTransformer(model)
    model.fit(X, y)
    # print sym_predict(model)
    numpy_test_module = exec_module('numpy_test_module', model_to_code(model, 'numpy', 'predict', 'test_model'))
    y_pred = numpy_test_module.test_model(**X)
    assert_array_almost_equal(np.ravel(y_pred), np.ravel(model.predict(X)))
@SkipTest
def test_sym_predict_prior_probability_estimator():
    """Exported numpy code for a ProbaPredictingEstimator wrapping a
    gradient boosting classifier should match model.predict.
    (Currently skipped.)"""
    np.random.seed(1)
    n_rows = 1000
    n_cols = 10
    features = np.random.normal(scale=.5, size=(n_rows, n_cols)) ** 2
    weights = np.random.normal(scale=1.5, size=n_cols) ** 2
    linear = np.dot(features, weights)
    frame = pandas.DataFrame(features, columns=['col%d' % i for i in range(n_cols)])
    prob = 1. / (1. + np.exp(-linear))
    counts = np.random.binomial(3, prob)
    model = ProbaPredictingEstimator(GradientBoostingClassifier(n_estimators=100))
    model.fit(frame, counts)
    generated = exec_module(
        'numpy_test_module',
        model_to_code(model, 'numpy', 'predict', 'test_model'))
    predicted = generated.test_model(**frame)
    assert_array_almost_equal(np.ravel(predicted), np.ravel(model.predict(frame)))
def test_sym_predict_calibrated_classifier_cv():
    """sklearn's CalibratedClassifierCV (isotonic) wrapped in a
    ProbaPredictingEstimator should export to numpy code that matches
    model.predict."""
    np.random.seed(1)
    # Create some data
    m = 10000
    n = 10
    X = np.random.normal(size=(m,n))
    thresh = np.random.normal(size=n)
    X_transformed = X * (X > thresh)
    beta = np.random.normal(size=n)
    y = (np.dot(X_transformed, beta) + np.random.normal(size=m)) > 0
    X = pandas.DataFrame(X, columns=['x%d' % i for i in range(n)])
    model = ProbaPredictingEstimator(CalibratedClassifierCV(LogisticRegression(), method='isotonic'))
    model.fit(X, y)
    code = model_to_code(model, 'numpy', 'predict', 'test_model')#, substitutions=dict(zip(map(Symbol, ['x%d'%i for i in range(n)]), map(Symbol, X.columns))))
    numpy_test_module = exec_module('numpy_test_module', code)
    # On any failure, dump the generated code and data before re-raising
    # so the broken export can be inspected.
    try:
        y_pred = numpy_test_module.test_model(**X)
        y_pred_correct = np.ravel(model.predict(X))
        assert_array_almost_equal(np.ravel(y_pred), y_pred_correct)
    except:
        print(code)
        print(X)
        raise
if __name__ == '__main__':
    # Run a single test directly for quick iteration; exit() makes the
    # generic nose runner below unreachable until these two lines are removed.
    test_sym_predict_calibrated_classifier_cv()
    exit()
    import sys
    import nose
    # This code will run the test in this file.'
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0],
                           module_name,
                           '-s', '-v'])
| [
"numpy.random.seed",
"numpy.ravel",
"sklearn.ensemble.gradient_boosting.HuberLossFunction",
"numpy.random.exponential",
"numpy.isnan",
"numpy.exp",
"numpy.random.normal",
"sklearn.ensemble.gradient_boosting.GradientBoostingClassifier",
"sklearntools.sym.sym_predict.sym_predict",
"pandas.DataFrame"... | [((1223, 1240), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1237, 1240), True, 'import numpy as np\n'), ((1291, 1321), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (1307, 1321), True, 'import numpy as np\n'), ((1334, 1359), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (1350, 1359), True, 'import numpy as np\n'), ((1408, 1433), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (1424, 1433), True, 'import numpy as np\n'), ((1563, 1620), 'sklearn.ensemble.gradient_boosting.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'max_depth': '(10)', 'n_estimators': '(10)'}), '(max_depth=10, n_estimators=10)\n', (1589, 1620), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((1808, 1868), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict_proba"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict_proba', 'test_model')\n", (1821, 1868), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((1893, 1931), 'sklearntools.sym.printers.exec_module', 'exec_module', (['"""numpy_test_module"""', 'code'], {}), "('numpy_test_module', code)\n", (1904, 1931), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((2102, 2119), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2116, 2119), True, 'import numpy as np\n'), ((2170, 2200), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (2186, 2200), True, 'import numpy as np\n'), ((2211, 2236), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (2227, 2236), True, 'import numpy as np\n'), ((2465, 2483), 'sklearntools.sym.sym_predict.sym_predict', 'sym_predict', 
(['model'], {}), '(model)\n', (2476, 2483), False, 'from sklearntools.sym.sym_predict import sym_predict\n'), ((3059, 3076), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3073, 3076), True, 'import numpy as np\n'), ((3127, 3157), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (3143, 3157), True, 'import numpy as np\n'), ((3168, 3193), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3184, 3193), True, 'import numpy as np\n'), ((3423, 3441), 'sklearntools.sym.sym_predict.sym_predict', 'sym_predict', (['model'], {}), '(model)\n', (3434, 3441), False, 'from sklearntools.sym.sym_predict import sym_predict\n'), ((4007, 4024), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (4021, 4024), True, 'import numpy as np\n'), ((4075, 4105), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (4091, 4105), True, 'import numpy as np\n'), ((4116, 4141), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4132, 4141), True, 'import numpy as np\n'), ((4383, 4401), 'sklearntools.sym.sym_predict.sym_predict', 'sym_predict', (['model'], {}), '(model)\n', (4394, 4401), False, 'from sklearntools.sym.sym_predict import sym_predict\n'), ((4977, 4994), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (4991, 4994), True, 'import numpy as np\n'), ((5045, 5075), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (5061, 5075), True, 'import numpy as np\n'), ((5086, 5111), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (5102, 5111), True, 'import numpy as np\n'), ((5189, 5231), 'sklearn.ensemble.gradient_boosting.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (5214, 5231), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, 
LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((5301, 5319), 'sklearntools.sym.sym_predict.sym_predict', 'sym_predict', (['model'], {}), '(model)\n', (5312, 5319), False, 'from sklearntools.sym.sym_predict import sym_predict\n'), ((5888, 5905), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (5902, 5905), True, 'import numpy as np\n'), ((5956, 5986), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, 10)'}), '(size=(m, 10))\n', (5972, 5986), True, 'import numpy as np\n'), ((5999, 6024), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (6015, 6024), True, 'import numpy as np\n'), ((6073, 6098), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (6089, 6098), True, 'import numpy as np\n'), ((6217, 6252), 'sklearn.tree.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'max_depth': '(10)'}), '(max_depth=10)\n', (6238, 6252), False, 'from sklearn.tree.tree import DecisionTreeRegressor\n'), ((6322, 6340), 'sklearntools.sym.sym_predict.sym_predict', 'sym_predict', (['model'], {}), '(model)\n', (6333, 6340), False, 'from sklearntools.sym.sym_predict import sym_predict\n'), ((6905, 6922), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6919, 6922), True, 'import numpy as np\n'), ((7055, 7070), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (7061, 7070), True, 'import numpy as np\n'), ((7231, 7257), 'numpy.random.exponential', 'np.random.exponential', (['eta'], {}), '(eta)\n', (7252, 7257), True, 'import numpy as np\n'), ((7504, 7558), 'sklearntools.earth.Earth', 'Earth', ([], {'allow_missing': '(True)', 'max_terms': '(10)', 'verbose': '(False)'}), '(allow_missing=True, max_terms=10, verbose=False)\n', (7509, 7558), False, 'from sklearntools.earth import Earth\n'), ((7813, 7854), 'sklearntools.kfold.CrossValidatingEstimator', 'CrossValidatingEstimator', ([], {'estimator': 'model'}), '(estimator=model)\n', 
(7837, 7854), False, 'from sklearntools.kfold import CrossValidatingEstimator\n'), ((8528, 8565), 'execjs.get', 'execjs.get', (['execjs.runtime_names.PyV8'], {}), '(execjs.runtime_names.PyV8)\n', (8538, 8565), False, 'import execjs\n'), ((9139, 9156), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (9153, 9156), True, 'import numpy as np\n'), ((9189, 9218), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, n)'}), '(size=(m, n))\n', (9205, 9218), True, 'import numpy as np\n'), ((9278, 9297), 'numpy.log', 'np.log', (['(X[:, 1] + 1)'], {}), '(X[:, 1] + 1)\n', (9284, 9297), True, 'import numpy as np\n'), ((9308, 9332), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (9324, 9332), True, 'import numpy as np\n'), ((9343, 9362), 'numpy.dot', 'np.dot', (['X_bin', 'beta'], {}), '(X_bin, beta)\n', (9349, 9362), True, 'import numpy as np\n'), ((9460, 9493), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {'columns': 'cols'}), '(X, columns=cols)\n', (9476, 9493), False, 'import pandas\n'), ((9691, 9698), 'sklearntools.earth.Earth', 'Earth', ([], {}), '()\n', (9696, 9698), False, 'from sklearntools.earth import Earth\n'), ((9711, 9738), 'sklearntools.calibration.PredictorTransformer', 'PredictorTransformer', (['model'], {}), '(model)\n', (9731, 9738), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, PredictorTransformer, ProbaPredictingEstimator\n'), ((10098, 10115), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (10112, 10115), True, 'import numpy as np\n'), ((10248, 10263), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (10254, 10263), True, 'import numpy as np\n'), ((10374, 10398), 'numpy.random.binomial', 'np.random.binomial', (['(3)', 'p'], {}), '(3, p)\n', (10392, 10398), True, 'import numpy as np\n'), ((10812, 10829), 'numpy.random.seed', 
'np.random.seed', (['(1)'], {}), '(1)\n', (10826, 10829), True, 'import numpy as np\n'), ((10891, 10920), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, n)'}), '(size=(m, n))\n', (10907, 10920), True, 'import numpy as np\n'), ((10933, 10957), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (10949, 10957), True, 'import numpy as np\n'), ((11006, 11030), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (11022, 11030), True, 'import numpy as np\n'), ((11310, 11364), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (11323, 11364), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((11482, 11520), 'sklearntools.sym.printers.exec_module', 'exec_module', (['"""numpy_test_module"""', 'code'], {}), "('numpy_test_module', code)\n", (11493, 11520), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((11999, 12052), 'nose.run', 'nose.run', ([], {'argv': "[sys.argv[0], module_name, '-s', '-v']"}), "(argv=[sys.argv[0], module_name, '-s', '-v'])\n", (12007, 12052), False, 'import nose\n'), ((2016, 2038), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (2024, 2038), True, 'import numpy as np\n'), ((2040, 2056), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (2048, 2056), True, 'import numpy as np\n'), ((2262, 2277), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2268, 2277), True, 'import numpy as np\n'), ((2340, 2358), 'sklearntools.earth.Earth', 'Earth', ([], {'max_terms': '(5)'}), '(max_terms=5)\n', (2345, 2358), False, 'from sklearntools.earth import Earth\n'), ((2832, 2886), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (2845, 2886), False, 'from 
sklearntools.sym.printers import model_to_code, exec_module\n'), ((2972, 2994), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (2980, 2994), True, 'import numpy as np\n'), ((2996, 3012), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (3004, 3012), True, 'import numpy as np\n'), ((3219, 3234), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (3225, 3234), True, 'import numpy as np\n'), ((3297, 3315), 'sklearntools.earth.Earth', 'Earth', ([], {'max_terms': '(5)'}), '(max_terms=5)\n', (3302, 3315), False, 'from sklearntools.earth import Earth\n'), ((3790, 3844), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (3803, 3844), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((3930, 3952), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (3938, 3952), True, 'import numpy as np\n'), ((3954, 3970), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (3962, 3970), True, 'import numpy as np\n'), ((4167, 4182), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (4173, 4182), True, 'import numpy as np\n'), ((4245, 4276), 'sklearntools.earth.Earth', 'Earth', ([], {'max_terms': '(5)', 'smooth': '(True)'}), '(max_terms=5, smooth=True)\n', (4250, 4276), False, 'from sklearntools.earth import Earth\n'), ((4750, 4804), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (4763, 4804), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((4890, 4912), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (4898, 4912), True, 'import numpy as np\n'), ((4914, 4930), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (4922, 4930), True, 'import numpy as np\n'), ((5137, 5152), 
'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (5143, 5152), True, 'import numpy as np\n'), ((5668, 5722), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (5681, 5722), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((5808, 5830), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (5816, 5830), True, 'import numpy as np\n'), ((5832, 5848), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (5840, 5848), True, 'import numpy as np\n'), ((6107, 6134), 'numpy.dot', 'np.dot', (['X_transformed', 'beta'], {}), '(X_transformed, beta)\n', (6113, 6134), True, 'import numpy as np\n'), ((6137, 6161), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'm'}), '(size=m)\n', (6153, 6161), True, 'import numpy as np\n'), ((6689, 6743), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (6702, 6743), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((6829, 6851), 'numpy.ravel', 'np.ravel', (['y_pred_numpy'], {}), '(y_pred_numpy)\n', (6837, 6851), True, 'import numpy as np\n'), ((6853, 6869), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (6861, 6869), True, 'import numpy as np\n'), ((6955, 6995), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.5)', 'size': '(m, n)'}), '(scale=0.5, size=(m, n))\n', (6971, 6995), True, 'import numpy as np\n'), ((7007, 7042), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1.5)', 'size': 'n'}), '(scale=1.5, size=n)\n', (7023, 7042), True, 'import numpy as np\n'), ((7085, 7128), 'numpy.random.binomial', 'np.random.binomial', ([], {'p': '(0.5)', 'n': '(1)', 'size': '(m, n)'}), '(p=0.5, n=1, size=(m, n))\n', (7103, 7128), True, 'import numpy as np\n'), ((8004, 8058), 
'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (8017, 8058), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((8137, 8153), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (8145, 8153), True, 'import numpy as np\n'), ((8247, 8302), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""python"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'python', 'predict', 'test_model')\n", (8260, 8302), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((8413, 8429), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (8421, 8429), True, 'import numpy as np\n'), ((8663, 8722), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""javascript"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'javascript', 'predict', 'test_model')\n", (8676, 8722), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((9049, 9065), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (9057, 9065), True, 'import numpy as np\n'), ((9658, 9665), 'sklearntools.earth.Earth', 'Earth', ([], {}), '()\n', (9663, 9665), False, 'from sklearntools.earth import Earth\n'), ((9668, 9675), 'sklearntools.earth.Earth', 'Earth', ([], {}), '()\n', (9673, 9675), False, 'from sklearntools.earth import Earth\n'), ((9852, 9906), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (9865, 9906), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((9985, 10001), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (9993, 10001), True, 'import numpy as np\n'), ((10148, 10188), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.5)', 'size': '(m, n)'}), '(scale=0.5, 
size=(m, n))\n', (10164, 10188), True, 'import numpy as np\n'), ((10200, 10235), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1.5)', 'size': 'n'}), '(scale=1.5, size=n)\n', (10216, 10235), True, 'import numpy as np\n'), ((10441, 10485), 'sklearn.ensemble.gradient_boosting.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (10467, 10485), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((10574, 10628), 'sklearntools.sym.printers.model_to_code', 'model_to_code', (['model', '"""numpy"""', '"""predict"""', '"""test_model"""'], {}), "(model, 'numpy', 'predict', 'test_model')\n", (10587, 10628), False, 'from sklearntools.sym.printers import model_to_code, exec_module\n'), ((10707, 10723), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (10715, 10723), True, 'import numpy as np\n'), ((1443, 1470), 'numpy.dot', 'np.dot', (['X_transformed', 'beta'], {}), '(X_transformed, beta)\n', (1449, 1470), True, 'import numpy as np\n'), ((1473, 1497), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'm'}), '(size=m)\n', (1489, 1497), True, 'import numpy as np\n'), ((2374, 2394), 'sklearn.ensemble.gradient_boosting.LeastSquaresError', 'LeastSquaresError', (['(1)'], {}), '(1)\n', (2391, 2394), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((3331, 3352), 'sklearn.ensemble.gradient_boosting.LeastAbsoluteError', 'LeastAbsoluteError', (['(1)'], {}), '(1)\n', (3349, 3352), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((4292, 4312), 'sklearn.ensemble.gradient_boosting.HuberLossFunction', 'HuberLossFunction', (['(1)'], {}), 
'(1)\n', (4309, 4312), False, 'from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier, LeastSquaresError, LeastAbsoluteError, HuberLossFunction, GradientBoostingRegressor\n'), ((7375, 7414), 'sklearntools.earth.Earth', 'Earth', ([], {'allow_missing': '(True)', 'max_terms': '(10)'}), '(allow_missing=True, max_terms=10)\n', (7380, 7414), False, 'from sklearntools.earth import Earth\n'), ((7472, 7488), 'sklearntools.calibration.LogTransformer', 'LogTransformer', ([], {}), '()\n', (7486, 7488), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, PredictorTransformer, ProbaPredictingEstimator\n'), ((9383, 9407), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'm'}), '(size=m)\n', (9399, 9407), True, 'import numpy as np\n'), ((9512, 9539), 'sklearntools.calibration.SelectorTransformer', 'SelectorTransformer', (["['x1']"], {}), "(['x1'])\n", (9531, 9539), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, PredictorTransformer, ProbaPredictingEstimator\n'), ((9543, 9559), 'sklearntools.calibration.LogTransformer', 'LogTransformer', ([], {}), '()\n', (9557, 9559), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, PredictorTransformer, ProbaPredictingEstimator\n'), ((9564, 9614), 'sklearntools.calibration.IntervalTransformer', 'IntervalTransformer', ([], {'lower': '(0.0)', 'lower_closed': '(False)'}), '(lower=0.0, lower_closed=False)\n', (9583, 9614), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, 
PredictorTransformer, ProbaPredictingEstimator\n'), ((9616, 9641), 'sklearntools.calibration.SelectorTransformer', 'SelectorTransformer', (['cols'], {}), '(cols)\n', (9635, 9641), False, 'from sklearntools.calibration import LogTransformer, ResponseTransformingEstimator, CalibratedEstimatorCV, MovingAverageSmoothingEstimator, SelectorTransformer, IntervalTransformer, PredictorTransformer, ProbaPredictingEstimator\n'), ((10352, 10364), 'numpy.exp', 'np.exp', (['(-eta)'], {}), '(-eta)\n', (10358, 10364), True, 'import numpy as np\n'), ((11040, 11067), 'numpy.dot', 'np.dot', (['X_transformed', 'beta'], {}), '(X_transformed, beta)\n', (11046, 11067), True, 'import numpy as np\n'), ((11070, 11094), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'm'}), '(size=m)\n', (11086, 11094), True, 'import numpy as np\n'), ((11232, 11252), 'sklearn.linear_model.logistic.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (11250, 11252), False, 'from sklearn.linear_model.logistic import LogisticRegression\n'), ((11667, 11683), 'numpy.ravel', 'np.ravel', (['y_pred'], {}), '(y_pred)\n', (11675, 11683), True, 'import numpy as np\n'), ((1783, 1794), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (1787, 1794), False, 'from sklearntools.sym.syms import syms\n'), ((2597, 2608), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (2601, 2608), False, 'from sklearntools.sym.syms import syms\n'), ((3555, 3566), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (3559, 3566), False, 'from sklearntools.sym.syms import syms\n'), ((4515, 4526), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (4519, 4526), False, 'from sklearntools.sym.syms import syms\n'), ((5433, 5444), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (5437, 5444), False, 'from sklearntools.sym.syms import syms\n'), ((6454, 6465), 'sklearntools.sym.syms.syms', 'syms', (['model'], {}), '(model)\n', (6458, 6465), False, 
'from sklearntools.sym.syms import syms\n'), ((7701, 7708), 'sklearntools.earth.Earth', 'Earth', ([], {}), '()\n', (7706, 7708), False, 'from sklearntools.earth import Earth\n'), ((8809, 8830), 'numpy.isnan', 'np.isnan', (["row['col3']"], {}), "(row['col3'])\n", (8817, 8830), True, 'import numpy as np\n'), ((8929, 8950), 'numpy.isnan', 'np.isnan', (["row['col8']"], {}), "(row['col8'])\n", (8937, 8950), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
from collections import OrderedDict
class Generator(nn.Module):
    """MLP generator: maps a latent vector to an image of shape img_shape."""

    def __init__(self, latent_dim, img_shape):
        super().__init__()
        self.img_shape = img_shape

        def block(in_feat, out_feat, normalize=True):
            # Linear -> (optional BatchNorm) -> LeakyReLU -> Dropout
            # NOTE(review): BatchNorm1d's second positional argument is eps,
            # so this sets eps=0.8 -- momentum may have been intended; confirm.
            stage = [nn.Linear(in_feat, out_feat)]
            if normalize:
                stage.append(nn.BatchNorm1d(out_feat, 0.8))
            stage += [nn.LeakyReLU(0.2, inplace=True), nn.Dropout(0.5)]
            return stage

        layers = []
        layers.extend(block(latent_dim, 256, normalize=False))
        layers.extend(block(256, 256))
        layers.extend(block(256, 512))
        layers.extend(block(512, 1024))
        layers.append(nn.Linear(1024, int(np.prod(img_shape))))
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Generate a batch of images from latent codes z."""
        flat = self.model(z)
        return flat.view(flat.size(0), *self.img_shape)
class Discriminator(nn.Module):
    """MLP discriminator: maps a flattened image to a real/fake probability."""

    def __init__(self, img_shape):
        super().__init__()
        in_features = int(np.prod(img_shape))
        layers = [
            nn.Linear(in_features, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        """Return the probability (in [0, 1]) that each image is real."""
        flattened = img.view(img.size(0), -1)
        return self.model(flattened)
| [
"torch.nn.Dropout",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.Sigmoid"
] | [((1125, 1156), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1137, 1156), True, 'import torch.nn as nn\n'), ((1170, 1185), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1180, 1185), True, 'import torch.nn as nn\n'), ((1199, 1218), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (1208, 1218), True, 'import torch.nn as nn\n'), ((1232, 1263), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1244, 1263), True, 'import torch.nn as nn\n'), ((1277, 1292), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1287, 1292), True, 'import torch.nn as nn\n'), ((1306, 1323), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (1315, 1323), True, 'import torch.nn as nn\n'), ((1337, 1349), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1347, 1349), True, 'import torch.nn as nn\n'), ((309, 337), 'torch.nn.Linear', 'nn.Linear', (['in_feat', 'out_feat'], {}), '(in_feat, out_feat)\n', (318, 337), True, 'import torch.nn as nn\n'), ((452, 483), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (464, 483), True, 'import torch.nn as nn\n'), ((511, 526), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (521, 526), True, 'import torch.nn as nn\n'), ((395, 424), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_feat', '(0.8)'], {}), '(out_feat, 0.8)\n', (409, 424), True, 'import torch.nn as nn\n'), ((768, 786), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (775, 786), True, 'import numpy as np\n'), ((1086, 1104), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (1093, 1104), True, 'import numpy as np\n')] |
import numpy as np
import os, sys
import scipy.optimize as opt
import scipy.io
import time
import argparse
sys.path.append('../..')
from srlife import receiver, library
def id_cycles(times, period, days):
  """
  Return the indices into times that mark the start of each daily cycle.

  Parameters:
    times      Tube times
    period     Period of a single cycle
    days       Number of days in simulation

  Raises:
    ValueError if the number of boundaries found is not days + 1
  """
  # Cycle boundaries are the times that are an exact multiple of the period
  boundaries = np.where(np.mod(times, period) == 0)[0]
  inds = list(boundaries)
  if len(inds) != days + 1:
    raise ValueError("Tube times not compatible with the receiver number of days and cycle period!")
  return inds
def cycle_fatigue(strains, temperatures, material, nu = 0.5):
  """
  Calculate the fatigue damage accumulated over a single cycle.

  Parameters:
    strains        single cycle strains, strain components on the first axis,
                   time steps on the second
    temperatures   single cycle temperatures, time steps on the first axis
    material       damage model

  Additional parameters:
    nu             effective Poisson's ratio to use
  """
  # Peak temperature over the cycle at each point
  peak_temps = np.max(temperatures, axis = 0)
  # Running maximum equivalent strain range over all pairs of time steps
  peak_eranges = np.zeros(peak_temps.shape)

  # Loop-invariant prefactor of the equivalent strain range formula
  factor = np.sqrt(2) / (2*(1+nu))
  nsteps = strains.shape[1]
  for first in range(nsteps):
    for second in range(nsteps):
      de = strains[:,second] - strains[:,first]
      eq = factor * np.sqrt(
          (de[0] - de[1])**2 + (de[1]-de[2])**2 + (de[2]-de[0])**2.0
          + 3.0/2.0 * (de[3]**2.0 + de[4]**2.0 + de[5]**2.0)
          )
      peak_eranges = np.maximum(peak_eranges, eq)

  dmg = np.zeros(peak_eranges.shape)
  # pylint: disable=not-an-iterable
  for ind in np.ndindex(*dmg.shape):
    dmg[ind] = 1.0 / material.cycles_to_fail("nominalFatigue", peak_temps[ind], peak_eranges[ind])

  return dmg
def calculate_max_cycles(Dc, Df, material, rep_min = 1, rep_max = 1e6):
  """
  Calculate the maximum number of repetitions for a single point.

  Parameters:
    Dc        creep damage per simulated cycle, as a function of N
    Df        fatigue damage per simulated cycle, as a function of N
    material  damaged material properties

  Additional parameters:
    rep_min   lower bound of the bracket on the repetition count
    rep_max   upper bound of the bracket on the repetition count

  Returns 0 if rep_min repetitions already violate the creep-fatigue
  envelope, np.inf if rep_max repetitions are still inside it, and
  otherwise the bracketed root on the envelope boundary.
  """
  def crossing(N):
    # +0.5 inside the creep-fatigue envelope, -0.5 outside it
    return material.inside_envelope("cfinteraction", Df(N), Dc(N)) - 0.5

  # Already failed at the minimum repetition count
  if not material.inside_envelope("cfinteraction", Df(rep_min), Dc(rep_min)):
    return 0
  # Never fails within the bracket
  if material.inside_envelope("cfinteraction", Df(rep_max), Dc(rep_max)):
    return np.inf

  return opt.brentq(crossing, rep_min, rep_max)
def make_extrapolate(D, extrapolate="lump",order=1):
  """
  Build a function N -> total damage after N cycles, extrapolated from
  the simulated per-cycle damage history.

  Parameters:
    D            raw, per cycle damage

  Additional parameters:
    extrapolate  extrapolation scheme: "lump", "last", or "poly"
    order        polynomial order, used only by the "poly" scheme

  Raises:
    ValueError for an unrecognized extrapolation scheme
  """
  if extrapolate == "lump":
    # Mean per-cycle damage, scaled linearly with N
    return lambda N, D = D: N * np.sum(D) / len(D)
  if extrapolate == "last":
    def total_damage(N, D = D):
      N = int(N)
      if N < len(D) - 1:
        # Still within the simulated history: just accumulate
        return np.sum(D[:N])
      # Beyond the history: repeat the final cycle's damage
      return np.sum(D[:-1]) + D[-1] * N
    return total_damage
  if extrapolate == "poly":
    # Fit a polynomial to per-cycle damage versus cycle number (1-based)
    cycles = np.array(list(range(len(D)))) + 1
    coeffs = np.polyfit(cycles, D, order)
    return lambda N, coeffs=coeffs: np.polyval(coeffs, N)
  raise ValueError("Unknown damage extrapolation approach %s!" % extrapolate)
def calculate_damage(fileName,clearSky,days,material,source):
  """
  Estimate creep and fatigue damage for a receiver tube and write the
  results to 'damage_results.mat'.

  Parameters:
    fileName  path to the .mat file holding the quadrature results
              (stresses, mechanical strains, temperatures)
    clearSky  if True, load the clear-sky DNI input; otherwise the TMY input
    days      two-element [first, last] day range to analyze
    material  material name passed to library.load_material
    source    directory containing the solar input .mat files
  """
  # Pick the solar input file matching the requested DNI scenario
  if clearSky:
    mydict = scipy.io.loadmat('%s/input_clear_sky.mat'%source)
    case = 'clear'  # NOTE(review): 'case' is never used below -- confirm intent
  else:
    mydict = scipy.io.loadmat('%s/input_tmy_data.mat'%source)
    case = 'tmy'
  # Restrict the time vector (hours, presumably -- days are scaled by 24)
  # to the requested day range
  times = mydict['times'].flatten()
  index = np.where((times>=days[0]*24) & (times<=days[1]*24))[0]
  quadrature_results = scipy.io.loadmat(fileName)
  # Only damage_mat is used below; thermal_mat/deformation_mat are unused here
  thermal_mat, deformation_mat, damage_mat = library.load_material(material, 'base', 'base', 'base')
  ### Creep damage ###
  # Von Mises Stress
  vm = np.sqrt((
    (quadrature_results['stress_xx'] - quadrature_results['stress_yy'])**2.0 +
    (quadrature_results['stress_yy'] - quadrature_results['stress_zz'])**2.0 +
    (quadrature_results['stress_zz'] - quadrature_results['stress_xx'])**2.0 +
    6.0 * (quadrature_results['stress_xy']**2.0 +
    quadrature_results['stress_yz']**2.0 +
    quadrature_results['stress_xz']**2.0))/2.0)
  # Time to rupture
  ntimes = np.shape(quadrature_results['temperature'])[0]  # NOTE(review): unused
  period = 24
  # From here on, 'days' is the number of days in the window, not the range
  days = days[1] - days[0]
  times = times[index]
  tR = damage_mat.time_to_rupture("averageRupture", quadrature_results['temperature'], vm)
  # Per-step creep damage: dt / time-to-rupture (skips the initial state)
  dts = np.diff(times)
  time_dmg = dts[:,np.newaxis,np.newaxis]/tR[1:]
  # Break out to cycle damage
  inds = id_cycles(times, period, days)
  # Cycle damage
  Dc = np.array([np.sum(time_dmg[inds[i]:inds[i+1]], axis = 0) for i in range(days)])
  ### Fatigue cycles ###
  # Identify cycle boundaries
  inds = id_cycles(times, period, days)
  # Run through each cycle and ID max strain range and fatigue damage
  # Shear components carry a factor of 2 (engineering shear strain)
  strain_names = ['mechanical_strain_xx', 'mechanical_strain_yy', 'mechanical_strain_zz',
    'mechanical_strain_yz', 'mechanical_strain_xz', 'mechanical_strain_xy']
  strain_factors = [1.0,1.0,1.0,2.0, 2.0, 2.0]
  Df = np.array([cycle_fatigue(np.array([ef*quadrature_results[en][
    inds[i]:inds[i+1]] for
    en,ef in zip(strain_names, strain_factors)]),
    quadrature_results['temperature'][inds[i]:inds[i+1]], damage_mat)
    for i in range(days)])
  ### Calculating the number of cycles
  # Defining the number of columns as the number of days
  # This is used to create an array with nrows = nelements x nquad,
  # and ncols = number of days
  nc = days
  max_cycles = []
  for c,f in zip(Dc.reshape(nc,-1).T, Df.reshape(nc,-1).T):
    # The damage is extrapolated and the number of cycles is determined
    # There are three extrapolation approaches. Here we use the 'lump' one
    max_cycles.append(calculate_max_cycles(make_extrapolate(c), make_extrapolate(f), damage_mat))
  max_cycles = np.array(max_cycles)
  # Report the life-limiting (minimum) number of cycles over all points
  print(min(max_cycles))
  # Store per-point cumulative and per-cycle damage alongside the inputs
  quadrature_results['cumDc'] = np.cumsum(Dc.reshape(nc,-1).T, axis=1)
  quadrature_results['cumDf'] = np.cumsum(Df.reshape(nc,-1).T, axis=1)
  quadrature_results['Dc'] = Dc.reshape(nc,-1).T
  quadrature_results['Df'] = Df.reshape(nc,-1).T
  scipy.io.savemat('damage_results.mat', quadrature_results)
  #scipy.io.savemat(fileName, quadrature_results)
def str2bool(value):
  """
  Parse a command-line boolean string into a bool.

  argparse's type=bool is a known pitfall: bool('False') is True, so any
  non-empty value (including 'False') would enable the option.  This
  converter accepts the common spellings of true/false explicitly and
  rejects anything else with a clear error.
  """
  lowered = value.lower()
  if lowered in ('true', 't', 'yes', 'y', '1'):
    return True
  if lowered in ('false', 'f', 'no', 'n', '0'):
    return False
  raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

if __name__=='__main__':
  parser = argparse.ArgumentParser(description='Estimates average damage of a representative tube in a receiver panel')
  parser.add_argument('--filename', type=str, default='quadrature_results.mat', help='hdf5 containing the final results')
  # type=bool would treat '--clearSky False' as True; use an explicit converter
  parser.add_argument('--clearSky', type=str2bool, default=False, help='Run clear sky DNI (requires to have the solartherm results)')
  parser.add_argument('--days', nargs=2, type=int, default=[0,1], help='domain of days to simulate')
  parser.add_argument('--material', type=str, default='A230', help='Damage material')
  args = parser.parse_args()

  # Running function
  tinit = time.time()
  source = os.getcwd()
  calculate_damage(args.filename,args.clearSky,args.days,args.material,source)

  # Estimating simulation time
  seconds = time.time() - tinit
  m, s = divmod(seconds, 60)
  h, m = divmod(m, 60)
  print('Simulation time: {:d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
| [
"sys.path.append",
"numpy.ndindex",
"numpy.maximum",
"argparse.ArgumentParser",
"numpy.sum",
"os.getcwd",
"numpy.polyval",
"numpy.zeros",
"numpy.mod",
"time.time",
"numpy.shape",
"srlife.library.load_material",
"numpy.max",
"numpy.diff",
"numpy.array",
"numpy.where",
"numpy.sqrt"
] | [((108, 132), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (123, 132), False, 'import os, sys\n'), ((401, 422), 'numpy.mod', 'np.mod', (['times', 'period'], {}), '(times, period)\n', (407, 422), True, 'import numpy as np\n'), ((941, 969), 'numpy.max', 'np.max', (['temperatures'], {'axis': '(0)'}), '(temperatures, axis=0)\n', (947, 969), True, 'import numpy as np\n'), ((987, 1011), 'numpy.zeros', 'np.zeros', (['pt_temps.shape'], {}), '(pt_temps.shape)\n', (995, 1011), True, 'import numpy as np\n'), ((1336, 1362), 'numpy.zeros', 'np.zeros', (['pt_eranges.shape'], {}), '(pt_eranges.shape)\n', (1344, 1362), True, 'import numpy as np\n'), ((1410, 1432), 'numpy.ndindex', 'np.ndindex', (['*dmg.shape'], {}), '(*dmg.shape)\n', (1420, 1432), True, 'import numpy as np\n'), ((3253, 3308), 'srlife.library.load_material', 'library.load_material', (['material', '"""base"""', '"""base"""', '"""base"""'], {}), "(material, 'base', 'base', 'base')\n", (3274, 3308), False, 'from srlife import receiver, library\n'), ((3359, 3759), 'numpy.sqrt', 'np.sqrt', (["(((quadrature_results['stress_xx'] - quadrature_results['stress_yy']) ** \n 2.0 + (quadrature_results['stress_yy'] - quadrature_results['stress_zz'\n ]) ** 2.0 + (quadrature_results['stress_zz'] - quadrature_results[\n 'stress_xx']) ** 2.0 + 6.0 * (quadrature_results['stress_xy'] ** 2.0 + \n quadrature_results['stress_yz'] ** 2.0 + quadrature_results['stress_xz'\n ] ** 2.0)) / 2.0)"], {}), "(((quadrature_results['stress_xx'] - quadrature_results['stress_yy']\n ) ** 2.0 + (quadrature_results['stress_yy'] - quadrature_results[\n 'stress_zz']) ** 2.0 + (quadrature_results['stress_zz'] -\n quadrature_results['stress_xx']) ** 2.0 + 6.0 * (quadrature_results[\n 'stress_xy'] ** 2.0 + quadrature_results['stress_yz'] ** 2.0 + \n quadrature_results['stress_xz'] ** 2.0)) / 2.0)\n", (3366, 3759), True, 'import numpy as np\n'), ((3975, 3989), 'numpy.diff', 'np.diff', (['times'], {}), '(times)\n', (3982, 3989), 
True, 'import numpy as np\n'), ((5360, 5380), 'numpy.array', 'np.array', (['max_cycles'], {}), '(max_cycles)\n', (5368, 5380), True, 'import numpy as np\n'), ((5787, 5900), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Estimates average damage of a representative tube in a receiver panel"""'}), "(description=\n 'Estimates average damage of a representative tube in a receiver panel')\n", (5810, 5900), False, 'import argparse\n'), ((6388, 6399), 'time.time', 'time.time', ([], {}), '()\n', (6397, 6399), False, 'import time\n'), ((6410, 6421), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6419, 6421), False, 'import os, sys\n'), ((3103, 3162), 'numpy.where', 'np.where', (['((times >= days[0] * 24) & (times <= days[1] * 24))'], {}), '((times >= days[0] * 24) & (times <= days[1] * 24))\n', (3111, 3162), True, 'import numpy as np\n'), ((3770, 3813), 'numpy.shape', 'np.shape', (["quadrature_results['temperature']"], {}), "(quadrature_results['temperature'])\n", (3778, 3813), True, 'import numpy as np\n'), ((6541, 6552), 'time.time', 'time.time', ([], {}), '()\n', (6550, 6552), False, 'import time\n'), ((436, 453), 'numpy.where', 'np.where', (['(tm == 0)'], {}), '(tm == 0)\n', (444, 453), True, 'import numpy as np\n'), ((1301, 1327), 'numpy.maximum', 'np.maximum', (['pt_eranges', 'eq'], {}), '(pt_eranges, eq)\n', (1311, 1327), True, 'import numpy as np\n'), ((4140, 4185), 'numpy.sum', 'np.sum', (['time_dmg[inds[i]:inds[i + 1]]'], {'axis': '(0)'}), '(time_dmg[inds[i]:inds[i + 1]], axis=0)\n', (4146, 4185), True, 'import numpy as np\n'), ((1149, 1290), 'numpy.sqrt', 'np.sqrt', (['((de[0] - de[1]) ** 2 + (de[1] - de[2]) ** 2 + (de[2] - de[0]) ** 2.0 + 3.0 /\n 2.0 * (de[3] ** 2.0 + de[4] ** 2.0 + de[5] ** 2.0))'], {}), '((de[0] - de[1]) ** 2 + (de[1] - de[2]) ** 2 + (de[2] - de[0]) ** \n 2.0 + 3.0 / 2.0 * (de[3] ** 2.0 + de[4] ** 2.0 + de[5] ** 2.0))\n', (1156, 1290), True, 'import numpy as np\n'), ((1123, 1133), 'numpy.sqrt', 'np.sqrt', 
(['(2)'], {}), '(2)\n', (1130, 1133), True, 'import numpy as np\n'), ((2416, 2425), 'numpy.sum', 'np.sum', (['D'], {}), '(D)\n', (2422, 2425), True, 'import numpy as np\n'), ((2530, 2543), 'numpy.sum', 'np.sum', (['D[:N]'], {}), '(D[:N])\n', (2536, 2543), True, 'import numpy as np\n'), ((2718, 2734), 'numpy.polyval', 'np.polyval', (['p', 'N'], {}), '(p, N)\n', (2728, 2734), True, 'import numpy as np\n'), ((2564, 2578), 'numpy.sum', 'np.sum', (['D[:-1]'], {}), '(D[:-1])\n', (2570, 2578), True, 'import numpy as np\n')] |
"""
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for ActivityType interface class.
"""
import unittest
import nose.tools as nt
import numpy as np
from kwiver.vital.types import ActivityType as AT
class TestActivityType(unittest.TestCase):
    def test_constructor(self):
        """Exercise the default, array-based, and (name, score) constructors."""
        AT()
        AT(np.array(["name1","class_name2","class3"]),np.array([1.0,2.3,3.14]))
        AT("name", 2.0)

    def test_methods(self):
        """Walk the ActivityType accessors and mutators."""
        atype = AT(np.array(["name1","class_name2","class3"]),np.array([1.0,2.3,3.14]))

        # String conversion and iteration
        self.assertIsInstance(str(atype), str)
        self.assertIsInstance(atype.__repr__(), str)
        entries = iter(atype)
        self.assertIsInstance(next(entries), tuple)

        # Membership queries
        self.assertTrue(atype.has_class_name("name1"))
        self.assertFalse(atype.has_class_name("foo"))

        # Score lookup
        self.assertEqual(atype.score("class_name2"), 2.3)

        # Most-likely class and score
        self.assertEqual(atype.get_most_likely_class(), "class3")
        self.assertEqual(3.14, atype.get_most_likely_score())

        # Adding and updating scores
        atype.set_score("foo1", 3.8)
        self.assertEqual(3.8, atype.score("foo1"))
        atype.set_score("foo2", 3.9)
        self.assertTrue(atype.has_class_name("foo2"))
        self.assertEqual(atype.score("foo2"), 3.9)

        # Removing a score
        atype.delete_score("foo1")
        self.assertFalse(atype.has_class_name("foo1"))

        # Class-name listings: current, thresholded, and all ever seen
        np.testing.assert_array_equal(atype.class_names(), np.array(["foo2","class3","class_name2","name1"]))
        np.testing.assert_array_equal(atype.class_names(3.0), np.array(["foo2","class3"]))
        np.testing.assert_array_equal(atype.all_class_names(),np.array(["class3","class_name2","foo1","foo2","name","name1"]))

        print()
        print("--------------------------")
        print(atype.all_class_names())
        print("--------------------------")
        for entry in atype.all_class_names():
            print()
            print("--------------")
            print(entry)
            print("--------------")
| [
"kwiver.vital.types.ActivityType",
"numpy.array"
] | [((1815, 1819), 'kwiver.vital.types.ActivityType', 'AT', ([], {}), '()\n', (1817, 1819), True, 'from kwiver.vital.types import ActivityType as AT\n'), ((1908, 1923), 'kwiver.vital.types.ActivityType', 'AT', (['"""name"""', '(2.0)'], {}), "('name', 2.0)\n", (1910, 1923), True, 'from kwiver.vital.types import ActivityType as AT\n'), ((1831, 1875), 'numpy.array', 'np.array', (["['name1', 'class_name2', 'class3']"], {}), "(['name1', 'class_name2', 'class3'])\n", (1839, 1875), True, 'import numpy as np\n'), ((1874, 1900), 'numpy.array', 'np.array', (['[1.0, 2.3, 3.14]'], {}), '([1.0, 2.3, 3.14])\n', (1882, 1900), True, 'import numpy as np\n'), ((1968, 2012), 'numpy.array', 'np.array', (["['name1', 'class_name2', 'class3']"], {}), "(['name1', 'class_name2', 'class3'])\n", (1976, 2012), True, 'import numpy as np\n'), ((2011, 2037), 'numpy.array', 'np.array', (['[1.0, 2.3, 3.14]'], {}), '([1.0, 2.3, 3.14])\n', (2019, 2037), True, 'import numpy as np\n'), ((2974, 3026), 'numpy.array', 'np.array', (["['foo2', 'class3', 'class_name2', 'name1']"], {}), "(['foo2', 'class3', 'class_name2', 'name1'])\n", (2982, 3026), True, 'import numpy as np\n'), ((3083, 3111), 'numpy.array', 'np.array', (["['foo2', 'class3']"], {}), "(['foo2', 'class3'])\n", (3091, 3111), True, 'import numpy as np\n'), ((3171, 3239), 'numpy.array', 'np.array', (["['class3', 'class_name2', 'foo1', 'foo2', 'name', 'name1']"], {}), "(['class3', 'class_name2', 'foo1', 'foo2', 'name', 'name1'])\n", (3179, 3239), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.