code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# **************************************************************************** #
# #
# ::: :::::::: #
# raster.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: tanwenxuan <<EMAIL>> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/04/20 17:05:30 by winshare #+# #+# #
# Updated: 2020/06/02 12:45:00 by tanwenxuan ### ########.fr #
# #
# **************************************************************************** #
# Copyright 2020 winshare
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
import gdal
import glob
import matplotlib.pyplot as plt
import numpy as np
class Raster():
    """TIFF / GeoTIFF processing toolkit built on GDAL.

    Reads a raster into a numpy array (``self.image``), keeps its
    geo-referencing metadata, offers percentage stretching for display,
    polygonization of boolean masks to shapefiles, and conversions between
    pixel, projected and geographic coordinates.
    """

    def __init__(self, filename=None, channel=None, display=False, debug=False):
        """
        filename: could be filedir or path of single file
        channel: band indices kept for multi-band rasters (default [0, 1, 2])
        display: show a percentage-stretched preview right after reading
        debug: verbosity flag (currently only stored)
        """
        print("# ---------------------------------------------------------------------------- #")
        print("# TIFF process Toolkit #")
        print("# ---------------------------------------------------------------------------- #")
        self.display = display
        self.debug = debug
        # BUG FIX: the original used a shared mutable default ``channel=[0, 1, 2]``;
        # build a fresh list per instance instead (backward compatible).
        self.channel = [0, 1, 2] if channel is None else channel
        if filename is not None:
            if os.path.isfile(filename):
                print("# -----TIFF Class Init with :", filename)
                self.filename = filename
                self.readtif(self.filename)
        else:
            print("# -----Class TIF init without filename")

    # ---------------------------------------------------------------------------- #
    #                                     Init                                      #
    # ---------------------------------------------------------------------------- #
    def readtif(self, filename):
        """Read a GeoTIFF into ``self.image`` and cache its geo-metadata."""
        self.dataset = gdal.Open(filename)  # open the file
        assert self.dataset is not None, "Can't Read Dataset ,Invalid tif file : " + filename
        self.width = self.dataset.RasterXSize  # number of raster columns
        self.height = self.dataset.RasterYSize  # number of raster rows
        self.geotransform = self.dataset.GetGeoTransform()  # affine transform (6 params)
        self.projection = self.dataset.GetProjection()  # map projection info
        self.image = self.dataset.ReadAsArray(0, 0, self.width, self.height)
        # Pick the narrowest GDAL type able to hold the pixel values.
        if 'uint8' in self.image.dtype.name:
            self.datatype = gdal.GDT_Byte
        elif 'int8' in self.image.dtype.name:
            self.datatype = gdal.GDT_Byte
        elif 'int16' in self.image.dtype.name:
            self.datatype = gdal.GDT_UInt16
        else:
            self.datatype = gdal.GDT_Float32
        if len(self.image.shape) == 2:
            self.channel_count = 1
        if len(self.image.shape) == 3:
            self.channel_count, _, _ = self.image.shape
            # ReadAsArray returns (C, H, W); a leading dim > 20 is taken to mean
            # the data is already channel-last, otherwise transpose to (H, W, C).
            if self.channel_count > 20:
                _, _, self.channel_count = self.image.shape
            else:
                self.image = self.image.transpose(1, 2, 0)
            self.image = self.image[:, :, self.channel[:]]
        if self.display:
            self.displayimagery()

    def displayimagery(self):
        """Show a percentage-stretched preview of the current image."""
        self.percentage = self.fast_percentager_strentching(self.image)
        plt.imshow(self.percentage)
        plt.show()

    # ---------------------------------------------------------------------------- #
    #                         fast_percentager_strentching                          #
    # ---------------------------------------------------------------------------- #
    def fast_percentager_strentching(
            self, image=None, percentage=2, sample=10000):
        """
        Stretch contrast by clipping the lowest/highest ``percentage`` percent
        of sampled pixel values and rescaling the image to [0, 1].

        image: ndarray (W, H, C) or (W, H); defaults to ``self.image``
        percentage: N in [0, 100]
        sample: number of random pixels used to estimate the clip bounds
        Returns the stretched image (also stored back into ``self.image``).
        """
        # BUG FIX: the original assertion parsed as
        # ``(not (percentage > 100)) or (percentage < 0)`` and never fired.
        assert 0 <= percentage <= 100, "Invalid Percentage Value"
        print(
            "# -------------------------- percentager_strentching -------------------------")
        print(
            "# ------------------- process with percentage : ",
            percentage,
            "% ------------------")
        percentage = percentage / 100
        # BUG FIX: ``isinstance(image, None)`` raised TypeError; use an identity test.
        if image is None:
            image = self.image
        W, H = image.shape[0], image.shape[1]
        w = np.random.randint(0, W, sample)
        h = np.random.randint(0, H, sample)
        if len(image.shape) == 3:
            points = image[w, h, :]
            point = [np.mean(channels) for channels in points]
        else:
            points = image[w, h]
            point = points
        pointset = sorted(point)
        # Renamed from ``min``/``max`` to avoid shadowing the builtins; the
        # upper index is clamped so percentage == 0 cannot index past the end.
        lo = pointset[int(sample * percentage)]
        hi = pointset[min(int(sample * (1 - percentage)), sample - 1)]
        image[image > hi] = hi
        image[image < lo] = lo
        if hi == lo:
            # Degenerate (constant) image after clipping: avoid division by zero.
            image = np.zeros_like(image, dtype=float)
        else:
            image = (image - lo) / (hi - lo)
        print("# ----- Max : ", hi, " Min : ", lo, "-----")
        self.image = image
        return image

    def set(self, Data):
        """
        Register a boolean mask used later by ``writethreshold2shp()``.
        """
        if len(Data.shape) == 2:
            # BUG FIX: the assertion was inverted -- the message demands a bool
            # ndarray but the original check rejected bool elements.
            assert isinstance(
                Data[0, 0], (bool, np.bool_)), 'Polygonize Data ( SetRasterData(Data) ) Must be bool Ndarray,But now in ' + str(type(Data[0, 0]))
        self.RasterSet = True
        self.imageoutput = Data

    def writeimagery(self, name=None, format=["png"]):
        """
        Dump the current image with cv2.

        name: output path; defaults to ``<filename>_imagery.png``
        format: unused legacy parameter kept for interface compatibility
        """
        if name is None:
            name = self.filename + "_imagery.png"
        # NOTE(review): cv2 is referenced but never imported in this module --
        # add ``import cv2`` at file level before calling this method.
        cv2.imwrite(name, self.image)

    def writetif(self, outputname):
        """
        Write file in tiff format.

        NOTE(review): not implemented yet -- placeholder kept for interface.
        """
        pass

    def resize_raster(self, resize_ratio=0.5):
        """
        Resize the raster image with cv2 and rescale the affine transform's
        pixel-size entries (indices 1 and 5) so geo-referencing stays valid.
        """
        size = (int(self.width * resize_ratio),
                int(self.height * resize_ratio))
        # BUG FIX: ``self.image_nparray`` and ``self.geotrans`` were never
        # defined anywhere; use the attributes populated by readtif().
        self.resizedimage = cv2.resize(
            self.image, size, interpolation=cv2.INTER_AREA)
        self.ResizeGeo = list(self.geotransform)
        print('input Geo parameter, :', self.ResizeGeo)
        self.ResizeGeo[1] = float(self.ResizeGeo[1] / resize_ratio)
        self.ResizeGeo[5] = float(self.ResizeGeo[5] / resize_ratio)
        print('resized Geo parameter ,:', self.ResizeGeo)
        self.geotransform = tuple(self.ResizeGeo)

    def writethreshold2shp(self):
        """
        Polygonize the registered boolean map into an ESRI shapefile.
        :return: path of the written shapefile, or (0, 0) if the layer
                 could not be created
        """
        assert self.dataset is not None, 'Null dataset'
        assert self.RasterSet, 'Please Set Bool map in ndarray with SetRasterData() \n, Current output polygon src band is ' + \
            str(self.imageoutput)
        # NOTE(review): ``self.out_middle_tif_name`` is never assigned in this
        # class -- it must be set by the caller before polygonizing; verify.
        shp_name = self.out_middle_tif_name + '_polygonized.shp'
        srcband = self.dataset.GetRasterBand(1)
        maskband = None
        driver_name = 'ESRI Shapefile'  # renamed: avoid shadowing builtin ``format``
        drv = ogr.GetDriverByName(driver_name)
        dst_ds = drv.CreateDataSource(shp_name)
        srs = osr.SpatialReference()
        # BUG FIX: ``self.outdataset`` does not exist; createdataset() stores
        # the writable dataset as ``self.outputdataset``.
        srs.ImportFromWkt(self.outputdataset.GetProjectionRef())
        dst_layer = dst_ds.CreateLayer(
            shp_name, geom_type=ogr.wkbPolygon, srs=srs)
        if dst_layer is None:
            return 0, 0
        dst_field = dst_layer.GetLayerDefn().GetFieldIndex(shp_name)
        prog_func = gdal.TermProgress
        options = []
        gdal.Polygonize(srcband, maskband, dst_layer, dst_field, options,
                        callback=prog_func)
        dst_ds = None  # release the datasource so the driver flushes to disk
        print('Shapefile has write in ', shp_name)
        return shp_name

    def clear(self):
        """Reset the object to the empty (just-constructed) state."""
        print('-----TIF Object has been init with null')
        self.geotransform = None
        self.image = None
        self.dataset = None
        self.projection = None

    def createdataset(self, out_put_tif_name):
        """Create a writable GTiff dataset matching the source raster's size,
        band count, data type, geo-transform and projection."""
        # The data type must be known up front so GDAL can size the buffers.
        driver = gdal.GetDriverByName("GTiff")
        self.outputdataset = driver.Create(
            out_put_tif_name,
            self.width,
            self.height,
            self.channel_count,
            self.datatype)
        self.outputdataset.SetGeoTransform(self.geotransform)  # write affine transform
        self.outputdataset.SetProjection(self.projection)  # write projection
        print('Create Dataset With ', self.dataset)
        print('Create Shape is ', (self.height, self.width))

    # --------------------------------- Transform -------------------------------- #
    # ---------------------------------------------------------------------------- #
    #                                Cord Transform                                 #
    # ---------------------------------------------------------------------------- #
    def getSRSPair(self):
        '''
        Get the projected and geographic reference systems of the dataset.
        :return: (projected SRS, geographic SRS)
        '''
        prosrs = osr.SpatialReference()
        prosrs.ImportFromWkt(self.dataset.GetProjection())
        geosrs = prosrs.CloneGeogCS()
        return prosrs, geosrs

    def geo2lonlat(self, x, y):
        '''
        Convert projected coordinates to lon/lat (the projection is taken
        from the opened dataset).
        :param x: projected x coordinate
        :param y: projected y coordinate
        :return: (lon, lat) corresponding to (x, y)
        '''
        prosrs, geosrs = self.getSRSPair()
        ct = osr.CoordinateTransformation(prosrs, geosrs)
        coords = ct.TransformPoint(x, y)
        return coords[:2]

    def lonlat2geo(self, lon, lat):
        '''
        Convert lon/lat to projected coordinates (the projection is taken
        from the opened dataset).
        :param lon: geographic longitude
        :param lat: geographic latitude
        :return: projected coordinates corresponding to (lon, lat)
        '''
        prosrs, geosrs = self.getSRSPair()
        ct = osr.CoordinateTransformation(geosrs, prosrs)
        coords = ct.TransformPoint(lon, lat)
        return coords[:2]

    def imagexy2geo(self, row, col):
        '''
        Convert pixel (row, col) to projected/geographic coordinates using
        GDAL's six-parameter affine model.
        :param row: pixel row
        :param col: pixel column
        :return: (x, y) for the given (row, col)
        '''
        trans = self.dataset.GetGeoTransform()
        px = trans[0] + col * trans[1] + row * trans[2]
        py = trans[3] + col * trans[4] + row * trans[5]
        return px, py

    def geo2imagexy(self, x, y):
        '''
        Convert projected/geographic coordinates back to pixel (row, col) by
        solving the six-parameter affine model.
        :param x: projected or geographic x
        :param y: projected or geographic y
        :return: ndarray [col, row] solving the affine system
        '''
        trans = self.dataset.GetGeoTransform()
        a = np.array([[trans[1], trans[2]], [trans[4], trans[5]]])
        b = np.array([x - trans[0], y - trans[3]])
        return np.linalg.solve(a, b)  # numpy linalg.solve equation

    def lonlat2imagexy(self, x, y):
        """lon/lat -> projected -> pixel coordinates."""
        x1, y1 = self.lonlat2geo(x, y)
        x2, y2 = self.geo2imagexy(x1, y1)
        return x2, y2

    def imagexy2lonlat(self, x, y):
        """pixel -> projected -> lon/lat coordinates."""
        x1, y1 = self.imagexy2geo(x, y)
        x2, y2 = self.geo2lonlat(x1, y1)
        return x2, y2

    def getfiles_from_dir(self, dir):
        """
        Collect the ``*.tif`` file paths under ``dir`` into ``self.files``.
        """
        # BUG FIX: the assertion was inverted -- it rejected every valid
        # directory instead of rejecting invalid ones.
        assert os.path.isdir(dir), "Invalid dir format" + str(dir)
        print("# -----Read Dir :", dir)
        self.files = glob.glob(os.path.join(dir, "./*.tif"))
def main():
    """This part will show the standard function guide."""
    # Intentionally empty: placeholder for a future usage demo of Raster.


if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.linalg.solve",
"gdal.Open",
"gdal.GetDriverByName",
"osgeo.osr.SpatialReference",
"os.path.join",
"os.path.isfile",
"numpy.array",
"gdal.Polygonize",
"numpy.random.randint",
"os.path.isdir",
"osgeo.ogr.GetDriverByName",
"osgeo.osr.Coordinate... | [((2810, 2829), 'gdal.Open', 'gdal.Open', (['filename'], {}), '(filename)\n', (2819, 2829), False, 'import gdal\n'), ((5620, 5651), 'numpy.random.randint', 'np.random.randint', (['(0)', 'W', 'sample'], {}), '(0, W, sample)\n', (5637, 5651), True, 'import numpy as np\n'), ((5664, 5695), 'numpy.random.randint', 'np.random.randint', (['(0)', 'H', 'sample'], {}), '(0, H, sample)\n', (5681, 5695), True, 'import numpy as np\n'), ((8208, 8235), 'osgeo.ogr.GetDriverByName', 'ogr.GetDriverByName', (['format'], {}), '(format)\n', (8227, 8235), False, 'from osgeo import ogr\n'), ((8298, 8320), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (8318, 8320), False, 'from osgeo import osr\n'), ((8682, 8772), 'gdal.Polygonize', 'gdal.Polygonize', (['srcband', 'maskband', 'dst_layer', 'dst_field', 'options'], {'callback': 'prog_func'}), '(srcband, maskband, dst_layer, dst_field, options, callback=\n prog_func)\n', (8697, 8772), False, 'import gdal\n'), ((9160, 9189), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (9180, 9189), False, 'import gdal\n'), ((10148, 10170), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (10168, 10170), False, 'from osgeo import osr\n'), ((10576, 10620), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['prosrs', 'geosrs'], {}), '(prosrs, geosrs)\n', (10604, 10620), False, 'from osgeo import osr\n'), ((10976, 11020), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['geosrs', 'prosrs'], {}), '(geosrs, prosrs)\n', (11004, 11020), False, 'from osgeo import osr\n'), ((11839, 11893), 'numpy.array', 'np.array', (['[[trans[1], trans[2]], [trans[4], trans[5]]]'], {}), '([[trans[1], trans[2]], [trans[4], trans[5]]])\n', (11847, 11893), True, 'import numpy as np\n'), ((11906, 11944), 'numpy.array', 'np.array', (['[x - trans[0], y - trans[3]]'], {}), '([x - trans[0], y - trans[3]])\n', (11914, 11944), True, 
'import numpy as np\n'), ((11960, 11981), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (11975, 11981), True, 'import numpy as np\n'), ((2247, 2271), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2261, 2271), False, 'import os\n'), ((4393, 4420), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.percentage'], {}), '(self.percentage)\n', (4403, 4420), True, 'import matplotlib.pyplot as plt\n'), ((4422, 4432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4430, 4432), True, 'import matplotlib.pyplot as plt\n'), ((12430, 12448), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (12443, 12448), False, 'import os\n'), ((12553, 12581), 'os.path.join', 'os.path.join', (['dir', '"""./*.tif"""'], {}), "(dir, './*.tif')\n", (12565, 12581), False, 'import os\n'), ((5787, 5804), 'numpy.mean', 'np.mean', (['channels'], {}), '(channels)\n', (5794, 5804), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import foolbox
from cnns.nnlib.robustness.utils import to_fft
from cnns.nnlib.robustness.utils import to_fft_magnitude
import torch
from cnns.nnlib.pytorch_layers.fft_band_2D import FFTBandFunction2D
from cnns.nnlib.pytorch_layers.fft_band_2D_complex_mask import \
FFTBandFunctionComplexMask2D
from cnns.nnlib.utils.complex_mask import get_hyper_mask
from cnns.nnlib.utils.complex_mask import get_disk_mask
from cnns.nnlib.utils.object import Object
from cnns.nnlib.utils.arguments import Arguments
from cnns.nnlib.utils.shift_DC_component import shift_DC
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from torch.nn.functional import pad as torch_pad
import cv2
# figuresizex = 10.0
# figuresizey = 10.0
# generate images
# dataset = "mnist"
# Select the source dataset and the file format used for every saved image.
dataset = "imagenet"
format = "png"  # NOTE(review): shadows the builtin ``format``
# Image side lengths for the chosen dataset.
if dataset == "imagenet":
    limx, limy = 224, 224
elif dataset == "mnist":
    limx, limy = 28, 28
half = limx // 2
# Plot extents: pixel coordinates and DC-centered frequency coordinates.
extent1 = [0, limx, 0, limy]
extent2 = [-half + 1, half, -half + 1, half]
# Fetch a small batch of sample images (channels-first layout) via foolbox.
images, labels = foolbox.utils.samples(dataset=dataset, index=0,
                                      batchsize=20,
                                      shape=(limx, limy),
                                      data_format='channels_first')
print("max value in images pixels: ", np.max(images))
# Normalize pixels from [0, 255] to [0, 1] and keep the first sample.
images = images / 255
image = images[0]
label = labels[0]
print("label: ", label)
is_log = True
# cv2.imwrite("image-cv2.png", (image * 255).astype(np.uint8))
def save_image(filename, image):
    """Save a channels-first float image (values in [0, 1]) as an RGB file."""
    channels_last = np.transpose(image, (1, 2, 0))
    pixels = (channels_last * 255).astype(np.uint8)
    result_image = Image.fromarray(pixels, mode="RGB")
    result_image.save(filename + "." + format)
def save_image_CHW(filename, image):
    """Save a channels-last float image (values in [0, 1]) as an RGB file."""
    pixels = (image * 255).astype(np.uint8)
    result_image = Image.fromarray(pixels, mode="RGB")
    result_image.save(filename + "." + format)
# Save the raw sample in both layouts for reference.
save_image("image", image)
save_image_CHW("image_CHW", image)
# Arguments consumed by FFTBandFunction2D: compression rate and flags.
args = Arguments()
args.compress_fft_layer = 80
args.compress_rate = args.compress_fft_layer
args.next_power2 = False
args.is_DC_shift = False
# ``result`` doubles as the autograd ctx object, so the forward pass can
# stash intermediates (e.g. xfft) on it -- see FFTBandFunction2D.
result = Object()
image = torch.from_numpy(image).unsqueeze(0)  # add batch dim: (1, C, H, W)
N, C, H, W = image.size()
pad = 3
pad_fft = 26
side = "one"  # pad the extra FFT margin on one side or on both sides
if side == "two":
    Hfft = H + pad * 2 + pad_fft * 2
    # (padLeft, padRight, padTop, padBottom)
    image = torch_pad(image,
                      (pad + pad_fft, pad + pad_fft, pad + pad_fft, pad + pad_fft),
                      'constant', 0)
elif side == "one":
    Hfft = H + pad * 2 + pad_fft
    # (padLeft, padRight, padTop, padBottom)
    image = torch_pad(image,
                      (pad, pad + pad_fft,
                       pad, pad + pad_fft),
                      'constant', 0)
else:
    raise Exception(f"Unknown side type: {side}")
print("Hfft: ", Hfft)
# Run the FFT band-compression forward pass and drop the batch dim.
image_proxy = FFTBandFunction2D.forward(
    ctx=result,
    input=image,
    args=args,
    onesided=False).numpy().squeeze(0)
if side == "two":
    # Crop the symmetric padding back off to recover the original extent.
    image_proxy = image_proxy[..., pad + pad_fft:limx + pad + pad_fft,
                              pad + pad_fft:limy + pad + pad_fft]
# Presumably the forward pass stored the spectrum on ctx -- TODO confirm.
xfft_proxy = result.xfft.squeeze(0)
save_image(
    "image_proxy" + str(args.compress_rate) + "_pad_fft_" + str(
        pad_fft) + "_pad_" + str(pad) + "_side_" + str(side), image_proxy)
| [
"cnns.nnlib.utils.arguments.Arguments",
"cnns.nnlib.utils.object.Object",
"torch.from_numpy",
"numpy.max",
"cnns.nnlib.pytorch_layers.fft_band_2D.FFTBandFunction2D.forward",
"torch.nn.functional.pad",
"numpy.transpose",
"foolbox.utils.samples"
] | [((1131, 1246), 'foolbox.utils.samples', 'foolbox.utils.samples', ([], {'dataset': 'dataset', 'index': '(0)', 'batchsize': '(20)', 'shape': '(limx, limy)', 'data_format': '"""channels_first"""'}), "(dataset=dataset, index=0, batchsize=20, shape=(limx,\n limy), data_format='channels_first')\n", (1152, 1246), False, 'import foolbox\n'), ((2014, 2025), 'cnns.nnlib.utils.arguments.Arguments', 'Arguments', ([], {}), '()\n', (2023, 2025), False, 'from cnns.nnlib.utils.arguments import Arguments\n'), ((2159, 2167), 'cnns.nnlib.utils.object.Object', 'Object', ([], {}), '()\n', (2165, 2167), False, 'from cnns.nnlib.utils.object import Object\n'), ((1398, 1412), 'numpy.max', 'np.max', (['images'], {}), '(images)\n', (1404, 1412), True, 'import numpy as np\n'), ((2386, 2483), 'torch.nn.functional.pad', 'torch_pad', (['image', '(pad + pad_fft, pad + pad_fft, pad + pad_fft, pad + pad_fft)', '"""constant"""', '(0)'], {}), "(image, (pad + pad_fft, pad + pad_fft, pad + pad_fft, pad +\n pad_fft), 'constant', 0)\n", (2395, 2483), True, 'from torch.nn.functional import pad as torch_pad\n'), ((2176, 2199), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2192, 2199), False, 'import torch\n'), ((2626, 2699), 'torch.nn.functional.pad', 'torch_pad', (['image', '(pad, pad + pad_fft, pad, pad + pad_fft)', '"""constant"""', '(0)'], {}), "(image, (pad, pad + pad_fft, pad, pad + pad_fft), 'constant', 0)\n", (2635, 2699), True, 'from torch.nn.functional import pad as torch_pad\n'), ((2861, 2938), 'cnns.nnlib.pytorch_layers.fft_band_2D.FFTBandFunction2D.forward', 'FFTBandFunction2D.forward', ([], {'ctx': 'result', 'input': 'image', 'args': 'args', 'onesided': '(False)'}), '(ctx=result, input=image, args=args, onesided=False)\n', (2886, 2938), False, 'from cnns.nnlib.pytorch_layers.fft_band_2D import FFTBandFunction2D\n'), ((1653, 1683), 'numpy.transpose', 'np.transpose', (['image', '(1, 2, 0)'], {}), '(image, (1, 2, 0))\n', (1665, 1683), True, 'import numpy as np\n')] |
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train,filter_boxes
from core.dataset import Dataset
from core.config import cfg, CFLAGS
from core import utils
from core.utils import freeze_all, unfreeze_all
from core.deblur_losses import get_loss, DoubleGAN, SingleGAN
# Global command-line configuration object used throughout main().
FLAGS = CFLAGS()
def main():
    """End-to-end training for the joint deblur (GAN) + YOLOv5 detector.

    Builds the D2 model (three YOLO heads plus a deblurred-image output),
    one or two PatchGAN discriminators, trains with a combined detection +
    adversarial loss balanced by GradNorm, periodically saves weights and
    runs detection on the test annotations, then plots the loss curves.
    """
    INPUT_SIZE = FLAGS.size
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    CLASSES = utils.read_class_names(cfg.YOLO.CLASSES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    predicted_dir_path = './AP/predicted'
    loss_dir_path = './AP/loss'
    text_result_path = './AP/detect'
    trainset = Dataset(FLAGS, is_training=True)
    adv_lambda = 0.001  # weight of the adversarial term in the generator loss
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch
    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    feature_maps = YOLO(FLAGS.scale_v5, input_layer, NUM_CLASS)
    bbox_tensors = []
    # The first three feature maps are YOLO heads (strides 8/16/32); any
    # extra output is treated as the deblur generator's image.
    for i, fm in enumerate(feature_maps):
        if i == 0:
            bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        elif i == 1:
            bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        elif i==2:
            bbox_tensor = decode_train(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        else:
            result_G = fm
            bbox_tensors.append(result_G)
    D2_model = tf.keras.Model(input_layer, bbox_tensors)
    # Full-image discriminator; a patch discriminator is added below when
    # DoubleGAN is enabled.
    full_model = NLayerDiscriminator(ndf=64, n_layers=5)
    full_model.build([None, cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    optimizer_D2 = tf.keras.optimizers.Adam()
    optimizer_Dis = tf.keras.optimizers.SGD()
    criterionG, criterionD = get_loss()
    if cfg.TRAIN.DoubleGAN:
        patch_model = NLayerDiscriminator(ndf=64, n_layers=3)
        patch_model.build([None, cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
        adv_trainer = DoubleGAN(patch_model, full_model, criterionD)
    else:
        adv_trainer = SingleGAN(full_model, criterionD)
    # NOTE(review): train_step below always uses optimizer_P, but it is only
    # created when cfg.TRAIN.GRADNORM is set -- confirm GRADNORM is enabled.
    if cfg.TRAIN.GRADNORM:
        optimizer_P = tf.keras.optimizers.SGD()
    D2_model.summary()
    T_freeze_layers = utils.load_True_freeze_layer(FLAGS.scale_v5)
    # metrics draw now
    total_loss_metric = tf.metrics.Mean()
    loss1_metric = tf.metrics.Mean()
    loss2_metric = tf.metrics.Mean()
    total_loss_result= []
    loss1_result= []
    loss2_result= []
    # GradNorm trainable task weights for the two losses.
    Weightloss1 = tf.Variable(1.0)
    Weightloss2 = tf.Variable(1.0)
    params = [Weightloss1, Weightloss2]
    alph = 0.16  # GradNorm asymmetry hyperparameter

    ## @tf.function
    def train_step(image_data, target):
        # One optimization step: update the discriminator(s), then D2 on the
        # combined YOLO + deblur loss, then the GradNorm task weights.
        # NOTE(review): image_data[0] appears to be the blurred input and
        # image_data[1] the sharp target -- confirm against Dataset.
        start_time = time.time()
        # with tf.GradientTape() as tape1,tf.GradientTape() as tape2:
        ##Experiments have found that this performance is better than "persistent=True".
        with tf.GradientTape() as tape1,tf.GradientTape() as tape2,tf.GradientTape() as tape3,tf.GradientTape() as tape4,tf.GradientTape() as tape5:
            pred_result = D2_model(image_data[0], training=True)
            G_im = pred_result[-1]
            loss_D = loss_content = loss_adv = loss_G = giou_loss = conf_loss = prob_loss = 0
            #update Discriminator
            loss_D = 1000 * adv_lambda * adv_trainer.loss_d(G_im, image_data[1])
            gradients_Dis = tape1.gradient(loss_D, adv_trainer.get_params())
            optimizer_Dis.apply_gradients(zip(gradients_Dis, adv_trainer.get_params()))
            #update D2Net
            loss_content = criterionG(G_im, image_data[1])
            loss_adv = adv_trainer.loss_g(G_im, image_data[1])
            loss_G = 1000*(loss_content + adv_lambda * loss_adv)
            # Sum the three YOLO head losses (giou / confidence / class).
            for i in range(ANCHORS.shape[0]):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES, NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]
            yolo_loss = giou_loss + conf_loss + prob_loss
            # Weighted two-task loss (GradNorm weights).
            l1 = params[0]*yolo_loss
            l2 = params[1]*loss_G
            total_loss = (l1 + l2)/2
            gradients_D2 = tape2.gradient(total_loss, D2_model.trainable_variables)
            optimizer_D2.apply_gradients(zip(gradients_D2, D2_model.trainable_variables))
            ###Gradnorm###
            # L0: reference initial loss -- presumably measured empirically;
            # TODO confirm.
            L0 = 183
            LP = D2_model.trainable_variables[162] #D_conv2d_54
            G1R = tape3.gradient(l1, LP)
            G1 = tf.norm(G1R, ord=2)
            G2R = tape4.gradient(l2, LP)
            G2 = tf.norm(G2R, ord=2)
            G_avg = (G1+G2)/2
            # Calculating relative losses
            lhat1 = (l1)/L0
            lhat2 = (l2)/L0
            lhat_avg = (lhat1 + lhat2)/2
            inv_rate1 = lhat1/lhat_avg
            inv_rate2 = lhat2/lhat_avg
            C1 = G_avg*(inv_rate1)**alph
            C2 = G_avg*(inv_rate2)**alph
            C1 = tf.stop_gradient(tf.identity(C1))
            C2 = tf.stop_gradient(tf.identity(C2))
            # Gradnorm loss
            loss_gradnorm = tf.math.reduce_sum(tf.math.abs(G1-C1)) + tf.math.reduce_sum(tf.math.abs(G2-C2))
            grad_grad = tape5.gradient(loss_gradnorm, params)
            optimizer_P.apply_gradients(grads_and_vars=zip(grad_grad, params))
        total_loss_metric.update_state(values=total_loss)
        loss1_metric.update_state(values=yolo_loss)
        loss2_metric.update_state(values=loss_G)
        time_per_step = time.time() - start_time
        print("Step: {}/{},lr: {:.6f}, {:.2f}s/step, total_loss: {:.5f}, "
              "yolo loss: {:.5f}, G_loss: {:.5f}, loss_adv: {:.5f}, D_loss: {:.5f}".format(
            global_steps.numpy(),
            total_steps,
            optimizer_D2.lr.numpy(),
            time_per_step,
            total_loss,
            yolo_loss,
            loss_G,
            adv_lambda*loss_adv,
            loss_D
        ))
        # update learning rate
        global_steps.assign_add(1)
        if global_steps < warmup_steps:
            lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
        else:
            # Cosine decay from LR_INIT down to LR_END after warmup.
            lr = cfg.TRAIN.LR_END + 0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * (
                (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
            )
        optimizer_D2.lr.assign(lr.numpy())
        optimizer_Dis.lr.assign(10*lr.numpy())
        loss_list_step = [optimizer_D2.lr.numpy(),total_loss,yolo_loss,
                          loss_G,loss_D,giou_loss,conf_loss, prob_loss,loss_content,adv_lambda * loss_adv]
        return np.array(loss_list_step)

    ## @tf.function
    def test_epoch(D2_model,dectect_epoch_path):
        # Run detection over the test annotations and write visualized
        # results for every image into ``dectect_epoch_path``.
        with open(cfg.TEST.ANNOT_PATH, 'r') as annotation_file:
            for num, line in enumerate(annotation_file):
                annotation = line.strip().split()
                image_path = annotation[0]
                image_name = image_path.split('/')[-1]
                # NOTE(review): ``predicted_epoch_path`` is not defined in
                # this function or in main() -- this line raises NameError
                # if executed; likely meant ``predicted_dir_path``. Verify.
                predict_result_path = os.path.join(predicted_epoch_path, str(image_name) + '.txt')
                original_image = cv2.imread(image_path)
                image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
                # Predict Process
                ## image_letter, ratio, (dw, dh) = utils.letterbox(image)
                image_letter = utils.test_image_preprocess(np.copy(image), [INPUT_SIZE, INPUT_SIZE])
                image_data = image_letter[np.newaxis, ...].astype(np.float32)
                batch_data = tf.constant(image_data)
                bbox_tensors = []
                prob_tensors = []
                pred_result = D2_model(batch_data,training=False)
                G_im = pred_result[-1][0]
                # Decode every YOLO head at inference resolution.
                for i in range(ANCHORS.shape[0]):
                    fm = pred_result[i * 2]
                    if i == 0:
                        output_tensors = decode(fm, FLAGS.size // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
                    elif i == 1:
                        output_tensors = decode(fm, FLAGS.size // 16, NUM_CLASS, STRIDES, ANCHORS, 1, XYSCALE)
                    elif i==2:
                        output_tensors = decode(fm, FLAGS.size // 32, NUM_CLASS, STRIDES, ANCHORS, 2, XYSCALE)
                    bbox_tensors.append(output_tensors[0])
                    prob_tensors.append(output_tensors[1])
                pred_bbox = tf.concat(bbox_tensors, axis=1)
                pred_prob = tf.concat(prob_tensors, axis=1)
                boxes, pred_conf = filter_boxes(pred_bbox, pred_prob, score_threshold=FLAGS.score_thres, input_shape=tf.constant([FLAGS.size, FLAGS.size]))
                pred_bbox = tf.concat([boxes, pred_conf], axis=-1)
                boxes = pred_bbox[:, :, 0:4]
                pred_conf = pred_bbox[:, :, 4:]
                # NMS keeps at most one detection per image (max_total_size=1).
                boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
                    boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
                    scores=tf.reshape(
                        pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
                    max_output_size_per_class=1,
                    max_total_size=1,
                    iou_threshold=FLAGS.iou,
                    score_threshold=FLAGS.score
                )
                boxes, scores, classes, valid_detections = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
                if num % 1 ==0:
                    # Draw boxes on the deblurred image and save as BGR.
                    G_im = pred_result[-1][0]
                    G_im = G_im * 255
                    G_im = np.array(G_im).astype(np.int32)
                    image_result = utils.draw_bbox(np.copy(G_im), [boxes, scores, classes, valid_detections])
                    image_result = image_result[:,:,::-1]
                    filepath = dectect_epoch_path+"/"+ str(image_name)
                    cv2.imwrite(filepath, image_result, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    ################################################################
    # Main epoch loop: train, log per-epoch mean losses, checkpoint.
    ################################################################
    if os.path.exists(loss_dir_path): shutil.rmtree(loss_dir_path)
    os.mkdir(loss_dir_path)
    for epoch in range(first_stage_epochs + second_stage_epochs):
        # After the first stage, freeze the listed layers for fine-tuning.
        if epoch >= first_stage_epochs:
            for name in T_freeze_layers:
                try:
                    freeze = D2_model.get_layer(name)
                    freeze_all(freeze)
                    print("Successfully freeze {}...".format(name))
                except:
                    print("{} not exist...".format(name))
        loss_epoch = np.zeros((steps_per_epoch,10),dtype=np.float32)
        for index, (image_data, target) in enumerate(trainset):
            loss_step = train_step(image_data, target)
            loss_epoch[index] = loss_step
        # Average only the rows that were actually filled (lr column > 0).
        mask = loss_epoch[:,0] >0
        loss_mean = np.mean(tf.boolean_mask(loss_epoch,mask),0)
        loss_list_step = {"D2:lr":loss_mean[0],"total_loss":loss_mean[1],"loss/yolo_loss":loss_mean[2],
                          "G_loss":loss_mean[3],"D_loss":loss_mean[4],"loss/giou_loss":loss_mean[5],"loss/conf_loss":loss_mean[6],
                          "loss/prob_loss":loss_mean[7],"loss_content":loss_mean[8],"adv_lambda * loss_adv":loss_mean[9]}
        loss_epoch_path = os.path.join(loss_dir_path, "epoch-{}".format(epoch) + '.txt')
        with open(loss_epoch_path, 'w') as f:
            for vm in loss_list_step.values():
                loss_mess = ' '.join([str(vm)]) + '\n'
                f.write(loss_mess)
        print("No {} epoch params are {} and {}:".format(epoch,params[0].numpy(),params[1].numpy()))
        total_loss_result.append(total_loss_metric.result())
        loss1_result.append(loss1_metric.result())
        loss2_result.append(loss2_metric.result())
        total_loss_metric.reset_states()
        loss1_metric.reset_states()
        loss2_metric.reset_states()
        if epoch % FLAGS.save_frequency == 0:
            D2_model.save_weights(filepath=FLAGS.save_model_dir+"epoch-{}.h5".format(epoch), save_format="h5")
            full_model.save_weights(filepath=FLAGS.save_model_dir+"Dis"+"epoch-{}".format(epoch), save_format="h5")
            print("No {} epoch saved successfully...".format(epoch))
            #Evaluation model
            dectect_epoch_path = text_result_path + "-epoch-{}".format(epoch)
            if os.path.exists(dectect_epoch_path): shutil.rmtree(dectect_epoch_path)
            os.mkdir(dectect_epoch_path)
            test_epoch(D2_model,dectect_epoch_path)
            print("Evaluation completed...")
    #####draw#####
    # Plot the per-epoch mean losses collected above.
    total_loss = np.array(total_loss_result)
    Yolo_loss = np.array(loss1_result)
    G_loss = np.array(loss2_result)
    epochs_range = np.arange(0,epoch+1,1)
    plt.figure(dpi=1000,num=1,figsize=(6, 3))
    plt.plot(epochs_range, total_loss, marker='*',linestyle='-',linewidth=1, markersize=2,label='total_loss')
    plt.plot(epochs_range, Yolo_loss,marker='o', linestyle='-',linewidth=1, markersize=2,label='Yolo_loss')
    plt.plot(epochs_range, G_loss, label='Deblur_loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend(loc='upper right')
    plt.savefig('Tranin_Loss_result.png',bbox_inches="tight",dpi=1000)
    plt.show()
if __name__ == '__main__':
    main()
| [
"core.utils.read_class_names",
"tensorflow.metrics.Mean",
"tensorflow.shape",
"tensorflow.boolean_mask",
"matplotlib.pyplot.ylabel",
"tensorflow.norm",
"tensorflow.GradientTape",
"numpy.array",
"core.deblur_losses.get_loss",
"core.config.CFLAGS",
"core.yolov5.NLayerDiscriminator",
"core.utils.... | [((431, 439), 'core.config.CFLAGS', 'CFLAGS', ([], {}), '()\n', (437, 439), False, 'from core.config import cfg, CFLAGS\n'), ((529, 553), 'core.utils.load_config', 'utils.load_config', (['FLAGS'], {}), '(FLAGS)\n', (546, 553), False, 'from core import utils\n'), ((568, 608), 'core.utils.read_class_names', 'utils.read_class_names', (['cfg.YOLO.CLASSES'], {}), '(cfg.YOLO.CLASSES)\n', (590, 608), False, 'from core import utils\n'), ((798, 830), 'core.dataset.Dataset', 'Dataset', (['FLAGS'], {'is_training': '(True)'}), '(FLAGS, is_training=True)\n', (805, 830), False, 'from core.dataset import Dataset\n'), ((1028, 1075), 'tensorflow.Variable', 'tf.Variable', (['(1)'], {'trainable': '(False)', 'dtype': 'tf.int64'}), '(1, trainable=False, dtype=tf.int64)\n', (1039, 1075), True, 'import tensorflow as tf\n'), ((1236, 1306), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['[cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3]'], {}), '([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])\n', (1257, 1306), True, 'import tensorflow as tf\n'), ((1326, 1370), 'core.yolov5.YOLO', 'YOLO', (['FLAGS.scale_v5', 'input_layer', 'NUM_CLASS'], {}), '(FLAGS.scale_v5, input_layer, NUM_CLASS)\n', (1330, 1370), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((2168, 2209), 'tensorflow.keras.Model', 'tf.keras.Model', (['input_layer', 'bbox_tensors'], {}), '(input_layer, bbox_tensors)\n', (2182, 2209), True, 'import tensorflow as tf\n'), ((2227, 2266), 'core.yolov5.NLayerDiscriminator', 'NLayerDiscriminator', ([], {'ndf': '(64)', 'n_layers': '(5)'}), '(ndf=64, n_layers=5)\n', (2246, 2266), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((2365, 2391), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (2389, 2391), True, 'import tensorflow as tf\n'), ((2412, 2437), 'tensorflow.keras.optimizers.SGD', 
'tf.keras.optimizers.SGD', ([], {}), '()\n', (2435, 2437), True, 'import tensorflow as tf\n'), ((2467, 2477), 'core.deblur_losses.get_loss', 'get_loss', ([], {}), '()\n', (2475, 2477), False, 'from core.deblur_losses import get_loss, DoubleGAN, SingleGAN\n'), ((2920, 2964), 'core.utils.load_True_freeze_layer', 'utils.load_True_freeze_layer', (['FLAGS.scale_v5'], {}), '(FLAGS.scale_v5)\n', (2948, 2964), False, 'from core import utils\n'), ((3013, 3030), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (3028, 3030), True, 'import tensorflow as tf\n'), ((3050, 3067), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (3065, 3067), True, 'import tensorflow as tf\n'), ((3087, 3104), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (3102, 3104), True, 'import tensorflow as tf\n'), ((3191, 3207), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (3202, 3207), True, 'import tensorflow as tf\n'), ((3226, 3242), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (3237, 3242), True, 'import tensorflow as tf\n'), ((11645, 11674), 'os.path.exists', 'os.path.exists', (['loss_dir_path'], {}), '(loss_dir_path)\n', (11659, 11674), False, 'import os\n'), ((11713, 11736), 'os.mkdir', 'os.mkdir', (['loss_dir_path'], {}), '(loss_dir_path)\n', (11721, 11736), False, 'import os\n'), ((14211, 14238), 'numpy.array', 'np.array', (['total_loss_result'], {}), '(total_loss_result)\n', (14219, 14238), True, 'import numpy as np\n'), ((14255, 14277), 'numpy.array', 'np.array', (['loss1_result'], {}), '(loss1_result)\n', (14263, 14277), True, 'import numpy as np\n'), ((14291, 14313), 'numpy.array', 'np.array', (['loss2_result'], {}), '(loss2_result)\n', (14299, 14313), True, 'import numpy as np\n'), ((14333, 14359), 'numpy.arange', 'np.arange', (['(0)', '(epoch + 1)', '(1)'], {}), '(0, epoch + 1, 1)\n', (14342, 14359), True, 'import numpy as np\n'), ((14360, 14403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': 
'(1000)', 'num': '(1)', 'figsize': '(6, 3)'}), '(dpi=1000, num=1, figsize=(6, 3))\n', (14370, 14403), True, 'import matplotlib.pyplot as plt\n'), ((14406, 14518), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'total_loss'], {'marker': '"""*"""', 'linestyle': '"""-"""', 'linewidth': '(1)', 'markersize': '(2)', 'label': '"""total_loss"""'}), "(epochs_range, total_loss, marker='*', linestyle='-', linewidth=1,\n markersize=2, label='total_loss')\n", (14414, 14518), True, 'import matplotlib.pyplot as plt\n'), ((14516, 14626), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'Yolo_loss'], {'marker': '"""o"""', 'linestyle': '"""-"""', 'linewidth': '(1)', 'markersize': '(2)', 'label': '"""Yolo_loss"""'}), "(epochs_range, Yolo_loss, marker='o', linestyle='-', linewidth=1,\n markersize=2, label='Yolo_loss')\n", (14524, 14626), True, 'import matplotlib.pyplot as plt\n'), ((14624, 14675), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'G_loss'], {'label': '"""Deblur_loss"""'}), "(epochs_range, G_loss, label='Deblur_loss')\n", (14632, 14675), True, 'import matplotlib.pyplot as plt\n'), ((14680, 14699), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (14690, 14699), True, 'import matplotlib.pyplot as plt\n'), ((14704, 14722), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (14714, 14722), True, 'import matplotlib.pyplot as plt\n'), ((14730, 14759), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (14740, 14759), True, 'import matplotlib.pyplot as plt\n'), ((14764, 14832), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Tranin_Loss_result.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(1000)'}), "('Tranin_Loss_result.png', bbox_inches='tight', dpi=1000)\n", (14775, 14832), True, 'import matplotlib.pyplot as plt\n'), ((14835, 14845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14843, 14845), True, 'import matplotlib.pyplot 
as plt\n'), ((2534, 2573), 'core.yolov5.NLayerDiscriminator', 'NLayerDiscriminator', ([], {'ndf': '(64)', 'n_layers': '(3)'}), '(ndf=64, n_layers=3)\n', (2553, 2573), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((2677, 2723), 'core.deblur_losses.DoubleGAN', 'DoubleGAN', (['patch_model', 'full_model', 'criterionD'], {}), '(patch_model, full_model, criterionD)\n', (2686, 2723), False, 'from core.deblur_losses import get_loss, DoubleGAN, SingleGAN\n'), ((2756, 2789), 'core.deblur_losses.SingleGAN', 'SingleGAN', (['full_model', 'criterionD'], {}), '(full_model, criterionD)\n', (2765, 2789), False, 'from core.deblur_losses import get_loss, DoubleGAN, SingleGAN\n'), ((2840, 2865), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {}), '()\n', (2863, 2865), True, 'import tensorflow as tf\n'), ((3380, 3391), 'time.time', 'time.time', ([], {}), '()\n', (3389, 3391), False, 'import time\n'), ((7951, 7975), 'numpy.array', 'np.array', (['loss_list_step'], {}), '(loss_list_step)\n', (7959, 7975), True, 'import numpy as np\n'), ((11676, 11704), 'shutil.rmtree', 'shutil.rmtree', (['loss_dir_path'], {}), '(loss_dir_path)\n', (11689, 11704), False, 'import shutil\n'), ((12174, 12223), 'numpy.zeros', 'np.zeros', (['(steps_per_epoch, 10)'], {'dtype': 'np.float32'}), '((steps_per_epoch, 10), dtype=np.float32)\n', (12182, 12223), True, 'import numpy as np\n'), ((13972, 14006), 'os.path.exists', 'os.path.exists', (['dectect_epoch_path'], {}), '(dectect_epoch_path)\n', (13986, 14006), False, 'import os\n'), ((14050, 14078), 'os.mkdir', 'os.mkdir', (['dectect_epoch_path'], {}), '(dectect_epoch_path)\n', (14058, 14078), False, 'import os\n'), ((1486, 1574), 'core.yolov5.decode_train', 'decode_train', (['fm', '(cfg.TRAIN.INPUT_SIZE // 8)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', 'i', 'XYSCALE'], {}), '(fm, cfg.TRAIN.INPUT_SIZE // 8, NUM_CLASS, STRIDES, ANCHORS, i,\n XYSCALE)\n', (1498, 1574), False, 'from 
core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((3572, 3589), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3587, 3589), True, 'import tensorflow as tf\n'), ((3599, 3616), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3614, 3616), True, 'import tensorflow as tf\n'), ((3626, 3643), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3641, 3643), True, 'import tensorflow as tf\n'), ((3653, 3670), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3668, 3670), True, 'import tensorflow as tf\n'), ((3680, 3697), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3695, 3697), True, 'import tensorflow as tf\n'), ((5352, 5371), 'tensorflow.norm', 'tf.norm', (['G1R'], {'ord': '(2)'}), '(G1R, ord=2)\n', (5359, 5371), True, 'import tensorflow as tf\n'), ((5430, 5449), 'tensorflow.norm', 'tf.norm', (['G2R'], {'ord': '(2)'}), '(G2R, ord=2)\n', (5437, 5449), True, 'import tensorflow as tf\n'), ((6358, 6369), 'time.time', 'time.time', ([], {}), '()\n', (6367, 6369), False, 'import time\n'), ((12449, 12482), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['loss_epoch', 'mask'], {}), '(loss_epoch, mask)\n', (12464, 12482), True, 'import tensorflow as tf\n'), ((14008, 14041), 'shutil.rmtree', 'shutil.rmtree', (['dectect_epoch_path'], {}), '(dectect_epoch_path)\n', (14021, 14041), False, 'import shutil\n'), ((1699, 1788), 'core.yolov5.decode_train', 'decode_train', (['fm', '(cfg.TRAIN.INPUT_SIZE // 16)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', 'i', 'XYSCALE'], {}), '(fm, cfg.TRAIN.INPUT_SIZE // 16, NUM_CLASS, STRIDES, ANCHORS, i,\n XYSCALE)\n', (1711, 1788), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((4568, 4700), 'core.yolov5.compute_loss', 'compute_loss', (['pred', 'conv', 'target[i][0]', 'target[i][1]'], {'STRIDES': 'STRIDES', 'NUM_CLASS': 'NUM_CLASS', 'IOU_LOSS_THRESH': 
'IOU_LOSS_THRESH', 'i': 'i'}), '(pred, conv, target[i][0], target[i][1], STRIDES=STRIDES,\n NUM_CLASS=NUM_CLASS, IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)\n', (4580, 4700), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((5826, 5841), 'tensorflow.identity', 'tf.identity', (['C1'], {}), '(C1)\n', (5837, 5841), True, 'import tensorflow as tf\n'), ((5877, 5892), 'tensorflow.identity', 'tf.identity', (['C2'], {}), '(C2)\n', (5888, 5892), True, 'import tensorflow as tf\n'), ((8497, 8519), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (8507, 8519), False, 'import cv2\n'), ((8544, 8591), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2RGB'], {}), '(original_image, cv2.COLOR_BGR2RGB)\n', (8556, 8591), False, 'import cv2\n'), ((8908, 8931), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (8919, 8931), True, 'import tensorflow as tf\n'), ((9819, 9850), 'tensorflow.concat', 'tf.concat', (['bbox_tensors'], {'axis': '(1)'}), '(bbox_tensors, axis=1)\n', (9828, 9850), True, 'import tensorflow as tf\n'), ((9879, 9910), 'tensorflow.concat', 'tf.concat', (['prob_tensors'], {'axis': '(1)'}), '(prob_tensors, axis=1)\n', (9888, 9910), True, 'import tensorflow as tf\n'), ((10095, 10133), 'tensorflow.concat', 'tf.concat', (['[boxes, pred_conf]'], {'axis': '(-1)'}), '([boxes, pred_conf], axis=-1)\n', (10104, 10133), True, 'import tensorflow as tf\n'), ((1911, 2000), 'core.yolov5.decode_train', 'decode_train', (['fm', '(cfg.TRAIN.INPUT_SIZE // 32)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', 'i', 'XYSCALE'], {}), '(fm, cfg.TRAIN.INPUT_SIZE // 32, NUM_CLASS, STRIDES, ANCHORS, i,\n XYSCALE)\n', (1923, 2000), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((5971, 5991), 'tensorflow.math.abs', 'tf.math.abs', (['(G1 - C1)'], {}), '(G1 - C1)\n', (5982, 5991), True, 'import tensorflow as tf\n'), ((6012, 
6032), 'tensorflow.math.abs', 'tf.math.abs', (['(G2 - C2)'], {}), '(G2 - C2)\n', (6023, 6032), True, 'import tensorflow as tf\n'), ((8759, 8773), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (8766, 8773), True, 'import numpy as np\n'), ((11982, 12000), 'core.utils.freeze_all', 'freeze_all', (['freeze'], {}), '(freeze)\n', (11992, 12000), False, 'from core.utils import freeze_all, unfreeze_all\n'), ((7577, 7653), 'tensorflow.cos', 'tf.cos', (['((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi)'], {}), '((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi)\n', (7583, 7653), True, 'import tensorflow as tf\n'), ((9293, 9361), 'core.yolov5.decode', 'decode', (['fm', '(FLAGS.size // 8)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', 'i', 'XYSCALE'], {}), '(fm, FLAGS.size // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)\n', (9299, 9361), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((10028, 10065), 'tensorflow.constant', 'tf.constant', (['[FLAGS.size, FLAGS.size]'], {}), '([FLAGS.size, FLAGS.size])\n', (10039, 10065), True, 'import tensorflow as tf\n'), ((11128, 11141), 'numpy.copy', 'np.copy', (['G_im'], {}), '(G_im)\n', (11135, 11141), True, 'import numpy as np\n'), ((9436, 9505), 'core.yolov5.decode', 'decode', (['fm', '(FLAGS.size // 16)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', '(1)', 'XYSCALE'], {}), '(fm, FLAGS.size // 16, NUM_CLASS, STRIDES, ANCHORS, 1, XYSCALE)\n', (9442, 9505), False, 'from core.yolov5 import NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((11045, 11059), 'numpy.array', 'np.array', (['G_im'], {}), '(G_im)\n', (11053, 11059), True, 'import numpy as np\n'), ((9578, 9647), 'core.yolov5.decode', 'decode', (['fm', '(FLAGS.size // 32)', 'NUM_CLASS', 'STRIDES', 'ANCHORS', '(2)', 'XYSCALE'], {}), '(fm, FLAGS.size // 32, NUM_CLASS, STRIDES, ANCHORS, 2, XYSCALE)\n', (9584, 9647), False, 'from core.yolov5 import 
NLayerDiscriminator, YOLO, decode, compute_loss, decode_train, filter_boxes\n'), ((10379, 10394), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (10387, 10394), True, 'import tensorflow as tf\n'), ((10494, 10513), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (10502, 10513), True, 'import tensorflow as tf\n'), ((10522, 10541), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (10530, 10541), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits

# Load the fitted noise model (stored in log space), the binning metadata in
# the primary header, and the mask flagging bins that contain real data.
with fits.open("../archive/noise/process.fits") as f:
    hdr = f[0].header
    model = f[1].data
    mask = f[2].data

# Rebuild the color/magnitude bin edges from the header, then the bin
# centers used as the plotting grid.
color_bins = np.linspace(hdr["MIN_COL"], hdr["MAX_COL"], hdr["NUM_COL"] + 1)
mag_bins = np.linspace(hdr["MIN_MAG"], hdr["MAX_MAG"], hdr["NUM_MAG"] + 1)
color_bin_centers = 0.5 * (color_bins[1:] + color_bins[:-1])
mag_bin_centers = 0.5 * (mag_bins[1:] + mag_bins[:-1])

plt.figure(figsize=(7, 6))

# The model is stored as log-uncertainty; exponentiate to physical units.
sigma_rv = np.exp(model)
# Log-spaced contour levels spanning only the range of the valid
# (mask == 1) bins, so extrapolated bins do not stretch the color scale.
levels = np.exp(
    np.linspace(model[mask == 1].min(), model[mask == 1].max(), 25)
)
norm = mpl.colors.LogNorm(vmin=levels[0], vmax=levels[-1])
# Clip values at the top level so contourf does not leave them unfilled.
sigma_rv[sigma_rv >= levels[-1]] = levels[-1] - 1e-5
plt.contourf(
    color_bin_centers,
    mag_bin_centers,
    sigma_rv,
    levels=levels,
    norm=norm,
)

# Transparent-to-opaque black colormap: shades the extrapolated (mask == 0)
# regions on top of the filled contours while leaving valid bins visible.
color_array = np.zeros((2, 4))
color_array[:, -1] = [1.0, 0]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
    name="shading", colors=color_array
)
plt.contourf(
    color_bin_centers,
    mag_bin_centers,
    1.5 * mask,
    levels=1,
    cmap=cmap,
    vmin=0,
    vmax=1,
)

# Use a bare ScalarMappable so the colorbar reflects the log normalization
# of the noise-model contours rather than the shading overlay.
sm = plt.cm.ScalarMappable(norm=norm)
sm.set_array(np.array([]))
cbar = plt.colorbar(sm, label=r"per-transit uncertainty [km/s]")
cbar.ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
plt.annotate(
    "extrapolated\nin shaded\nregions",
    xy=(1, 1),
    xycoords="axes fraction",
    ha="right",
    va="top",
    color="w",
    xytext=(-10, -10),
    textcoords="offset points",
    fontsize=9,
)
# Invert the y axis so brighter magnitudes appear at the top.
plt.ylim(plt.ylim()[::-1])
# BUGFIX: raw strings for the LaTeX labels — "\m" is an invalid escape
# sequence (SyntaxWarning on recent Python); the runtime string is unchanged.
plt.ylabel(r"$m_\mathrm{G}$")
plt.xlabel(r"$G_\mathrm{BP}-G_\mathrm{RP}$")
plt.savefig("noise_model.pdf", bbox_inches="tight")
| [
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"numpy.exp",
"matplotlib.pyplot.annotate",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy... | [((285, 348), 'numpy.linspace', 'np.linspace', (["hdr['MIN_COL']", "hdr['MAX_COL']", "(hdr['NUM_COL'] + 1)"], {}), "(hdr['MIN_COL'], hdr['MAX_COL'], hdr['NUM_COL'] + 1)\n", (296, 348), True, 'import numpy as np\n'), ((360, 423), 'numpy.linspace', 'np.linspace', (["hdr['MIN_MAG']", "hdr['MAX_MAG']", "(hdr['NUM_MAG'] + 1)"], {}), "(hdr['MIN_MAG'], hdr['MAX_MAG'], hdr['NUM_MAG'] + 1)\n", (371, 423), True, 'import numpy as np\n'), ((542, 568), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (552, 568), True, 'import matplotlib.pyplot as plt\n'), ((581, 594), 'numpy.exp', 'np.exp', (['model'], {}), '(model)\n', (587, 594), True, 'import numpy as np\n'), ((689, 740), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {'vmin': 'levels[0]', 'vmax': 'levels[-1]'}), '(vmin=levels[0], vmax=levels[-1])\n', (707, 740), True, 'import matplotlib as mpl\n'), ((795, 883), 'matplotlib.pyplot.contourf', 'plt.contourf', (['color_bin_centers', 'mag_bin_centers', 'sigma_rv'], {'levels': 'levels', 'norm': 'norm'}), '(color_bin_centers, mag_bin_centers, sigma_rv, levels=levels,\n norm=norm)\n', (807, 883), True, 'import matplotlib.pyplot as plt\n'), ((918, 934), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {}), '((2, 4))\n', (926, 934), True, 'import numpy as np\n'), ((972, 1057), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', ([], {'name': '"""shading"""', 'colors': 'color_array'}), "(name='shading', colors=color_array\n )\n", (1016, 1057), True, 'import matplotlib as mpl\n'), ((1059, 1161), 'matplotlib.pyplot.contourf', 'plt.contourf', (['color_bin_centers', 'mag_bin_centers', '(1.5 * mask)'], {'levels': '(1)', 'cmap': 'cmap', 'vmin': '(0)', 'vmax': '(1)'}), '(color_bin_centers, mag_bin_centers, 1.5 * mask, levels=1, cmap\n =cmap, vmin=0, vmax=1)\n', (1071, 1161), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1226), 'matplotlib.pyplot.cm.ScalarMappable', 
'plt.cm.ScalarMappable', ([], {'norm': 'norm'}), '(norm=norm)\n', (1215, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1318), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'label': '"""per-transit uncertainty [km/s]"""'}), "(sm, label='per-transit uncertainty [km/s]')\n", (1274, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1573), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""extrapolated\nin shaded\nregions"""'], {'xy': '(1, 1)', 'xycoords': '"""axes fraction"""', 'ha': '"""right"""', 'va': '"""top"""', 'color': '"""w"""', 'xytext': '(-10, -10)', 'textcoords': '"""offset points"""', 'fontsize': '(9)'}), '("""extrapolated\nin shaded\nregions""", xy=(1, 1), xycoords=\n \'axes fraction\', ha=\'right\', va=\'top\', color=\'w\', xytext=(-10, -10),\n textcoords=\'offset points\', fontsize=9)\n', (1397, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_\\\\mathrm{G}$"""'], {}), "('$m_\\\\mathrm{G}$')\n", (1640, 1659), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1704), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$G_\\\\mathrm{BP}-G_\\\\mathrm{RP}$"""'], {}), "('$G_\\\\mathrm{BP}-G_\\\\mathrm{RP}$')\n", (1669, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1704, 1755), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""noise_model.pdf"""'], {'bbox_inches': '"""tight"""'}), "('noise_model.pdf', bbox_inches='tight')\n", (1715, 1755), True, 'import matplotlib.pyplot as plt\n'), ((157, 199), 'astropy.io.fits.open', 'fits.open', (['"""../archive/noise/process.fits"""'], {}), "('../archive/noise/process.fits')\n", (166, 199), False, 'from astropy.io import fits\n'), ((1240, 1252), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1248, 1252), True, 'import numpy as np\n'), ((1354, 1382), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {}), '()\n', (1380, 1382), True, 'import matplotlib as mpl\n'), ((1612, 1622), 
'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (1620, 1622), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import cv2
import numpy as np
from types import SimpleNamespace
from experts.expert import Expert
sys.path.append("external/CenterTrack/src/lib")
from detector import Detector
from opts import opts
def get_default_calib(width, height):
    """Build a default 3x4 camera calibration matrix for an image.

    Uses a fixed focal length of 1200 pixels and places the principal
    point at the image center.

    Args:
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        A (3, 4) numpy array in the standard [K | 0] projection layout.
    """
    focal = 1200
    cx = width / 2
    cy = height / 2
    return np.array(
        [
            [focal, 0, cx, 0],
            [0, focal, cy, 0],
            [0, 0, 1, 0],
        ]
    )
def parse_opt(opt):
    """Normalize a raw CenterTrack option namespace in place.

    Splits comma-separated string options into typed lists, derives
    GPU/worker/batch bookkeeping, and enables tracking-specific thresholds.
    Returns the same ``opt`` object.
    """
    # Keep the original comma-separated GPU string, then map the requested
    # GPUs to local device indices; [-1] means CPU-only.
    opt.gpus_str = opt.gpus
    opt.gpus = [int(gpu) for gpu in opt.gpus.split(",")]
    opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
    # Comma-separated string options become typed lists; empty strings
    # become empty lists.
    opt.lr_step = [int(i) for i in opt.lr_step.split(",")]
    opt.save_point = [int(i) for i in opt.save_point.split(",")]
    opt.test_scales = [float(i) for i in opt.test_scales.split(",")]
    opt.save_imgs = [i for i in opt.save_imgs.split(",")] if opt.save_imgs != "" else []
    opt.ignore_loaded_cats = (
        [int(i) for i in opt.ignore_loaded_cats.split(",")]
        if opt.ignore_loaded_cats != ""
        else []
    )
    # At least two data-loader workers per GPU.
    opt.num_workers = max(opt.num_workers, 2 * len(opt.gpus))
    opt.pre_img = False
    if "tracking" in opt.task:
        print("Running tracking")
        opt.tracking = True
        # Tracking threshold acts as a floor for the other thresholds.
        opt.out_thresh = max(opt.track_thresh, opt.out_thresh)
        opt.pre_thresh = max(opt.track_thresh, opt.pre_thresh)
        opt.new_thresh = max(opt.track_thresh, opt.new_thresh)
        opt.pre_img = not opt.no_pre_img
    if "ddd" in opt.task:
        opt.show_track_color = True
    opt.fix_res = not opt.keep_res
    if opt.head_conv == -1:  # init default head_conv
        opt.head_conv = 256 if "dla" in opt.arch else 64
    # Architecture-dependent padding and hourglass stack count.
    opt.pad = 127 if "hourglass" in opt.arch else 31
    opt.num_stacks = 2 if opt.arch == "hourglass" else 1
    if opt.master_batch_size == -1:
        opt.master_batch_size = opt.batch_size // len(opt.gpus)
    # Distribute the remaining batch over the slave GPUs, spreading the
    # remainder one item at a time over the first few of them.
    rest_batch_size = opt.batch_size - opt.master_batch_size
    opt.chunk_sizes = [opt.master_batch_size]
    for i in range(len(opt.gpus) - 1):
        slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
        if i < rest_batch_size % (len(opt.gpus) - 1):
            slave_chunk_size += 1
        opt.chunk_sizes.append(slave_chunk_size)
    # Debug mode forces a single-GPU, single-sample, single-process setup.
    if opt.debug > 0:
        opt.num_workers = 0
        opt.batch_size = 1
        opt.gpus = [opt.gpus[0]]
        opt.master_batch_size = -1
    return opt
def update_dataset_info_and_set_heads(opt, num_classes, default_resolution, num_joints):
    """Resolve input/output resolutions and build the network head config.

    Args:
        opt: option namespace already normalized by ``parse_opt``.
        num_classes: number of object classes.
        default_resolution: (height, width) fallback input resolution.
        num_joints: number of keypoints for the multi-pose heads.

    Returns:
        The same ``opt`` with ``heads``, ``weights`` and ``head_conv`` set.
    """
    opt.num_classes = num_classes

    # input_h(w): opt.input_h overrides opt.input_res overrides dataset default
    input_h, input_w = default_resolution
    input_h = opt.input_res if opt.input_res > 0 else input_h
    input_w = opt.input_res if opt.input_res > 0 else input_w
    opt.input_h = opt.input_h if opt.input_h > 0 else input_h
    opt.input_w = opt.input_w if opt.input_w > 0 else input_w
    # Output feature-map size is the input size downsampled by down_ratio.
    opt.output_h = opt.input_h // opt.down_ratio
    opt.output_w = opt.input_w // opt.down_ratio
    opt.input_res = max(opt.input_h, opt.input_w)
    opt.output_res = max(opt.output_h, opt.output_w)

    # Base heads: class heatmap, sub-pixel offset, box size; task-specific
    # heads are added on top.
    opt.heads = {"hm": opt.num_classes, "reg": 2, "wh": 2}
    if "tracking" in opt.task:
        opt.heads.update({"tracking": 2})
    if "ddd" in opt.task:
        opt.heads.update({"dep": 1, "rot": 8, "dim": 3, "amodel_offset": 2})
    if "multi_pose" in opt.task:
        opt.heads.update({"hps": num_joints * 2, "hm_hp": num_joints, "hp_offset": 2})
    if opt.ltrb:
        opt.heads.update({"ltrb": 4})
    if opt.ltrb_amodal:
        opt.heads.update({"ltrb_amodal": 4})
    if opt.nuscenes_att:
        opt.heads.update({"nuscenes_att": 8})
    if opt.velocity:
        opt.heads.update({"velocity": 3})

    # Per-head loss weights, restricted to the heads actually enabled above.
    weight_dict = {
        "hm": opt.hm_weight,
        "wh": opt.wh_weight,
        "reg": opt.off_weight,
        "hps": opt.hp_weight,
        "hm_hp": opt.hm_hp_weight,
        "hp_offset": opt.off_weight,
        "dep": opt.dep_weight,
        "rot": opt.rot_weight,
        "dim": opt.dim_weight,
        "amodel_offset": opt.amodel_offset_weight,
        "ltrb": opt.ltrb_weight,
        "tracking": opt.tracking_weight,
        "ltrb_amodal": opt.ltrb_amodal_weight,
        "nuscenes_att": opt.nuscenes_att_weight,
        "velocity": opt.velocity_weight,
    }
    opt.weights = {head: weight_dict[head] for head in opt.heads}

    # Heads with zero loss weight contribute nothing and are dropped.
    for head in opt.weights:
        if opt.weights[head] == 0:
            del opt.heads[head]
    # Per-head conv-layer widths; the offset head ("reg") gets one layer.
    opt.head_conv = {
        head: [opt.head_conv for i in range(opt.num_head_conv if head != "reg" else 1)]
        for head in opt.heads
    }
    return opt
class CenterTrack(Expert):
    """Expert wrapper around the CenterTrack joint detection-and-tracking model.

    Builds the CenterTrack option namespace from the argparse defaults,
    overrides the tracking-specific settings, and drives a ``Detector``
    instance frame by frame.
    """

    def __init__(self, load_model, track_thresh, pre_thresh, private):
        """
        Args:
            load_model: path to the CenterTrack checkpoint.
            track_thresh: confidence threshold for keeping tracks.
            pre_thresh: threshold applied to previous-frame detections.
            private: if True, run in private-detection mode (the supplied
                public detections are withheld from the tracker).
        """
        super(CenterTrack, self).__init__("CenterTrack")

        # Collect the argparse defaults into a plain namespace instead of
        # parsing the real command line.
        parser = opts().parser
        opt = {}
        for action in parser._actions:
            if not action.required and action.dest != "help":
                opt[action.dest] = action.default
        opt = SimpleNamespace(**opt)

        opt.task = "tracking"
        opt.load_model = load_model
        opt.track_thresh = track_thresh
        opt.pre_thresh = pre_thresh
        opt.pre_hm = True
        opt.ltrb_amodal = True
        opt.public_det = private

        self.opt = parse_opt(opt)
        # One object class, 544x960 input resolution, 17 keypoints.
        self.opt = update_dataset_info_and_set_heads(self.opt, 1, [544, 960], 17)
        self.private = private

    def initialize(self, seq_info):
        """Create a fresh Detector with reset tracking state for a sequence."""
        super(CenterTrack, self).initialize(seq_info)
        self.tracker = Detector(self.opt)
        self.tracker.reset_tracking()

    def track(self, img_path, dets):
        """Run one frame and return rows of [track_id, x, y, w, h]."""
        super(CenterTrack, self).track(img_path, dets)
        input_meta = self.preprocess(img_path, dets)
        ret = self.tracker.run(img_path, input_meta)
        result = []
        for t in ret["results"]:
            bbox = t["bbox"]  # [x1, y1, x2, y2]
            tracking_id = t["tracking_id"]
            result.append(
                [tracking_id, bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
            )
        return result

    def preprocess(self, img_path, dets):
        """Build the CenterTrack input meta (calibration + public detections).

        ``dets`` rows are assumed to be [_, x, y, w, h, score] — TODO confirm
        against the caller.
        """
        img = cv2.imread(img_path)
        input_meta = {}
        input_meta["calib"] = get_default_calib(img.shape[1], img.shape[0])

        detections = []
        if dets is not None:
            for det in dets:
                bbox = [
                    float(det[1]),
                    float(det[2]),
                    float(det[1] + det[3]),
                    float(det[2] + det[4]),
                ]
                # BUGFIX: the box center is the midpoint of the x1/x2 and
                # y1/y2 corners; the original averaged x with w (and y with
                # h), which is not the center of an x/y/w/h detection.
                ct = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
                detections.append(
                    {"bbox": bbox, "score": float(det[5]), "class": 1, "ct": ct}
                )

        # The first frame has no previous-frame state; in private mode the
        # public detections are withheld entirely.
        if self.frame_idx == 0:
            if self.private:
                input_meta["pre_dets"] = []
            else:
                input_meta["pre_dets"] = detections
        if self.private:
            input_meta["cur_dets"] = []
        else:
            input_meta["cur_dets"] = detections
        return input_meta
| [
"detector.Detector",
"types.SimpleNamespace",
"numpy.array",
"opts.opts",
"sys.path.append",
"cv2.imread"
] | [((111, 158), 'sys.path.append', 'sys.path.append', (['"""external/CenterTrack/src/lib"""'], {}), "('external/CenterTrack/src/lib')\n", (126, 158), False, 'import sys\n'), ((292, 398), 'numpy.array', 'np.array', (['[[rest_focal_length, 0, width / 2, 0], [0, rest_focal_length, height / 2, 0\n ], [0, 0, 1, 0]]'], {}), '([[rest_focal_length, 0, width / 2, 0], [0, rest_focal_length, \n height / 2, 0], [0, 0, 1, 0]])\n', (300, 398), True, 'import numpy as np\n'), ((5025, 5047), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**opt)\n', (5040, 5047), False, 'from types import SimpleNamespace\n'), ((5543, 5561), 'detector.Detector', 'Detector', (['self.opt'], {}), '(self.opt)\n', (5551, 5561), False, 'from detector import Detector\n'), ((6134, 6154), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (6144, 6154), False, 'import cv2\n'), ((4829, 4835), 'opts.opts', 'opts', ([], {}), '()\n', (4833, 4835), False, 'from opts import opts\n')] |
import pdb
import sys
from functools import reduce
import matplotlib.pyplot as plt
import arrow
import pandas as pd
from pandas.errors import EmptyDataError
import numpy as np
from scipy import interpolate
import statsmodels.api as sm
from sparqlwrapper_brick import BrickEndpoint
def series_checker(x):
    """Return True when *x* is a pandas Series (PEP 8: def instead of a named lambda)."""
    return isinstance(x, pd.Series)
class HvacMeter(object):
    """Estimate chilled-water thermal power from HVAC airflow/temperature data.

    Points of interest are discovered through SPARQL queries against a Brick
    endpoint; their timeseries are read from per-srcid CSV dumps, interpolated
    onto a common 5-minute grid, combined into AHU/VAV thermal powers, and
    finally regressed against the measured chilled-water thermal power.

    OneVAV-Points model
    {
        "vav": {
            "srcid": "XXX",
            "room" :"RRR,
            "cc": "YYY",
            "saf": "ZZZ",
            "znt": "KKK"
        }
    }

    OneAHU-VAV model
    {
        "ahu": {
            "vavs": ["vav1", "vav2", ...],
            "mixt": "AAA",
            "supt": "BBB",
        }
    }
    """

    def __init__(self, target_building, brick_endpoint):
        """
        Args:
            target_building: building identifier (e.g. 'ebu3b').
            brick_endpoint: a BrickEndpoint used for all SPARQL queries.
        """
        self.brick = brick_endpoint
        self.target_building = target_building
        #ttlfile = '../../../repo/hvacmeter/metadata/ebu3b_brick.ttl'
        ttlfile = '/home/jbkoh/repo/hvacmeter/metadata/ebu3b_brick.ttl'
        self.brick.load_ttlfile(ttlfile)
        # Analysis window: 2018-04-06 .. 2018-04-14.
        self.begin_time = arrow.get(2018, 4, 6).datetime
        self.end_time = arrow.get(2018, 4, 14).datetime
        self.init_df()
        self.datadir = './data/'

    def init_df(self):
        """Create the 5-minute aligned dataframe and the model-parameter columns."""
        self.datetimes = pd.date_range(self.begin_time,
                                       self.end_time,
                                       freq='5min')
        self.df = pd.DataFrame(index=self.datetimes)
        # Unix timestamps of the base grid; raw data is interpolated onto it.
        self.base_ts = [arrow.get(dt).timestamp for dt in self.datetimes]
        self._init_model_params()

    def _init_model_params(self):
        """Initialize all model-parameter columns (currently cooling only)."""
        self._init_cooling_params()

    # BUGFIX: the original definition was missing ``self``, so invoking it as
    # a bound method from _init_model_params raised TypeError in __init__.
    def _init_cooling_params(self):
        """Initialize the cooling regression columns (C3 is the intercept)."""
        self.df['Q_ahu_cooling_power'] = None
        self.df['Q_vav_cooling_power'] = None
        self.df['Q_ahu_returned_power'] = None
        self.df['water_thermal_power'] = None
        self.df['C3'] = 1

    def get_ahus(self):
        """Return (and cache in ``self.ahus``) the URIs of all AHUs."""
        qstr = 'select ?ahu where {?ahu a/rdf:subClassOf* brick:AHU.}'
        self.ahus = [row[0] for row in self.brick.query(qstr)[1]]
        return self.ahus

    def get_ahu_points(self, ahu):
        """Return the OAT/MAT/RAT/DAT points of *ahu* keyed by SPARQL variable name."""
        qstr = """
        select ?oat ?mat ?rat ?dat where {{
        OPTIONAL {{
            ?oat a brick:Outside_Air_Temperature_Sensor .
            ?oat bf:isPointOf <{0}>.
            }}
        OPTIONAL {{
            ?mat a brick:Mixed_Air_Temperature_Sensor .
            ?mat bf:isPointOf <{0}>.
            }}
        OPTIONAL {{
            ?rat a brick:Return_Air_Temperature_Sensor .
            ?rat bf:isPointOf <{0}>.
            }}
        OPTIONAL {{
            ?dat a brick:Discharge_Air_Temperature_Setpoint.
            ?dat bf:isPointOf <{0}>.
            }}
        }}
        """.format(ahu)
        res = self.brick.query(qstr)
        points = {varname: entity for varname, entity
                  in zip(res[0], res[1][0])}
        return points

    def get_ahu_vavs(self, ahu):
        """Query the VAVs fed by *ahu* with their zone and sensor points.

        Backup note (alternative SPARQL BIND formulation):
            BIND(
                IF(
                    NOT EXISTS{{
                        ?sat bf:isPointOf ?vav .
                        ?sat a brick:Supply_Air_Temperature_Sensor .
                        }}
                , ?sat, ?dat)
                AS ?dat
                )
        """
        qstr = """
        select ?vav ?zone ?znt ?saf ?dat ?sat where {{
            <{0}> bf:feeds+ ?vav .
            ?vav a brick:VAV .
            ?vav bf:feeds+ ?zone .
            ?zone a brick:HVAC_Zone .
            ?znt bf:isPointOf ?vav .
            ?znt a brick:Zone_Temperature_Sensor .
            ?saf bf:isPointOf ?vav .
            ?saf a brick:Supply_Air_Flow_Sensor .
            ?dat bf:isPointOf <{0}>.
            ?dat a brick:Discharge_Air_Temperature_Setpoint .
            OPTIONAL{{
                ?sat a brick:Supply_Air_Temperature_Sensor .
                ?sat bf:isPointOf ?vav .
            }}
            #BIND(IF(exists{{
            #        ?sat a brick:Supply_Air_Temperature_Sensor .
            #        ?sat bf:isPointOf ?vav .
            #    }}, "yes", ?dat_cand) AS ?dat
            #    #}}, ?sat, ?dat_cand) AS ?dat
            #)
        }}
        """.format(ahu)
        res = self.brick.query(qstr)
        # BUGFIX: the original discarded the query result (implicitly
        # returning None); return it so the "get_" name matches behavior.
        return res

    def get_ahu_disch_airflow(self, ahu):
        """Return the AHU discharge airflow, falling back to the sum of VAV flows."""
        qstr = """
        select ?daf where {{
            ?daf bf:isPointOf <{0}>.
            ?daf a brick:Discharge_Air_Flow_Sensor .
        }}
        """.format(ahu)
        res = self.brick.query(qstr)
        if res[1]:  # If DAF exists for the AHU.
            airflow = None  # TODO: read the AHU's own DAF timeseries here.
        else:  # If AHU's DAF does not exist, collect VAVs' SAF
            airflow = self.calc_tot_vavs_airflow(ahu)
        return airflow

    def calc_tot_vavs_airflow(self, ahu):
        """Sum the supply-air-flow series of all VAVs directly fed by *ahu*."""
        qstr = """
        select ?saf where {{
            <{0}> bf:feeds ?vav .
            ?vav a brick:VAV.
            ?saf bf:isPointOf ?vav.
            ?saf a brick:Supply_Air_Flow_Sensor .
        }}
        """.format(ahu)
        [var_names, tuples] = self.brick.query(qstr)
        safs = [tup[0] for tup in tuples]
        saf_values = [self.get_point_data(saf) for saf in safs]
        # Points with missing data come back as None and are skipped.
        saf_sum = sum([saf_value for saf_value in saf_values
                       if isinstance(saf_value, pd.Series)])
        return saf_sum

    def calc_ahu_returned_power(self, ahu):
        """Store airflow * (return - mixed temperature) in Q_ahu_returned_power."""
        daf = self.get_ahu_disch_airflow(ahu)
        ahu_points = self.get_ahu_points(ahu)
        rat = self.get_point_data(ahu_points['?rat'])
        mat = self.get_point_data(ahu_points['?mat'])
        power = daf.multiply(rat - mat)
        self.df['Q_ahu_returned_power'] = power

    def calc_ahu_cooling_power(self, ahu):
        """Store airflow * (mixed - discharge temperature) in Q_ahu_cooling_power."""
        daf = self.get_ahu_disch_airflow(ahu)
        ahu_points = self.get_ahu_points(ahu)
        dat = self.get_point_data(ahu_points['?dat'])
        mat = self.get_point_data(ahu_points['?mat'])
        power = daf.multiply(mat - dat)
        self.df['Q_ahu_cooling_power'] = power

    def calc_vavs_cooling_power(self, ahu):
        """Sum VAV cooling powers under *ahu*, scaling up for VAVs with no data."""
        point_sets = self.get_vavs_points(ahu)
        powers = [self.calc_vav_cooling_power(points) for points in point_sets.values()]
        none_cnt = sum([not isinstance(power, pd.Series) for power in powers])
        usable_cnt = len(powers) - none_cnt
        if usable_cnt == 0:
            # BUGFIX: the original divided by zero when no VAV had usable
            # data (or when there were no VAVs at all).
            self.df['Q_vav_cooling_power'] = None
            return
        # Scale the partial sum up to the full VAV population.
        powers_sum = sum([power for power in powers
                          if isinstance(power, pd.Series)]) * len(powers) / usable_cnt
        self.df['Q_vav_cooling_power'] = powers_sum

    def calc_vav_cooling_power(self, vav_points):
        """Return SAF * (zone - discharge temperature), or None if data is missing."""
        if not vav_points:
            return None
        znts = self.get_point_data(vav_points['?znt'])
        dats = self.get_point_data(vav_points['?dat'])
        safs = self.get_point_data(vav_points['?saf'])
        if False not in map(series_checker, [znts, dats, safs]):
            res = safs.multiply(znts - dats)
            return res
        else:
            return None

    def get_vavs(self, ahu):
        """Return the URIs of all VAVs (transitively) fed by *ahu*."""
        qstr = """
        select ?vav where {{
            <{0}> bf:feeds+ ?vav.
            ?vav a brick:VAV.
            }}
        """.format(ahu)
        res = self.brick.query(qstr)
        vavs = [row[0] for row in res[1]]
        return vavs

    def get_vavs_points(self, ahu):
        """Return {vav: point dict} for all VAVs directly fed by *ahu*.

        The discharge temperature falls back to the AHU's setpoint when the
        VAV has no supply-air temperature sensor of its own.
        """
        qstr = """
        select ?vav ?znt ?saf ?dat ?sat ?zone where {{
            ?dat bf:isPointOf <{0}>.
            ?dat a brick:Discharge_Air_Temperature_Setpoint .
            <{0}> bf:feeds ?vav .
            ?vav a brick:VAV .
            ?vav bf:feeds ?zone .
            ?zone a brick:HVAC_Zone .
            ?znt bf:isPointOf ?vav .
            ?znt a brick:Zone_Temperature_Sensor .
            ?saf bf:isPointOf ?vav .
            ?saf a brick:Supply_Air_Flow_Sensor .
            OPTIONAL{{
                ?sat a brick:Supply_Air_Temperature_Sensor .
                ?sat bf:isPointOf ?vav .
            }}
        }}
        """.format(ahu)
        res = self.brick.query(qstr)
        var_names = res[0]
        point_sets = {}  # key: vav, value: points
        for row in res[1]:
            points = {
                '?znt': row[var_names.index('?znt')],
                '?saf': row[var_names.index('?saf')],
                # Prefer the VAV's own SAT sensor over the AHU setpoint.
                '?dat': row[var_names.index('?sat')] if row[var_names.index('?sat')] else \
                        row[var_names.index('?dat')],
                '?zone': row[var_names.index('?zone')]
            }
            vav = row[var_names.index('?vav')]
            if vav in point_sets:
                print('VAV should not occur twise')
            point_sets[vav] = points
        return point_sets

    def get_vav_points(self, vav):
        """Return the point dict for a single *vav*, or None if not found."""
        qstr = """
        select ?znt ?saf ?dat ?sat ?zone where {{
            <{0}> bf:feeds ?zone .
            ?zone a brick:HVAC_Zone .
            ?znt bf:isPointOf <{0}> .
            ?znt a brick:Zone_Temperature_Sensor .
            ?saf bf:isPointOf <{0}> .
            ?saf a brick:Supply_Air_Flow_Sensor .
            ?ahu bf:feeds <{0}>.
            ?dat bf:isPointOf ?ahu.
            ?dat a brick:Discharge_Air_Temperature_Setpoint .
            OPTIONAL{{
                ?sat a brick:Supply_Air_Temperature_Sensor .
                ?sat bf:isPointOf <{0}> .
            }}
        }}
        """.format(vav)
        res = self.brick.query(qstr)
        var_names = res[0]
        rows = res[1]
        if not rows:
            return None
        row = res[1][0]
        points = {
            '?znt': row[var_names.index('?znt')],
            '?saf': row[var_names.index('?saf')],
            # Prefer the VAV's own SAT sensor over the AHU setpoint.
            '?dat': row[var_names.index('?sat')] if row[var_names.index('?sat')] else \
                    row[var_names.index('?dat')],
            '?zone': row[var_names.index('?zone')]
        }
        return points

    def get_point_data(self, point, aligned=True):
        """Load *point*'s CSV timeseries; optionally interpolate onto the base grid.

        Returns None when the CSV exists but is empty.
        """
        qstr = """
        select ?srcid where {{
            <{0}> bf:srcid ?srcid.
        }}
        """.format(point)
        res = self.brick.query(qstr)
        srcid = res[1][0][0]
        try:
            # NOTE(review): pd.Series.from_csv was removed in pandas >= 1.0;
            # this code assumes an older pandas — confirm before upgrading.
            data = pd.Series.from_csv(self.datadir + '{0}.csv'.format(srcid))
        except EmptyDataError:
            return None
        except Exception as e:
            # Debugging leftover: drop into pdb on unexpected read failures.
            pdb.set_trace()
            print(e)
            sys.exit()
        ts = [arrow.get(dt).timestamp for dt in data.keys()]
        if aligned:
            # Linearly interpolate the raw samples onto the 5-minute grid.
            res = np.interp(self.base_ts, ts, data.values)
            data = pd.Series(data=res, index=[arrow.get(t) for t in self.base_ts])
        return data

    def get_chilled_water_sensors(self):
        """Return the chilled-water flow/supply-temp/return-temp points."""
        qstr = """
        select ?cwf ?cwst ?cwrt where {
            ?cwf a brick:Chilled_Water_Flow_Sensor.
            ?cwf bf:srcid ?cwf_srcid.
            FILTER(CONTAINS(?cwf_srcid, "ION"))
            ?cwst a brick:Chilled_Water_Supply_Temperature_Sensor .
            ?cwrt a brick:Chilled_Water_Return_Temperature_Sensor .
        }
        """
        [varnames, rows] = self.brick.query(qstr)
        points = {varname: value for varname, value in zip(varnames, rows[0])}
        return points

    def calc_chilled_water_usage(self):
        """Store flow * (return - supply temperature) in water_thermal_power."""
        points = self.get_chilled_water_sensors()
        cwrt = self.get_point_data(points['?cwrt'])
        cwst = self.get_point_data(points['?cwst'])
        cwf = self.get_point_data(points['?cwf'])
        self.df['water_thermal_power'] = cwf.multiply(cwrt - cwst)

    def fit_coefficients(self):
        """OLS-fit the component powers (plus intercept C3) to the water power."""
        x = self.df[['Q_vav_cooling_power', 'Q_ahu_returned_power', 'C3']]
        y = self.df['water_thermal_power']
        self.model = sm.OLS(y, x).fit()
if __name__ == '__main__':
    # Connect to the local Virtuoso SPARQL endpoint with Brick schema 1.0.2.
    brick_endpoint = BrickEndpoint('http://localhost:8890/sparql', '1.0.2')
    hvacmeter = HvacMeter('ebu3b', brick_endpoint)
    # Measured chilled-water thermal power (regression target).
    hvacmeter.calc_chilled_water_usage()
    # Analyze only the first AHU found in the building model.
    ahus = hvacmeter.get_ahus()
    ahu = ahus[0]
    #hvacmeter.calc_ahu_cooling_power(ahu)
    hvacmeter.calc_ahu_returned_power(ahu)
    vavs = hvacmeter.get_vavs(ahu)
    hvacmeter.calc_vavs_cooling_power(ahu)
    # Fit the OLS coefficients relating component powers to water power.
    hvacmeter.fit_coefficients()
| [
"sys.exit",
"sparqlwrapper_brick.BrickEndpoint",
"arrow.get",
"numpy.interp",
"pdb.set_trace",
"pandas.DataFrame",
"statsmodels.api.OLS",
"pandas.date_range"
] | [((11579, 11633), 'sparqlwrapper_brick.BrickEndpoint', 'BrickEndpoint', (['"""http://localhost:8890/sparql"""', '"""1.0.2"""'], {}), "('http://localhost:8890/sparql', '1.0.2')\n", (11592, 11633), False, 'from sparqlwrapper_brick import BrickEndpoint\n'), ((1287, 1345), 'pandas.date_range', 'pd.date_range', (['self.begin_time', 'self.end_time'], {'freq': '"""5min"""'}), "(self.begin_time, self.end_time, freq='5min')\n", (1300, 1345), True, 'import pandas as pd\n'), ((1444, 1478), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.datetimes'}), '(index=self.datetimes)\n', (1456, 1478), True, 'import pandas as pd\n'), ((1095, 1116), 'arrow.get', 'arrow.get', (['(2018)', '(4)', '(6)'], {}), '(2018, 4, 6)\n', (1104, 1116), False, 'import arrow\n'), ((1150, 1172), 'arrow.get', 'arrow.get', (['(2018)', '(4)', '(14)'], {}), '(2018, 4, 14)\n', (1159, 1172), False, 'import arrow\n'), ((10334, 10374), 'numpy.interp', 'np.interp', (['self.base_ts', 'ts', 'data.values'], {}), '(self.base_ts, ts, data.values)\n', (10343, 10374), True, 'import numpy as np\n'), ((1503, 1516), 'arrow.get', 'arrow.get', (['dt'], {}), '(dt)\n', (1512, 1516), False, 'import arrow\n'), ((10175, 10190), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10188, 10190), False, 'import pdb\n'), ((10224, 10234), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10232, 10234), False, 'import sys\n'), ((10249, 10262), 'arrow.get', 'arrow.get', (['dt'], {}), '(dt)\n', (10258, 10262), False, 'import arrow\n'), ((11511, 11523), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'x'], {}), '(y, x)\n', (11517, 11523), True, 'import statsmodels.api as sm\n'), ((10421, 10433), 'arrow.get', 'arrow.get', (['t'], {}), '(t)\n', (10430, 10433), False, 'import arrow\n')] |
import csw93.main
from csw93 import get_design, get_wlp, get_cfi
import numpy as np
import pytest
# Global variables for all tests
run_sizes = [16, 32, 32, 64, 64]
design_indices = ["8-4.6", "8-3.5", "14-9.2", "12-6.2", "17-11.6"]
def test_design():
"""Function produces the correct design matrix"""
ref_matrix = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 1, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 1],
[1, 1, 1, 1, 0],
]
)
comp_matrix = get_design(16, "5-1.1")
assert (comp_matrix == ref_matrix).all()
def test_wlp():
"""Function produces the correct WLP"""
wlps = [
[7, 7, 0, 0, 1, 0],
[1, 2, 3, 1, 0],
[5, 55, 45, 96, 106],
[8, 20, 14, 8],
[105, 35, 280, 168],
]
computed_wlps = [get_wlp(x, design_indices[i]) for i, x in enumerate(run_sizes)]
assert all([x == wlps[i] for i, x in enumerate(computed_wlps)])
def test_cfi():
"""Function produces the correct cfi"""
cfi = (7, 13, 3, 27, 31)
commputed_cfi = [get_cfi(x, design_indices[i]) for i, x in enumerate(run_sizes)]
assert all([x == cfi[i] for i, x in enumerate(commputed_cfi)])
class TestDesign:
def test_run_size(self):
with pytest.raises(ValueError):
get_design(15, "8-4.1")
def test_index(self):
assert get_design(16, "8-3.1") is None
class TestWLP:
def test_run_size(self):
with pytest.raises(ValueError):
get_wlp(15, "8-4.1")
def test_index(self):
assert get_wlp(16, "8-3.1") is None
class TestCfi:
def test_run_size(self):
with pytest.raises(ValueError):
get_cfi(15, "8-4.1")
def test_index(self):
assert get_cfi(16, "8-3.1") is None
| [
"csw93.get_cfi",
"numpy.array",
"pytest.raises",
"csw93.get_design",
"csw93.get_wlp"
] | [((324, 619), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 1, 0, 1], [0, 0, 1, 1, 0], [0, 1,\n 0, 0, 1], [0, 1, 0, 1, 0], [0, 1, 1, 0, 0], [0, 1, 1, 1, 1], [1, 0, 0, \n 0, 1], [1, 0, 0, 1, 0], [1, 0, 1, 0, 0], [1, 0, 1, 1, 1], [1, 1, 0, 0, \n 0], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1], [1, 1, 1, 1, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 1, 0, 1], [0, 0, 1, 1, 0\n ], [0, 1, 0, 0, 1], [0, 1, 0, 1, 0], [0, 1, 1, 0, 0], [0, 1, 1, 1, 1],\n [1, 0, 0, 0, 1], [1, 0, 0, 1, 0], [1, 0, 1, 0, 0], [1, 0, 1, 1, 1], [1,\n 1, 0, 0, 0], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1], [1, 1, 1, 1, 0]])\n', (332, 619), True, 'import numpy as np\n'), ((842, 865), 'csw93.get_design', 'get_design', (['(16)', '"""5-1.1"""'], {}), "(16, '5-1.1')\n", (852, 865), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1149, 1178), 'csw93.get_wlp', 'get_wlp', (['x', 'design_indices[i]'], {}), '(x, design_indices[i])\n', (1156, 1178), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1393, 1422), 'csw93.get_cfi', 'get_cfi', (['x', 'design_indices[i]'], {}), '(x, design_indices[i])\n', (1400, 1422), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1586, 1611), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1599, 1611), False, 'import pytest\n'), ((1625, 1648), 'csw93.get_design', 'get_design', (['(15)', '"""8-4.1"""'], {}), "(15, '8-4.1')\n", (1635, 1648), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1691, 1714), 'csw93.get_design', 'get_design', (['(16)', '"""8-3.1"""'], {}), "(16, '8-3.1')\n", (1701, 1714), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1782, 1807), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1795, 1807), False, 'import pytest\n'), ((1821, 1841), 'csw93.get_wlp', 'get_wlp', (['(15)', '"""8-4.1"""'], {}), "(15, '8-4.1')\n", (1828, 1841), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1884, 1904), 'csw93.get_wlp', 
'get_wlp', (['(16)', '"""8-3.1"""'], {}), "(16, '8-3.1')\n", (1891, 1904), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((1972, 1997), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1985, 1997), False, 'import pytest\n'), ((2011, 2031), 'csw93.get_cfi', 'get_cfi', (['(15)', '"""8-4.1"""'], {}), "(15, '8-4.1')\n", (2018, 2031), False, 'from csw93 import get_design, get_wlp, get_cfi\n'), ((2074, 2094), 'csw93.get_cfi', 'get_cfi', (['(16)', '"""8-3.1"""'], {}), "(16, '8-3.1')\n", (2081, 2094), False, 'from csw93 import get_design, get_wlp, get_cfi\n')] |
# Copyright (c) 2016 by <NAME> and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import galsim
import numpy as np
import piff
import os
import subprocess
import fitsio
from piff_test_helper import timer
keys = ['focal_x', 'focal_y']
ntarget = 5
def generate_data(n_samples=100):
# generate as Norm(0, 1) for all parameters
np_rng = np.random.RandomState(1234)
X = np_rng.normal(0, 1, size=(n_samples, len(keys)))
y = np_rng.normal(0, 1, size=(n_samples, ntarget))
star_list = []
for Xi, yi in zip(X, y):
wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
image = galsim.Image(64,64, wcs=wcs)
properties = {k:v for k,v in zip(keys, Xi)}
stardata = piff.StarData(image, image.true_center, properties=properties)
# params = np.array([yi[ith] for ith in attr_target])
params = yi
starfit = piff.StarFit(params)
star = piff.Star(stardata, starfit)
star_list.append(star)
return star_list
@timer
def test_init():
# make sure we can init the interpolator
knn = piff.kNNInterp(keys)
@timer
def test_interp():
# logger = piff.config.setup_logger(verbose=3, log_file='test_knn_interp.log')
logger = None
# make sure we can put in the data
star_list = generate_data()
knn = piff.kNNInterp(keys, n_neighbors=1)
knn.initialize(star_list, logger=logger)
knn.solve(star_list, logger=logger)
# make prediction on first 10 items of star_list
star_list_predict = star_list[:10]
star_list_predicted = knn.interpolateList(star_list_predict, logger=logger)
# also on a single star
star_predict = star_list_predict[0]
star_predicted = knn.interpolate(star_predict)
# predicted stars should find their exact partner here, so they have the same data
np.testing.assert_array_equal(star_predicted.fit.params, star_predict.fit.params)
for attr in keys:
np.testing.assert_equal(star_predicted.data[attr], star_predict.data[attr])
# repeat for a star with its starfit removed
star_predict = star_list_predict[0]
star_predict.fit = None
star_predicted = knn.interpolate(star_predict)
# predicted stars should find their exact partner here, so they have the same data
# removed the fit, so don't check that
# np.testing.assert_array_equal(star_predicted.fit.params, star_predict.fit.params)
for attr in keys:
np.testing.assert_equal(star_predicted.data[attr], star_predict.data[attr])
@timer
def test_config():
# Take DES test image, and test doing a psf run with kNN interpolator
# Now test running it via the config parser
psf_file = os.path.join('output','knn_psf.fits')
config = {
'input' : {
'image_file_name' : 'input/DECam_00241238_01.fits.fz',
'cat_file_name' : 'input/DECam_00241238_01_psfcat_tb_maxmag_17.0_magcut_3.0_findstars.fits',
# What hdu is everything in?
'image_hdu': 1,
'badpix_hdu': 2,
'weight_hdu': 3,
'cat_hdu': 2,
# What columns in the catalog have things we need?
'x_col': 'XWIN_IMAGE',
'y_col': 'YWIN_IMAGE',
'ra': 'TELRA',
'dec': 'TELDEC',
'gain': 'GAINA',
'sky_col': 'BACKGROUND',
# How large should the postage stamp cutouts of the stars be?
'stamp_size': 31,
},
'psf' : {
'model' : { 'type': 'GSObjectModel',
'fastfit': True,
'gsobj': 'galsim.Gaussian(sigma=1.0)' },
'interp' : { 'type': 'kNNInterp',
'keys': ['u', 'v'],
'n_neighbors': 115,}
},
'output' : { 'file_name' : psf_file },
}
if __name__ != '__main__':
config['verbose'] = 0
config['input']['nstars'] = 20
config['psf']['interp']['n_neighbors'] = 19
test_factor = 0.04
else:
test_factor = 0.01
psf = piff.process(config)
# by using n_neighbors = 115, when there are only 117 stars in the catalog, we should expect
# that the standard deviation of the interpolated parameters should be small, since almost the
# same set of stars are being averaged in every case.
nstars = len(psf.stars)
np.testing.assert_array_less(
np.std([s.fit.params for s in psf.stars], axis=0),
test_factor*np.mean([s.fit.params for s in psf.stars], axis=0),
err_msg="Interpolated parameters show too much variation.")
@timer
def test_disk():
# make sure reading and writing of data works
star_list = generate_data()
knn = piff.kNNInterp(keys, n_neighbors=2)
knn.initialize(star_list)
knn.solve(star_list)
knn_file = os.path.join('output','knn_interp.fits')
with fitsio.FITS(knn_file,'rw',clobber=True) as f:
knn.write(f, 'knn')
knn2 = piff.kNNInterp.read(f, 'knn')
np.testing.assert_array_equal(knn.locations, knn2.locations)
np.testing.assert_array_equal(knn.targets, knn2.targets)
np.testing.assert_array_equal(knn.kwargs['keys'], knn2.kwargs['keys'])
np.testing.assert_equal(knn.knr_kwargs['n_neighbors'], knn2.knr_kwargs['n_neighbors'])
np.testing.assert_equal(knn.knr_kwargs['algorithm'], knn2.knr_kwargs['algorithm'])
@timer
def test_decam_wavefront():
file_name = 'input/Science-20121120s1-v20i2.fits'
extname = 'Science-20121120s1-v20i2'
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_decamlog')
knn = piff.des.DECamWavefront(file_name, extname, logger=logger)
n_samples = 2000
np_rng = np.random.RandomState(1234)
ccdnums = np_rng.randint(1, 63, n_samples)
star_list = []
for ccdnum in ccdnums:
# make some basic images, pass Xi as properties
# Draw the PSF onto an image. Let's go ahead and give it a non-trivial WCS.
wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
image = galsim.Image(64,64, wcs=wcs)
# set icen and jcen
icen = np_rng.randint(100, 2048)
jcen = np_rng.randint(100, 4096)
image.setCenter(icen, jcen)
image_pos = image.center
stardata = piff.StarData(image, image_pos, properties={'chipnum': ccdnum})
star = piff.Star(stardata, None)
star_list.append(star)
# get the focal positions
star_list = piff.des.DECamInfo().pixel_to_focalList(star_list)
star_list_predicted = knn.interpolateList(star_list)
# test misalignment
misalignment = {'z04d': 10, 'z10x': 10, 'z09y': -10}
knn.misalign_wavefront(misalignment)
star_list_misaligned = knn.interpolateList(star_list)
# test the prediction algorithm
y_predicted = np.array([s.fit.params for s in star_list_predicted])
y_misaligned = np.array([s.fit.params for s in star_list_misaligned])
X = np.array([knn.getProperties(s) for s in star_list])
# check the misalignments work
np.testing.assert_array_almost_equal(y_predicted[:,0], y_misaligned[:,0] - misalignment['z04d'])
np.testing.assert_array_almost_equal(y_predicted[:,5], y_misaligned[:,5] - misalignment['z09y'] * X[:,0])
np.testing.assert_array_almost_equal(y_predicted[:,6], y_misaligned[:,6] - misalignment['z10x'] * X[:,1])
# Check shape of misalignment if array
np.testing.assert_raises(ValueError, knn.misalign_wavefront, knn.misalignment[:,:2])
np.testing.assert_raises(ValueError, knn.misalign_wavefront, knn.misalignment[:-1,:])
# empty dict is equivalent to no misalignment
knn.misalign_wavefront({})
np.testing.assert_equal(knn.misalignment, 0.)
@timer
def test_decam_disk():
file_name = 'input/Science-20121120s1-v20i2.fits'
extname = 'Science-20121120s1-v20i2'
knn = piff.des.DECamWavefront(file_name, extname, n_neighbors=30)
misalignment = {'z04d': 10, 'z10x': 10, 'z09y': -10}
knn.misalign_wavefront(misalignment)
knn_file = os.path.join('output','decam_wavefront.fits')
with fitsio.FITS(knn_file,'rw',clobber=True) as f:
knn.write(f, 'decam_wavefront')
knn2 = piff.des.DECamWavefront.read(f, 'decam_wavefront')
np.testing.assert_array_equal(knn.locations, knn2.locations)
np.testing.assert_array_equal(knn.targets, knn2.targets)
np.testing.assert_array_equal(knn.keys, knn2.keys)
np.testing.assert_array_equal(knn.misalignment, knn2.misalignment)
assert knn.knr_kwargs['n_neighbors'] == knn2.knr_kwargs['n_neighbors'], 'n_neighbors not equal'
assert knn.knr_kwargs['algorithm'] == knn2.knr_kwargs['algorithm'], 'algorithm not equal'
@timer
def test_decaminfo():
# test switching between focal and pixel coordinates
n_samples = 500000
np_rng = np.random.RandomState(1234)
chipnums = np_rng.randint(1, 63, n_samples)
icen = np_rng.randint(1, 2048, n_samples)
jcen = np_rng.randint(1, 4096, n_samples)
decaminfo = piff.des.DECamInfo()
xPos, yPos = decaminfo.getPosition(chipnums, icen, jcen)
chipnums_ret, icen_ret, jcen_ret = decaminfo.getPixel(xPos, yPos)
xPos_ret, yPos_ret = decaminfo.getPosition(chipnums_ret, icen_ret, jcen_ret)
np.testing.assert_allclose(chipnums, chipnums_ret)
np.testing.assert_allclose(xPos, xPos_ret)
np.testing.assert_allclose(yPos, yPos_ret)
np.testing.assert_allclose(icen, icen_ret)
np.testing.assert_allclose(jcen, jcen_ret)
if __name__ == '__main__':
test_init()
test_interp()
test_config()
test_disk()
test_decam_wavefront()
test_decam_disk()
test_decaminfo()
| [
"piff.kNNInterp",
"numpy.testing.assert_equal",
"numpy.testing.assert_raises",
"piff.StarFit",
"numpy.array",
"numpy.random.RandomState",
"numpy.mean",
"numpy.testing.assert_array_almost_equal",
"piff.StarData",
"numpy.testing.assert_allclose",
"fitsio.FITS",
"numpy.testing.assert_array_equal"... | [((989, 1016), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1010, 1016), True, 'import numpy as np\n'), ((1716, 1736), 'piff.kNNInterp', 'piff.kNNInterp', (['keys'], {}), '(keys)\n', (1730, 1736), False, 'import piff\n'), ((1947, 1982), 'piff.kNNInterp', 'piff.kNNInterp', (['keys'], {'n_neighbors': '(1)'}), '(keys, n_neighbors=1)\n', (1961, 1982), False, 'import piff\n'), ((2452, 2538), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['star_predicted.fit.params', 'star_predict.fit.params'], {}), '(star_predicted.fit.params, star_predict.fit.\n params)\n', (2481, 2538), True, 'import numpy as np\n'), ((3299, 3337), 'os.path.join', 'os.path.join', (['"""output"""', '"""knn_psf.fits"""'], {}), "('output', 'knn_psf.fits')\n", (3311, 3337), False, 'import os\n'), ((4670, 4690), 'piff.process', 'piff.process', (['config'], {}), '(config)\n', (4682, 4690), False, 'import piff\n'), ((5336, 5371), 'piff.kNNInterp', 'piff.kNNInterp', (['keys'], {'n_neighbors': '(2)'}), '(keys, n_neighbors=2)\n', (5350, 5371), False, 'import piff\n'), ((5442, 5483), 'os.path.join', 'os.path.join', (['"""output"""', '"""knn_interp.fits"""'], {}), "('output', 'knn_interp.fits')\n", (5454, 5483), False, 'import os\n'), ((5615, 5675), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.locations', 'knn2.locations'], {}), '(knn.locations, knn2.locations)\n', (5644, 5675), True, 'import numpy as np\n'), ((5680, 5736), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.targets', 'knn2.targets'], {}), '(knn.targets, knn2.targets)\n', (5709, 5736), True, 'import numpy as np\n'), ((5741, 5811), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["knn.kwargs['keys']", "knn2.kwargs['keys']"], {}), "(knn.kwargs['keys'], knn2.kwargs['keys'])\n", (5770, 5811), True, 'import numpy as np\n'), ((5816, 5907), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (["knn.knr_kwargs['n_neighbors']", "knn2.knr_kwargs['n_neighbors']"], {}), "(knn.knr_kwargs['n_neighbors'], knn2.knr_kwargs[\n 'n_neighbors'])\n", (5839, 5907), True, 'import numpy as np\n'), ((5907, 5994), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["knn.knr_kwargs['algorithm']", "knn2.knr_kwargs['algorithm']"], {}), "(knn.knr_kwargs['algorithm'], knn2.knr_kwargs[\n 'algorithm'])\n", (5930, 5994), True, 'import numpy as np\n'), ((6302, 6360), 'piff.des.DECamWavefront', 'piff.des.DECamWavefront', (['file_name', 'extname'], {'logger': 'logger'}), '(file_name, extname, logger=logger)\n', (6325, 6360), False, 'import piff\n'), ((6396, 6423), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (6417, 6423), True, 'import numpy as np\n'), ((7491, 7544), 'numpy.array', 'np.array', (['[s.fit.params for s in star_list_predicted]'], {}), '([s.fit.params for s in star_list_predicted])\n', (7499, 7544), True, 'import numpy as np\n'), ((7564, 7618), 'numpy.array', 'np.array', (['[s.fit.params for s in star_list_misaligned]'], {}), '([s.fit.params for s in star_list_misaligned])\n', (7572, 7618), True, 'import numpy as np\n'), ((7719, 7821), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_predicted[:, 0]', "(y_misaligned[:, 0] - misalignment['z04d'])"], {}), "(y_predicted[:, 0], y_misaligned[:, 0] -\n misalignment['z04d'])\n", (7755, 7821), True, 'import numpy as np\n'), ((7820, 7932), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_predicted[:, 5]', "(y_misaligned[:, 5] - misalignment['z09y'] * X[:, 0])"], {}), "(y_predicted[:, 5], y_misaligned[:, 5] -\n misalignment['z09y'] * X[:, 0])\n", (7856, 7932), True, 'import numpy as np\n'), ((7930, 8042), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_predicted[:, 6]', "(y_misaligned[:, 6] - misalignment['z10x'] * X[:, 1])"], {}), 
"(y_predicted[:, 6], y_misaligned[:, 6] -\n misalignment['z10x'] * X[:, 1])\n", (7966, 8042), True, 'import numpy as np\n'), ((8084, 8174), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'knn.misalign_wavefront', 'knn.misalignment[:, :2]'], {}), '(ValueError, knn.misalign_wavefront, knn.\n misalignment[:, :2])\n', (8108, 8174), True, 'import numpy as np\n'), ((8173, 8264), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'knn.misalign_wavefront', 'knn.misalignment[:-1, :]'], {}), '(ValueError, knn.misalign_wavefront, knn.\n misalignment[:-1, :])\n', (8197, 8264), True, 'import numpy as np\n'), ((8345, 8391), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['knn.misalignment', '(0.0)'], {}), '(knn.misalignment, 0.0)\n', (8368, 8391), True, 'import numpy as np\n'), ((8528, 8587), 'piff.des.DECamWavefront', 'piff.des.DECamWavefront', (['file_name', 'extname'], {'n_neighbors': '(30)'}), '(file_name, extname, n_neighbors=30)\n', (8551, 8587), False, 'import piff\n'), ((8703, 8749), 'os.path.join', 'os.path.join', (['"""output"""', '"""decam_wavefront.fits"""'], {}), "('output', 'decam_wavefront.fits')\n", (8715, 8749), False, 'import os\n'), ((8914, 8974), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.locations', 'knn2.locations'], {}), '(knn.locations, knn2.locations)\n', (8943, 8974), True, 'import numpy as np\n'), ((8979, 9035), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.targets', 'knn2.targets'], {}), '(knn.targets, knn2.targets)\n', (9008, 9035), True, 'import numpy as np\n'), ((9040, 9090), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.keys', 'knn2.keys'], {}), '(knn.keys, knn2.keys)\n', (9069, 9090), True, 'import numpy as np\n'), ((9095, 9161), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['knn.misalignment', 'knn2.misalignment'], {}), '(knn.misalignment, knn2.misalignment)\n', 
(9124, 9161), True, 'import numpy as np\n'), ((9480, 9507), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (9501, 9507), True, 'import numpy as np\n'), ((9665, 9685), 'piff.des.DECamInfo', 'piff.des.DECamInfo', ([], {}), '()\n', (9683, 9685), False, 'import piff\n'), ((9903, 9953), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['chipnums', 'chipnums_ret'], {}), '(chipnums, chipnums_ret)\n', (9929, 9953), True, 'import numpy as np\n'), ((9958, 10000), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['xPos', 'xPos_ret'], {}), '(xPos, xPos_ret)\n', (9984, 10000), True, 'import numpy as np\n'), ((10005, 10047), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yPos', 'yPos_ret'], {}), '(yPos, yPos_ret)\n', (10031, 10047), True, 'import numpy as np\n'), ((10052, 10094), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['icen', 'icen_ret'], {}), '(icen, icen_ret)\n', (10078, 10094), True, 'import numpy as np\n'), ((10099, 10141), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jcen', 'jcen_ret'], {}), '(jcen, jcen_ret)\n', (10125, 10141), True, 'import numpy as np\n'), ((1192, 1236), 'galsim.JacobianWCS', 'galsim.JacobianWCS', (['(0.26)', '(0.05)', '(-0.08)', '(-0.29)'], {}), '(0.26, 0.05, -0.08, -0.29)\n', (1210, 1236), False, 'import galsim\n'), ((1253, 1282), 'galsim.Image', 'galsim.Image', (['(64)', '(64)'], {'wcs': 'wcs'}), '(64, 64, wcs=wcs)\n', (1265, 1282), False, 'import galsim\n'), ((1353, 1415), 'piff.StarData', 'piff.StarData', (['image', 'image.true_center'], {'properties': 'properties'}), '(image, image.true_center, properties=properties)\n', (1366, 1415), False, 'import piff\n'), ((1517, 1537), 'piff.StarFit', 'piff.StarFit', (['params'], {}), '(params)\n', (1529, 1537), False, 'import piff\n'), ((1553, 1581), 'piff.Star', 'piff.Star', (['stardata', 'starfit'], {}), '(stardata, starfit)\n', (1562, 1581), False, 'import piff\n'), ((2564, 
2639), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['star_predicted.data[attr]', 'star_predict.data[attr]'], {}), '(star_predicted.data[attr], star_predict.data[attr])\n', (2587, 2639), True, 'import numpy as np\n'), ((3058, 3133), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['star_predicted.data[attr]', 'star_predict.data[attr]'], {}), '(star_predicted.data[attr], star_predict.data[attr])\n', (3081, 3133), True, 'import numpy as np\n'), ((5020, 5069), 'numpy.std', 'np.std', (['[s.fit.params for s in psf.stars]'], {'axis': '(0)'}), '([s.fit.params for s in psf.stars], axis=0)\n', (5026, 5069), True, 'import numpy as np\n'), ((5492, 5533), 'fitsio.FITS', 'fitsio.FITS', (['knn_file', '"""rw"""'], {'clobber': '(True)'}), "(knn_file, 'rw', clobber=True)\n", (5503, 5533), False, 'import fitsio\n'), ((5581, 5610), 'piff.kNNInterp.read', 'piff.kNNInterp.read', (['f', '"""knn"""'], {}), "(f, 'knn')\n", (5600, 5610), False, 'import piff\n'), ((6171, 6206), 'piff.config.setup_logger', 'piff.config.setup_logger', ([], {'verbose': '(2)'}), '(verbose=2)\n', (6195, 6206), False, 'import piff\n'), ((6234, 6291), 'piff.config.setup_logger', 'piff.config.setup_logger', ([], {'log_file': '"""output/test_decamlog"""'}), "(log_file='output/test_decamlog')\n", (6258, 6291), False, 'import piff\n'), ((6673, 6717), 'galsim.JacobianWCS', 'galsim.JacobianWCS', (['(0.26)', '(0.05)', '(-0.08)', '(-0.29)'], {}), '(0.26, 0.05, -0.08, -0.29)\n', (6691, 6717), False, 'import galsim\n'), ((6734, 6763), 'galsim.Image', 'galsim.Image', (['(64)', '(64)'], {'wcs': 'wcs'}), '(64, 64, wcs=wcs)\n', (6746, 6763), False, 'import galsim\n'), ((6962, 7025), 'piff.StarData', 'piff.StarData', (['image', 'image_pos'], {'properties': "{'chipnum': ccdnum}"}), "(image, image_pos, properties={'chipnum': ccdnum})\n", (6975, 7025), False, 'import piff\n'), ((7042, 7067), 'piff.Star', 'piff.Star', (['stardata', 'None'], {}), '(stardata, None)\n', (7051, 7067), False, 'import piff\n'), 
((8758, 8799), 'fitsio.FITS', 'fitsio.FITS', (['knn_file', '"""rw"""'], {'clobber': '(True)'}), "(knn_file, 'rw', clobber=True)\n", (8769, 8799), False, 'import fitsio\n'), ((8859, 8909), 'piff.des.DECamWavefront.read', 'piff.des.DECamWavefront.read', (['f', '"""decam_wavefront"""'], {}), "(f, 'decam_wavefront')\n", (8887, 8909), False, 'import piff\n'), ((5095, 5145), 'numpy.mean', 'np.mean', (['[s.fit.params for s in psf.stars]'], {'axis': '(0)'}), '([s.fit.params for s in psf.stars], axis=0)\n', (5102, 5145), True, 'import numpy as np\n'), ((7146, 7166), 'piff.des.DECamInfo', 'piff.des.DECamInfo', ([], {}), '()\n', (7164, 7166), False, 'import piff\n')] |
import numpy as np
post_a = None
post_b = None
bandit = None
total_reward = 0
def agent(observation, configuration):
global total_reward, bandit, post_a, post_b
n_bandits = configuration.banditCount
if observation.step == 0:
post_a = np.ones(n_bandits)
post_b = np.ones(n_bandits)
else:
r = observation.reward - total_reward
total_reward = observation.reward
# Update beta posterior
post_a[bandit] += r
post_b[bandit] += (1 - r)
samples = np.random.beta(post_a, post_b)
bandit = int(np.argmax(samples))
return bandit
| [
"numpy.argmax",
"numpy.random.beta",
"numpy.ones"
] | [((530, 560), 'numpy.random.beta', 'np.random.beta', (['post_a', 'post_b'], {}), '(post_a, post_b)\n', (544, 560), True, 'import numpy as np\n'), ((263, 281), 'numpy.ones', 'np.ones', (['n_bandits'], {}), '(n_bandits)\n', (270, 281), True, 'import numpy as np\n'), ((299, 317), 'numpy.ones', 'np.ones', (['n_bandits'], {}), '(n_bandits)\n', (306, 317), True, 'import numpy as np\n'), ((578, 596), 'numpy.argmax', 'np.argmax', (['samples'], {}), '(samples)\n', (587, 596), True, 'import numpy as np\n')] |
"""a module that houses TOV solvers in the "standard" formulation
"""
__author__ = "<NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
from scipy.integrate import odeint
from scipy.special import hyp2f1
from universality.utils.units import (G, c2, Msun)
#-------------------------------------------------
#DEFAULT_MAX_DR = 1e5 ### maximum step size allowed within the integrator (in standard units, which should be in cm)
DEFAULT_MAX_DR = 1e6
DEFAULT_MIN_DR = 1.0 ### the smallest step size we allow (in standard units, which should be cm)
DEFAULT_GUESS_FRAC = 0.1 ### how much of the way to the vanishing pressure we guess via Newton's method
DEFAULT_INITIAL_FRAC = 1e-3 ### the initial change in pressure we allow when setting the intial conditions
DEFAULT_RTOL = 1e-4
DEFAULT_MXSTEP = 10000
#------------------------
TWOPI = 2*np.pi
FOURPI = 2*TWOPI
Gc2 = G/c2
#-------------------------------------------------
### Standard formulation of the TOV equations
#-------------------------------------------------
### basic evolutionary equations
def dmdr(r, epsc2):
return FOURPI * r**2 * epsc2
def dmbdr(r, rho, m):
return dmdr(r, rho) * (1 - 2*Gc2*m/r)**-0.5
def dpc2dr(r, pc2, m, epsc2):
return - Gc2 * (epsc2 + pc2)*(m + FOURPI * r**3 * pc2)/(r * (r - 2*Gc2*m))
def detadr(r, pc2, m, eta, epsc2, cs2c2):
invf = (1. - 2.*Gc2*m/r)**-1
A = 2. * invf * (1. - 3.*Gc2*m/r - TWOPI*Gc2*r**2 * (epsc2 + 3.*pc2))
B = invf * (6. - FOURPI*Gc2*r**2 * (epsc2 + pc2)*(3. + 1./cs2c2))
return -1.*(eta*(eta - 1.) + A*eta - B)/r
def domegadr(r, pc2, m, omega, epsc2):
P = FOURPI * Gc2 * r**3 * (epsc2 + pc2)/ (r - 2.*Gc2*m)
return (P*(omega + 4.) - omega*(omega + 3.))/r
#-------------------------------------------------
# functions for values at the stellar surface
#-------------------------------------------------
def eta2lambda(r, m, eta): ### dimensionless tidal deformability
C = Gc2*m/r # compactness
fR = 1.-2.*C
F = hyp2f1(3., 5., 6., 2.*C) # a hypergeometric function
z = 2.*C
dFdz = (5./(2.*z**6.)) * (z*(z*(z*(3.*z*(5. + z) - 110.) + 150.) - 60.) / (z - 1.)**3 + 60.*np.log(1. - z))
RdFdr = -2.*C*dFdz # log derivative of hypergeometric function
k2el = 0.5*(eta - 2. - 4.*C/fR) / (RdFdr -F*(eta + 3. - 4.*C/fR)) # gravitoelectric quadrupole Love number
return (2./3.)*(k2el/C**5)
def omega2i(r, omega): ### moment of inertia
return (omega/(3. + omega)) * r**3/(2.*Gc2)
#-------------------------------------------------
# initial conditions
#-------------------------------------------------
def initial_pc2(pc2i, frac):
return (1. - frac)*pc2i ### assume a constant slope over a small change in the pressure
def initial_r(pc2i, ec2i, frac):
return (frac*pc2i / ( G * (ec2i + pc2i) * (ec2i/3. + pc2i) * TWOPI ) )**0.5 ### solve for the radius that corresponds to that small change
def initial_m(r, ec2i):
return FOURPI * r**3 * ec2i / 3. # gravitational mass
def initial_mb(r, rhoi):
return FOURPI * r**3 * rhoi / 3. # gravitational mass
def initial_eta(r, pc2i, ec2i, cs2c2i):
return 2. + FOURPI * Gc2 * r**2 * (9.*pc2i + 13.*ec2i + 3.*(pc2i+ec2i)/cs2c2i)/21. # intial perturbation for dimensionless tidal deformability
def initial_omega(r, pc2i, ec2i):
return 16.*np.pi * Gc2 * r**2 * (pc2i + ec2i)/5. # initial frame-dgragging function
#-------------------------------------------------
# central loop that solves the TOV equations given a set of coupled ODEs
#-------------------------------------------------
def engine(
r,
vec,
eos,
dvecdr_func,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
mxstp=DEFAULT_MXSTEP,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density)
"""
vec = np.array(vec, dtype=float)
while vec[0] > 0: ### continue until pressure vanishes
vec0 = vec[:] # store the current location as the old location
r0 = r
### estimate the radius at which this p will vanish via Newton's method
r = r0 + max(min_dr, min(max_dr, guess_frac * abs(vec[0]/dvecdr_func(vec, r, eos)[0])))
### integrate out until we hit that estimate
vec[:] = odeint(dvecdr_func, vec0, (r0, r), args=(eos,), rtol=rtol, hmax=max_dr, mxstep=mxstep)[-1,:] ### retain only the last point
### return to client, who will then interpolate to find the surface
### interpolate to find stellar surface
p = [vec0[0], vec[0]]
# radius
r = np.interp(0, p, [r0, r])
# the rest of the macro properties
vals = [np.interp(0, p, [vec0[i], vec[i]]) for i in range(1, len(vec))]
return r, vals
#-------------------------------------------------
### the solver that yields all known macroscopic quantites
MACRO_COLS = ['M', 'R', 'Lambda', 'I', 'Mb'] ### the column names for what we compute
def dvecdr(vec, r, eos):
    """Right-hand side d(vec)/dr for the full macroscopic system.

    vec = (pc2, m, eta, omega, mb); eos = (pressure, energy_density,
    baryon_density, cs2c2) tables, interpolated at the current pressure.
    """
    pc2, m, eta, omega, mb = vec
    pressure_table = eos[0]
    # look up the thermodynamic quantities at the current pressure
    energy_density = np.interp(pc2, pressure_table, eos[1])
    baryon_density = np.interp(pc2, pressure_table, eos[2])
    sound_speed2 = np.interp(pc2, pressure_table, eos[3])
    return (
        dpc2dr(r, pc2, m, energy_density),
        dmdr(r, energy_density),
        detadr(r, pc2, m, eta, energy_density, sound_speed2),
        domegadr(r, pc2, m, omega, energy_density),
        dmbdr(r, baryon_density, m),
    )
def initial_condition(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
    """determines the initial conditions for a stellar model with central pressure pc
    this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
    expects eos = (pressure, energy_density, baryon_density, cs2c2)
    returns (r, (pc2, m, eta, omega, mb)) suitable for engine() with dvecdr
    """
    # central values of the thermodynamic quantities
    ec2i = np.interp(pc2i, eos[0], eos[1])
    rhoi = np.interp(pc2i, eos[0], eos[2])
    cs2c2i = np.interp(pc2i, eos[0], eos[3])
    # small-radius (analytic) expansions of each integrated quantity
    pc2 = initial_pc2(pc2i, frac)
    r = initial_r(pc2i, ec2i, frac)
    m = initial_m(r, ec2i)
    mb = initial_mb(r, rhoi)
    eta = initial_eta(r, pc2i, ec2i, cs2c2i)
    omega = initial_omega(r, pc2i, ec2i)
    return r, (pc2, m, eta, omega, mb)
def integrate(
        pc2i,
        eos,
        min_dr=DEFAULT_MIN_DR,
        max_dr=DEFAULT_MAX_DR,
        guess_frac=DEFAULT_GUESS_FRAC,
        initial_frac=DEFAULT_INITIAL_FRAC,
        rtol=DEFAULT_RTOL,
    ):
    """integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
    expects eos = (pressure, energy_density, baryon_density, cs2c2)
    returns (M [Msun], R [km], Lambda, I [1e45 CGS], Mb [Msun]) — same order as MACRO_COLS
    """
    r, vec = initial_condition(pc2i, eos, frac=initial_frac)
    if vec[0] < 0: ### guarantee that we enter the loop
        raise RuntimeError('bad initial condition!')
    r, (m, eta, omega, mb) = engine(
        r,
        vec,
        eos,
        dvecdr,
        min_dr=min_dr,
        max_dr=max_dr,
        guess_frac=guess_frac,
        rtol=rtol,
    )
    # compute tidal deformability
    l = eta2lambda(r, m, eta)
    # compute moment of inertia
    i = omega2i(r, omega)
    # convert to "standard" units
    m /= Msun ### reported in units of solar masses, not grams
    mb /= Msun
    r *= 1e-5 ### convert from cm to km
    i /= 1e45 ### normalize this to a common value but still in CGS
    return m, r, l, i, mb
#-------------------------------------------------
### light-weight solver that only includes M and R
MACRO_COLS_MR = ['M', 'R']
def dvecdr_MR(vec, r, eos):
    '''returns d(p, m)/dr
    expects: pressurec2, energy_densityc2 = eos
    vec = (pc2, m)
    '''
    pc2, m = vec
    # energy density at the current pressure
    epsc2 = np.interp(pc2, eos[0], eos[1])
    # NOTE: the baryon-density lookup (eos[2]) from the original was removed;
    # its value was never used, and this function sits in the odeint hot loop
    return \
        dpc2dr(r, pc2, m, epsc2), \
        dmdr(r, epsc2)
def initial_condition_MR(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
    """determines the initial conditions for a stellar model with central pressure pc
    this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
    returns (r, (pc2, m)) suitable for engine() with dvecdr_MR
    """
    # central energy density (the unused baryon-density lookup was removed)
    ec2i = np.interp(pc2i, eos[0], eos[1])
    # seed values from the small-radius expansions
    pc2 = initial_pc2(pc2i, frac)
    r = initial_r(pc2i, ec2i, frac)
    m = initial_m(r, ec2i)
    return r, (pc2, m)
def integrate_MR(
        pc2i,
        eos,
        min_dr=DEFAULT_MIN_DR,
        max_dr=DEFAULT_MAX_DR,
        guess_frac=DEFAULT_GUESS_FRAC,
        initial_frac=DEFAULT_INITIAL_FRAC,
        rtol=DEFAULT_RTOL,
    ):
    """integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
    expects eos = (pressure, energy_density, baryon_density, cs2c2)
    light-weight variant: returns only (M [Msun], R [km]) — same order as MACRO_COLS_MR
    """
    r, vec = initial_condition_MR(pc2i, eos, frac=initial_frac)
    if vec[0] < 0: ### guarantee that we enter the loop
        raise RuntimeError('bad initial condition!')
    r, (m,) = engine(
        r,
        vec,
        eos,
        dvecdr_MR,
        min_dr=min_dr,
        max_dr=max_dr,
        guess_frac=guess_frac,
        rtol=rtol,
    )
    # convert to "standard" units
    m /= Msun ### reported in units of solar masses, not grams
    r *= 1e-5 ### convert from cm to km
    return m, r
#-------------------------------------------------
### light-weight solver that only includes M, R, and Lambda
MACRO_COLS_MRLambda = ['M', 'R', 'Lambda']
def dvecdr_MRLambda(vec, r, eos):
    '''returns d(p, m, eta)/dr
    expects: eos = (pressurec2, energy_densityc2, baryon_density, cs2c2)
    vec = (pc2, m, eta)
    (the original docstring said "d(p, m)/dr", which did not match the
    three-component return value)
    '''
    pc2, m, eta = vec
    # thermodynamic quantities at the current pressure
    epsc2 = np.interp(pc2, eos[0], eos[1])
    cs2c2 = np.interp(pc2, eos[0], eos[3])
    # NOTE: the unused baryon-density lookup (eos[2]) was removed; this
    # function is evaluated inside the ODE integrator's inner loop
    return \
        dpc2dr(r, pc2, m, epsc2), \
        dmdr(r, epsc2), \
        detadr(r, pc2, m, eta, epsc2, cs2c2)
def initial_condition_MRLambda(pc2i, eos, frac=DEFAULT_INITIAL_FRAC):
    """determines the initial conditions for a stellar model with central pressure pc
    this is done by analytically integrating the TOV equations over very small radii to avoid the divergence as r->0
    returns (r, (pc2, m, eta)) suitable for engine() with dvecdr_MRLambda
    """
    # central values (the unused baryon-density lookup was removed)
    ec2i = np.interp(pc2i, eos[0], eos[1])
    cs2c2i = np.interp(pc2i, eos[0], eos[3])
    # seed values from the small-radius expansions
    pc2 = initial_pc2(pc2i, frac)
    r = initial_r(pc2i, ec2i, frac)
    m = initial_m(r, ec2i)
    eta = initial_eta(r, pc2i, ec2i, cs2c2i)
    return r, (pc2, m, eta)
def integrate_MRLambda(
        pc2i,
        eos,
        min_dr=DEFAULT_MIN_DR,
        max_dr=DEFAULT_MAX_DR,
        guess_frac=DEFAULT_GUESS_FRAC,
        initial_frac=DEFAULT_INITIAL_FRAC,
        rtol=DEFAULT_RTOL,
    ):
    """integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
    expects eos = (pressure, energy_density, baryon_density, cs2c2)
    light-weight variant: returns (M [Msun], R [km], Lambda) — same order as MACRO_COLS_MRLambda
    """
    r, vec = initial_condition_MRLambda(pc2i, eos, frac=initial_frac)
    if vec[0] < 0: ### guarantee that we enter the loop
        raise RuntimeError('bad initial condition!')
    r, (m, eta) = engine(
        r,
        vec,
        eos,
        dvecdr_MRLambda,
        min_dr=min_dr,
        max_dr=max_dr,
        guess_frac=guess_frac,
        rtol=rtol,
    )
    # compute tidal deformability
    l = eta2lambda(r, m, eta)
    # convert to "standard" units
    m /= Msun ### reported in units of solar masses, not grams
    r *= 1e-5 ### convert from cm to km
    return m, r, l
| [
"scipy.integrate.odeint",
"numpy.log",
"numpy.array",
"numpy.interp",
"scipy.special.hyp2f1"
] | [((2017, 2047), 'scipy.special.hyp2f1', 'hyp2f1', (['(3.0)', '(5.0)', '(6.0)', '(2.0 * C)'], {}), '(3.0, 5.0, 6.0, 2.0 * C)\n', (2023, 2047), False, 'from scipy.special import hyp2f1\n'), ((4056, 4082), 'numpy.array', 'np.array', (['vec'], {'dtype': 'float'}), '(vec, dtype=float)\n', (4064, 4082), True, 'import numpy as np\n'), ((4766, 4790), 'numpy.interp', 'np.interp', (['(0)', 'p', '[r0, r]'], {}), '(0, p, [r0, r])\n', (4775, 4790), True, 'import numpy as np\n'), ((5195, 5225), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[1]'], {}), '(pc2, eos[0], eos[1])\n', (5204, 5225), True, 'import numpy as np\n'), ((5236, 5266), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[2]'], {}), '(pc2, eos[0], eos[2])\n', (5245, 5266), True, 'import numpy as np\n'), ((5279, 5309), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[3]'], {}), '(pc2, eos[0], eos[3])\n', (5288, 5309), True, 'import numpy as np\n'), ((5788, 5819), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[1]'], {}), '(pc2i, eos[0], eos[1])\n', (5797, 5819), True, 'import numpy as np\n'), ((5831, 5862), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[2]'], {}), '(pc2i, eos[0], eos[2])\n', (5840, 5862), True, 'import numpy as np\n'), ((5876, 5907), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[3]'], {}), '(pc2i, eos[0], eos[3])\n', (5885, 5907), True, 'import numpy as np\n'), ((7600, 7630), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[1]'], {}), '(pc2, eos[0], eos[1])\n', (7609, 7630), True, 'import numpy as np\n'), ((7641, 7671), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[2]'], {}), '(pc2, eos[0], eos[2])\n', (7650, 7671), True, 'import numpy as np\n'), ((8032, 8063), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[1]'], {}), '(pc2i, eos[0], eos[1])\n', (8041, 8063), True, 'import numpy as np\n'), ((8075, 8106), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[2]'], {}), '(pc2i, eos[0], eos[2])\n', (8084, 8106), True, 'import numpy as np\n'), 
((9481, 9511), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[1]'], {}), '(pc2, eos[0], eos[1])\n', (9490, 9511), True, 'import numpy as np\n'), ((9522, 9552), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[2]'], {}), '(pc2, eos[0], eos[2])\n', (9531, 9552), True, 'import numpy as np\n'), ((9565, 9595), 'numpy.interp', 'np.interp', (['pc2', 'eos[0]', 'eos[3]'], {}), '(pc2, eos[0], eos[3])\n', (9574, 9595), True, 'import numpy as np\n'), ((10010, 10041), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[1]'], {}), '(pc2i, eos[0], eos[1])\n', (10019, 10041), True, 'import numpy as np\n'), ((10053, 10084), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[2]'], {}), '(pc2i, eos[0], eos[2])\n', (10062, 10084), True, 'import numpy as np\n'), ((10098, 10129), 'numpy.interp', 'np.interp', (['pc2i', 'eos[0]', 'eos[3]'], {}), '(pc2i, eos[0], eos[3])\n', (10107, 10129), True, 'import numpy as np\n'), ((4842, 4876), 'numpy.interp', 'np.interp', (['(0)', 'p', '[vec0[i], vec[i]]'], {}), '(0, p, [vec0[i], vec[i]])\n', (4851, 4876), True, 'import numpy as np\n'), ((4477, 4567), 'scipy.integrate.odeint', 'odeint', (['dvecdr_func', 'vec0', '(r0, r)'], {'args': '(eos,)', 'rtol': 'rtol', 'hmax': 'max_dr', 'mxstep': 'mxstep'}), '(dvecdr_func, vec0, (r0, r), args=(eos,), rtol=rtol, hmax=max_dr,\n mxstep=mxstep)\n', (4483, 4567), False, 'from scipy.integrate import odeint\n'), ((2180, 2195), 'numpy.log', 'np.log', (['(1.0 - z)'], {}), '(1.0 - z)\n', (2186, 2195), True, 'import numpy as np\n')] |
"Define basic subroutines useful for all AI players"
from ..board import black, white, empty, Board, InvalidMoveError
import numpy as np
import unittest
class Playerlibrary(object):
    """
    A library class that holds basic subroutines that are useful for all
    kinds of artificial-intelligence-type (AI-type) players, e.g. the
    function ``win_if_possible`` that checks if the game can be won in
    the next move.
    All the functions are written to take the same arguments as
    ``Player.make_move`` such that the call from within ``make_move``
    looks like e.g. ``self.win_if_possible(gui)``.
    """
    def line_getter_functions(self, gui, length=5):
        # Accessors for all four line orientations (column, row, and both
        # diagonals); each takes a board position (x, y) and returns
        # ``(line, positions)`` for a line of ``length`` fields.
        return [lambda x,y: gui.board.get_column(x,y,length=length), lambda x,y: gui.board.get_row(x,y, length=length),
                lambda x,y: gui.board.get_diagonal_upleft_to_lowright(x,y, length=length),
                lambda x,y: gui.board.get_diagonal_lowleft_to_upright(x,y, length=length)]
    def random_move(self, gui):
        "Place a stone on a randomly chosen empty field."
        moves_left = gui.board.moves_left
        # retry until a stone was actually placed (the move counter changes)
        while moves_left == gui.board.moves_left:
            x = np.random.randint(gui.board.width)
            y = np.random.randint(gui.board.height)
            try:
                gui.board[y,x] = self.color
            except InvalidMoveError:
                continue
    def extend_one(self, gui):
        "Place a stone next to another one but only if extendable to five."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # search pattern: one of own color and four empty
                    if len(np.where(line == empty)[0]) == 4 and len(np.where(line == self.color)[0]) == 1:
                        index_own_color = np.where(line == self.color)[0][0]
                        if index_own_color == 0:
                            gui.board[positions[1]] = self.color
                            return True
                        else:
                            # place directly before the existing stone
                            gui.board[positions[index_own_color - 1]] = self.color
                            return True
        return False
    def block_open_four(self, gui):
        "Block a line of four stones if at least one end open."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search four of opponent's color and one empty
                    if len(np.where(line == empty)[0]) == 1 and len(np.where(line == -self.color)[0]) == 4:
                        index_of_empty = np.where(line == empty)[0][0]
                        gui.board[positions[index_of_empty]] = self.color
                        return True
        return False
    def block_doubly_open_two(self, gui):
        "Block a line of two if both sides are open."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # select pattern [<all empty>, <opponent's color>, <opponent's color>, <all empty>]
                    if ( line == (empty, -self.color, -self.color, empty, empty) ).all():
                        gui.board[positions[3]] = self.color
                        return True
                    elif ( line == (empty, empty, -self.color, -self.color, empty) ).all():
                        gui.board[positions[1]] = self.color
                        return True
        return False
    def block_twice_to_three_or_more(self, gui):
        'Prevent opponent from closing two lines of three or more simultaneously.'
        line_getter_functions = self.line_getter_functions(gui)
        # remember lines already seen so a crossing point can be detected
        line_positions = []
        getter_functions = []
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in line_getter_functions:
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # search two of opponent's color and three empty in two crossing lines at an empty position
                    opponent_stones_in_line = len(np.where(line == -self.color)[0])
                    if opponent_stones_in_line >= 2 and len(np.where(line == empty)[0]) == 5 - opponent_stones_in_line:
                        for oldpos, old_getter in zip(line_positions, getter_functions):
                            for pos in positions:
                                if f != old_getter and pos in oldpos and gui.board[pos] == empty:
                                    gui.board[pos] = self.color
                                    return True
                        line_positions.append(positions)
                        getter_functions.append(f)
        return False
    def block_open_three(self, gui):
        "Block a line of three."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search three of opponent's color and two empty
                    if len(np.where(line == empty)[0]) == 2 and len(np.where(line == -self.color)[0]) == 3:
                        indices_opponent = np.where(line == -self.color)[0]
                        # only act on three CONSECUTIVE opponent stones
                        if not (indices_opponent[1] == indices_opponent[0] + 1 and \
                                indices_opponent[2] == indices_opponent[1] + 1):
                            continue
                        if 0 not in indices_opponent:
                            gui.board[positions[indices_opponent[0] - 1]] = self.color
                            return True
                        else:
                            gui.board[positions[3]] = self.color
                            return True
        return False
    def block_open_two(self, gui):
        "Block a line of two."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search pattern [<all empty or boundary>, opponent, opponent, <all empty or boundary>]
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 2:
                        indices_opponent = np.where(line == -self.color)[0]
                        # only act on two CONSECUTIVE opponent stones
                        if indices_opponent[1] == indices_opponent[0] + 1:
                            if indices_opponent[0] == 0:
                                gui.board[positions[3]] = self.color
                                return True
                            else:
                                gui.board[positions[indices_opponent[0]-1]] = self.color
                                return True
        return False
    def block_doubly_open_three(self, gui):
        "Block a line of three but only if both sides are open."
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # pattern [empty, opponent, opponent, opponent, empty]; block the left end
                    if ( line == (empty, -self.color, -self.color, -self.color, empty) ).all():
                        gui.board[positions[0]] = self.color
                        return True
        return False
    def extend_three_to_four(self, gui):
        """
        Extend a line of three stones to a line of four stones but only
        if there is enough space to be completed to five.
        """
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search three of own color and two empty
                    if len(np.where(line == empty)[0]) == 2 and len(np.where(line == self.color)[0]) == 3:
                        indices_empty = np.where(line == empty)[0]
                        if 0 not in indices_empty:
                            gui.board[positions[indices_empty[0]]] = self.color
                            return True
                        else:
                            gui.board[positions[indices_empty[1]]] = self.color
                            return True
        return False
    def block_to_doubly_open_four(self, gui):
        """
        Prevent the opponent from getting a line of four with both ends
        open.
        """
        # note: scans lines of length 6 here (one field of margin on each side)
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui, length=6):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search pattern [empty, <extendable to 4 times opponent>, empty]
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 3:
                        indices_empty = np.where(line == empty)[0]
                        if not (line[0] == empty and line[-1] == empty):
                            continue
                        else:
                            gui.board[positions[indices_empty[1]]] = self.color
                            return True
        return False
    def extend_three_to_doubly_open_four(self, gui):
        """
        Extend a line of three stones to a line of four stones but only
        if there is enough space to be completed to five ON BOTH SIDES.
        """
        # note: scans lines of length 6 here (one field of margin on each side)
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui, length=6):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search pattern [empty, <extendable to 4 times own>, empty]
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 3:
                        indices_empty = np.where(line == empty)[0]
                        if not (line[0] == empty and line[-1] == empty):
                            continue
                        else:
                            gui.board[positions[indices_empty[1]]] = self.color
                            return True
        return False
    def extend_two_to_three(self, gui):
        """
        Extend a line of two stones to a line of three stones but only
        if there is enough space to be completed to five.
        """
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection: search two of own color and three empty
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
                        indices_empty = np.where(line == empty)[0]
                        # choose one of the three empty fields at random
                        gui.board[positions[indices_empty[np.random.randint(3)]]] = self.color
                        return True
        return False
    def extend_twice_two_to_three(self, gui):
        """
        Extend two crossing lines of two stones to two lines of three
        stones but only if there is enough space to be completed to five.
        """
        # remember lines already seen so a crossing point can be detected
        line_positions = []
        getter_functions = []
        for f in self.line_getter_functions(gui):
            for i in range(gui.board.height):
                for j in range(gui.board.width):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # search two of own color and three empty in two crossing lines at an empty position
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
                        for oldpos, old_getter in zip(line_positions, getter_functions):
                            for pos in positions:
                                if f != old_getter and pos in oldpos and gui.board[pos] == empty:
                                    gui.board[pos] = self.color
                                    return True
                        line_positions.append(positions)
                        getter_functions.append(f)
        return False
    def check_if_immediate_win_possible(self, gui):
        """
        Check if it is possible to place a stone such thath the player wins
        immediately.
        Return the position to place the stone if possible, otherwise return None.
        """
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in self.line_getter_functions(gui):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # selection:
                    # - can only place stones where field is ``empty``
                    # - line must sum to "+" or "-" 4 (4 times black=+1 or white=-1 and once empty=0)
                    # place stone if that leads to winning the game
                    if empty in line and line.sum() == self.color * 4:
                        for pos in positions:
                            if gui.board[pos] == empty:
                                return pos
                        # sanity check: ``empty in line`` guarantees an empty field exists
                        raise RuntimeError("Check the implementation of ``check_if_immediate_win_possible``.")
        # control reaches this point only if no winning move is found => return None
    def win_if_possible(self, gui):
        """
        Place a stone where the player wins immediately if possible.
        Return ``True`` if a stone has been placed, otherwise return False.
        """
        pos = self.check_if_immediate_win_possible(gui)
        if pos is None:
            return False
        else:
            gui.board[pos] = self.color
            return True
class PlayerTest(unittest.TestCase):
    """
    Library class for testing AI players.
    Usage:
    Create a subclass and set the member variable ``Player`` to the
    AI you want to test:
    >>> class MyTest(PlayerTest):
    ...     Player = <Your AI>
    """
    # subclasses override this with the AI class under test
    Player = None
    @classmethod
    # NOTE(review): the first parameter of a classmethod is conventionally
    # named ``cls``; it is named ``self`` here
    def build_board(self, board_array):
        """
        Build up a valid ``GameBoard`` holding the desired ``board_array``.
        .. note::
            You probably rather need `.build_gui`
        :param board_array:
            2D-array; e.g. [[white, empty],
                            [black, black]]
        """
        board_array = np.asarray(board_array, dtype=int)
        assert len(board_array.shape) == 2
        height = board_array.shape[0]
        width = board_array.shape[1]
        board = Board(width=width, height=height)
        white_indices = []
        black_indices = []
        # find positions that are not empty
        for i in range(height):
            for j in range(width):
                value = board_array[i,j]
                if value == empty:
                    continue
                elif value == white:
                    white_indices.append((i,j))
                elif value == black:
                    black_indices.append((i,j))
                else:
                    raise AssertionError("Invalid ``board_array``")
        # in a valid board, there are equally many black and white stones or
        # one more white than black stone since white begins
        assert len(white_indices) == len(black_indices) or len(white_indices) == len(black_indices) + 1
        # replay the stones pairwise so every intermediate position is valid
        while black_indices:
            board[white_indices.pop()] = white
            board[black_indices.pop()] = black
            assert board.winner()[0] is None
        # if there is one more white stone
        if white_indices:
            board[white_indices.pop()] = white
        return board
    @classmethod
    def build_gui(self, board_array):
        """
        Build up a valid ``GameBoard`` packed in a ``BoardGui`` holding
        the desired ``board_array``. The returned instance of ``BoardGui``
        is ready to use in ``Player.make_move()``.
        :param board_array:
            2D-array; e.g. [[white, empty],
                            [black, black]]
        """
        from ..gui import BoardGui, tk
        board = self.build_board(board_array)
        gui = BoardGui(board, tk.Tk())
        gui.in_game = True
        return gui
    def base_test(self):
        # smoke test: let two instances of the AI play a full game against
        # each other on a fresh board
        width = 20
        height = 10
        # NOTE(review): positional order here is (height, width) while
        # build_board passes keyword arguments — confirm Board's signature
        board = Board(height, width)
        from ..gui import BoardGui, tk
        board_gui = BoardGui(board, tk.Tk())
        board_gui.in_game = True
        if self.Player is not None:
            white_player = self.Player(white)
            black_player = self.Player(black)
            # play until somebody wins or the board is full
            while board_gui.board.winner()[0] is None and not board_gui.board.full():
                white_player.make_move(board_gui)
                black_player.make_move(board_gui)
| [
"numpy.where",
"numpy.random.randint",
"numpy.asarray"
] | [((15795, 15829), 'numpy.asarray', 'np.asarray', (['board_array'], {'dtype': 'int'}), '(board_array, dtype=int)\n', (15805, 15829), True, 'import numpy as np\n'), ((1115, 1149), 'numpy.random.randint', 'np.random.randint', (['gui.board.width'], {}), '(gui.board.width)\n', (1132, 1149), True, 'import numpy as np\n'), ((1166, 1201), 'numpy.random.randint', 'np.random.randint', (['gui.board.height'], {}), '(gui.board.height)\n', (1183, 1201), True, 'import numpy as np\n'), ((4674, 4703), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (4682, 4703), True, 'import numpy as np\n'), ((5900, 5929), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (5908, 5929), True, 'import numpy as np\n'), ((7109, 7138), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (7117, 7138), True, 'import numpy as np\n'), ((8899, 8922), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (8907, 8922), True, 'import numpy as np\n'), ((9972, 9995), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (9980, 9995), True, 'import numpy as np\n'), ((11043, 11066), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (11051, 11066), True, 'import numpy as np\n'), ((12057, 12080), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (12065, 12080), True, 'import numpy as np\n'), ((1939, 1967), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (1947, 1967), True, 'import numpy as np\n'), ((2923, 2946), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (2931, 2946), True, 'import numpy as np\n'), ((1817, 1840), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (1825, 1840), True, 'import numpy as np\n'), ((1858, 1886), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (1866, 1886), 
True, 'import numpy as np\n'), ((2801, 2824), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (2809, 2824), True, 'import numpy as np\n'), ((2842, 2871), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (2850, 2871), True, 'import numpy as np\n'), ((4768, 4791), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (4776, 4791), True, 'import numpy as np\n'), ((5776, 5799), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (5784, 5799), True, 'import numpy as np\n'), ((5817, 5846), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (5825, 5846), True, 'import numpy as np\n'), ((6985, 7008), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (6993, 7008), True, 'import numpy as np\n'), ((7026, 7055), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (7034, 7055), True, 'import numpy as np\n'), ((8779, 8802), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (8787, 8802), True, 'import numpy as np\n'), ((8820, 8848), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (8828, 8848), True, 'import numpy as np\n'), ((9851, 9874), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (9859, 9874), True, 'import numpy as np\n'), ((9892, 9921), 'numpy.where', 'np.where', (['(line == -self.color)'], {}), '(line == -self.color)\n', (9900, 9921), True, 'import numpy as np\n'), ((10923, 10946), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (10931, 10946), True, 'import numpy as np\n'), ((10964, 10992), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (10972, 10992), True, 'import numpy as np\n'), ((11937, 11960), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (11945, 11960), True, 'import numpy as 
np\n'), ((11978, 12006), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (11986, 12006), True, 'import numpy as np\n'), ((12142, 12162), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (12159, 12162), True, 'import numpy as np\n'), ((12933, 12956), 'numpy.where', 'np.where', (['(line == empty)'], {}), '(line == empty)\n', (12941, 12956), True, 'import numpy as np\n'), ((12974, 13002), 'numpy.where', 'np.where', (['(line == self.color)'], {}), '(line == self.color)\n', (12982, 13002), True, 'import numpy as np\n')] |
from game import Game
from docplex.mp.model import Model
from docplex.mp.solution import SolveSolution
import numpy as np
import time
class ApproxMILP:
    """Mixed-integer linear programming (MILP) approximation of the game's
    objective, built with docplex.

    The nonlinear exp(.) terms are approximated by a piecewise-linear
    interpolation with ``L`` segments on the interval [-2W, 0], where W is
    the total absolute feature weight (see ``prep``).  ``generateMILP``
    assembles the model, ``solve`` runs CPLEX, and ``extractSolution``
    recovers the (continuous and discrete) feature settings.
    """
    def __init__(self, game, L):
        # game: project ``Game`` instance; L: number of linear segments
        self.initStart = time.time()
        self.game = game
        self.nodes = game.nodes
        self.n = game.n
        self.K = game.K
        self.Kd = game.Kd     # number of discrete features
        self.Kc = game.Kc     # number of continuous features
        self.model = Model(name='approxMILP')
        # W = total absolute weight over continuous and discrete features
        self.W = np.sum(np.abs(game.CfeatureWeights)) + np.sum(np.abs(game.DfeatureWeights))
        self.L = L
        self.scores = None
        self.prep()
        self.generateMILP()
        self.initTime = time.time() - self.initStart
        self.solveTime = 0
        self.obj = 0
    def prep(self):
        # uniform grid over [-2W, 0] with L segments
        self.grid = np.linspace(-2*self.W, 0, self.L+1)
        self.eps = self.grid[1] - self.grid[0]  # segment width
        self.expValue = np.exp(self.grid)
        # per-segment slopes of exp(.) between consecutive grid points
        self.gamma = (self.expValue[1:] - self.expValue[:-1]) / (self.grid[1:] - self.grid[:-1])
        self.gamma = np.flip(self.gamma)  # ordered from the right end of the grid
        # a-priori approximation-error bound for the piecewise-linear model
        self.bound = self.eps * self.eps * 2.0
    def generateMILP(self):
        """Assemble all variables and constraints of the approximate MILP."""
        us = [node.u for node in self.nodes]
        # big-M constant used to linearize products with binary variables
        M = 1/(np.exp(-2*self.W) * sum(us))
        self.y = self.model.binary_var_matrix(self.n, self.L, name="y")
        self.d = self.model.binary_var_matrix(self.n, self.Kd, name="d")
        self.v = self.model.continuous_var(name="v")
        self.q = self.model.continuous_var_matrix(self.n, self.Kc, name="q")
        self.s = self.model.continuous_var_matrix(self.n, self.L, name="s")
        self.h = self.model.continuous_var_matrix(self.n, self.Kc, name="h")
        self.g = self.model.continuous_var_matrix(self.n, self.L, name="g")
        self.b = self.model.continuous_var_matrix(self.n, self.Kd, name="b")
        self.t = self.model.continuous_var_dict(self.n, name="t")
        # g[i,l] = v * y[i,l] via the standard big-M linearization
        for i in range(self.n):
            for l in range(self.L):
                self.model.add_constraint(self.g[(i,l)] <= self.v)
                self.model.add_constraint(self.g[(i,l)] <= M * self.y[(i,l)])
                self.model.add_constraint(self.g[(i,l)] >= self.v - M * (1 - self.y[(i,l)]))
        # b[i,k] = v * d[i,k] (same linearization) and total discrete cost
        self.sumDisCost = self.model.linear_expr()
        for i in range(self.n):
            for k in range(self.Kd):
                self.model.add_constraint(self.b[(i,k)] <= self.v)
                self.model.add_constraint(self.b[(i,k)] <= M * self.d[(i,k)])
                self.model.add_constraint(self.b[(i,k)] >= self.v - M * (1 - self.d[(i,k)]))
                self.sumDisCost = self.sumDisCost + self.nodes[i].Dfeatures[k].cost * self.b[(i,k)]
        # h[i,k] >= |q[i,k] - xhat*v| and q within the scaled feasible range;
        # total continuous cost accumulated from h
        self.sumConCost = self.model.linear_expr()
        for i in range(self.n):
            expry = self.model.linear_expr()
            for k in range(self.Kc):
                expr = self.model.linear_expr(self.h[(i,k)] - self.q[(i,k)] + self.nodes[i].Cfeatures[k].xhat * self.v)
                self.model.add_constraint(expr >= 0)
                expr = self.model.linear_expr(self.h[(i,k)] + self.q[(i,k)] - self.nodes[i].Cfeatures[k].xhat * self.v)
                self.model.add_constraint(expr >= 0)
                expr = self.model.linear_expr(self.q[(i,k)] - max(self.nodes[i].Cfeatures[k].xhat -
                                                                 self.nodes[i].Cfeatures[k].range, 0) * self.v)
                self.model.add_constraint(expr >= 0)
                expr = self.model.linear_expr(self.q[(i,k)] - min(self.nodes[i].Cfeatures[k].xhat +
                                                                 self.nodes[i].Cfeatures[k].range, 1) * self.v)
                self.model.add_constraint(expr <= 0)
                self.sumConCost = self.sumConCost + self.nodes[i].Cfeatures[k].cost * self.h[(i,k)]
        # piecewise-linear reconstruction of t[i] ~ v * exp(w.x_i)
        for i in range(self.n):
            vepss = [ self.model.linear_expr(self.v * self.eps - self.s[(i, l)]) for l in range(self.L)]
            self.model.add_constraint( self.model.linear_expr(
                self.t[i] - np.exp(-2*self.W) * self.v - self.model.scal_prod(vepss, self.gamma)) == 0)
            wqC = [self.model.linear_expr(self.game.CfeatureWeights[k] * self.q[(i, k)]) for k in range(self.Kc)]
            wbD = [self.model.linear_expr(self.game.DfeatureWeights[k] * self.b[(i, k)]) for k in range(self.Kd)]
            ss = [self.s[(i, l)] for l in range(self.L)]
            # the segment variables s must sum to the (shifted) weighted score
            self.model.add_constraint( self.model.linear_expr(
                self.model.sum(ss) + self.model.sum(wqC) + self.model.sum(wbD) - self.W * self.v) == 0)
            # ordering/filling constraints: segments are consumed left to right
            for l in range(self.L):
                expr = self.model.linear_expr(self.eps * self.g[(i,l)] - self.s[(i,l)])
                self.model.add_constraint(expr <= 0)
                if l != self.L-1:
                    expr = self.model.linear_expr(self.eps * self.g[(i,l)] - self.s[(i,l+1)])
                    self.model.add_constraint(expr >= 0)
                expr = self.model.linear_expr(self.s[(i,l)] - self.eps * self.v)
                self.model.add_constraint(expr <= 0)
        # each half of the discrete features is a one-hot group
        # (assumes Kd is even — the reshape below fails otherwise)
        group = np.arange(self.Kd).reshape((2,-1))
        for i in range(self.n):
            for j in range(group.shape[0]):
                sumxd = self.model.linear_expr()
                for k in group[j,:]:
                    sumxd = sumxd + self.d[(i,k)]
                self.model.add_constraint(sumxd == 1)
        # budget constraint (scaled by v) and normalization of t
        self.model.add_constraint( self.sumDisCost + self.sumConCost <= self.game.budget * self.v)
        self.model.add_constraint( self.model.scal_prod(list(self.t.values()), us) == 1)
        self.model.maximize(self.model.sum(self.t))
    def solve(self):
        """Run CPLEX on the assembled model and extract the solution.

        Side effects: writes the model to "discMILP" (LP format) and, on
        success, the solution to "Sol"; sets self.feasible / self.obj /
        self.solveTime.
        """
        self.solveStart = time.time()
        self.model.parameters.mip.tolerances.mipgap = 1e-9
        self.model.set_time_limit(100)  # seconds
        solution = self.model.solve()
        self.model.export_as_lp("discMILP")
        if not solution:
            self.feasible = False
            self.solveTime = time.time() - self.solveStart
            self.obj = None
        else:
            self.feasible = True
            solution.export("Sol")
            self.extractSolution(solution)
            self.solveTime = time.time() - self.solveStart
            print("OBJ value Compute = ", self.obj)
    def extractSolution(self, solution):
        """Recover the original decision variables from the CPLEX solution and
        recompute the true (non-approximated) objective as a sanity check."""
        self.optVal = solution.get_objective_value()
        qsol = solution.get_value_dict(self.q)
        tsol = solution.get_value_dict(self.t)    # NOTE(review): unused below
        vsol = solution.get_value(self.v)
        ssol = solution.get_value_dict(self.s)
        bsol = solution.get_value_dict(self.b)
        gsol = solution.get_value_dict(self.g)
        yysol = solution.get_value_dict(self.y)   # NOTE(review): unused below
        # divide by v to undo the scaling applied in generateMILP
        # (presumably a fractional-programming transformation — verify)
        self.xsol = {(i, k): qsol[(i,k)] / vsol for i in range(self.n) for k in range(self.Kc)}
        self.dsol = {(i, k): bsol[(i,k)] / vsol for i in range(self.n) for k in range(self.Kd)}
        self.zsol = {(i, l): ssol[(i,l)] / vsol for i in range(self.n) for l in range(self.L)}
        self.ysol = {(i, l): gsol[(i,l)] / vsol for i in range(self.n) for l in range(self.L)}
        # recompute the exact objective sum(u_i exp(w.x_i)) / sum(exp(w.x_i))
        # and check feasibility of the recovered solution
        ewx = 0
        ewxu = 0
        feasible = True
        Dcost = 0
        Ccost = 0
        epsilon = 0.0001  # numerical tolerance for the feasibility checks
        for i in range(self.n):
            wx = 0
            for k in range(self.Kc):
                wx = wx + self.game.CfeatureWeights[k] * self.xsol[(i, k)]
                Ccost += abs(self.xsol[(i, k)] - self.nodes[i].Cfeatures[k].xhat) * self.nodes[i].Cfeatures[k].cost
                if abs(self.xsol[(i, k)] - self.nodes[i].Cfeatures[k].xhat) > self.nodes[i].Cfeatures[k].range + epsilon:
                    feasible = False
                    print("Infisible x out of bound, ", i, k, abs(self.xsol[(i, k)] - self.nodes[i].Cfeatures[k].xhat), self.nodes[i].Cfeatures[k].range)
            for k in range(self.Kd):
                wx = wx + self.game.DfeatureWeights[k] * self.dsol[(i, k)]
                Dcost += self.dsol[(i, k)] * self.nodes[i].Dfeatures[k].cost
            ewx += np.exp(wx)
            ewxu += np.exp(wx) * self.nodes[i].u
        self.obj = ewxu / ewx
        if Dcost + Ccost > self.game.budget + epsilon:
            print("Infisible cost, discrete Cost = ", Dcost, ", continuous cost = ", Ccost, self.game.budget)
        # NOTE(review): z and f below are computed but never used or returned —
        # this looks like leftover debugging code
        z = np.zeros(self.n)
        f = np.zeros(self.n)
        for i in range(self.n):
            z[i] = 0
            for l in range(self.L):
                z[i] -= self.zsol[(i,l)]
                f[i] += self.gamma[l] * (self.eps - self.zsol[(i,l)])
            f[i] += np.exp(-2*self.W)
    def getScores(self):
        # returns the recovered continuous feature settings (set by extractSolution)
        return self.xsol
| [
"docplex.mp.model.Model",
"numpy.flip",
"numpy.abs",
"numpy.exp",
"numpy.linspace",
"numpy.zeros",
"time.time",
"numpy.arange"
] | [((210, 221), 'time.time', 'time.time', ([], {}), '()\n', (219, 221), False, 'import time\n'), ((400, 424), 'docplex.mp.model.Model', 'Model', ([], {'name': '"""approxMILP"""'}), "(name='approxMILP')\n", (405, 424), False, 'from docplex.mp.model import Model\n'), ((754, 793), 'numpy.linspace', 'np.linspace', (['(-2 * self.W)', '(0)', '(self.L + 1)'], {}), '(-2 * self.W, 0, self.L + 1)\n', (765, 793), True, 'import numpy as np\n'), ((861, 878), 'numpy.exp', 'np.exp', (['self.grid'], {}), '(self.grid)\n', (867, 878), True, 'import numpy as np\n'), ((997, 1016), 'numpy.flip', 'np.flip', (['self.gamma'], {}), '(self.gamma)\n', (1004, 1016), True, 'import numpy as np\n'), ((5507, 5518), 'time.time', 'time.time', ([], {}), '()\n', (5516, 5518), False, 'import time\n'), ((8073, 8089), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (8081, 8089), True, 'import numpy as np\n'), ((8102, 8118), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (8110, 8118), True, 'import numpy as np\n'), ((636, 647), 'time.time', 'time.time', ([], {}), '()\n', (645, 647), False, 'import time\n'), ((7805, 7815), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (7811, 7815), True, 'import numpy as np\n'), ((8339, 8358), 'numpy.exp', 'np.exp', (['(-2 * self.W)'], {}), '(-2 * self.W)\n', (8345, 8358), True, 'import numpy as np\n'), ((449, 477), 'numpy.abs', 'np.abs', (['game.CfeatureWeights'], {}), '(game.CfeatureWeights)\n', (455, 477), True, 'import numpy as np\n'), ((488, 516), 'numpy.abs', 'np.abs', (['game.DfeatureWeights'], {}), '(game.DfeatureWeights)\n', (494, 516), True, 'import numpy as np\n'), ((1154, 1173), 'numpy.exp', 'np.exp', (['(-2 * self.W)'], {}), '(-2 * self.W)\n', (1160, 1173), True, 'import numpy as np\n'), ((4911, 4929), 'numpy.arange', 'np.arange', (['self.Kd'], {}), '(self.Kd)\n', (4920, 4929), True, 'import numpy as np\n'), ((5787, 5798), 'time.time', 'time.time', ([], {}), '()\n', (5796, 5798), False, 'import time\n'), ((5999, 6010), 'time.time', 
'time.time', ([], {}), '()\n', (6008, 6010), False, 'import time\n'), ((7836, 7846), 'numpy.exp', 'np.exp', (['wx'], {}), '(wx)\n', (7842, 7846), True, 'import numpy as np\n'), ((3870, 3889), 'numpy.exp', 'np.exp', (['(-2 * self.W)'], {}), '(-2 * self.W)\n', (3876, 3889), True, 'import numpy as np\n')] |
"""
This module contains the implementation of the Classes: ModelGenerationMushroomOnline, ModelGenerationMushroomOnlineDQN,
ModelGenerationMushroomOnlineAC, ModelGenerationMushroomOnlinePPO, ModelGenerationMushroomOnlineSAC,
ModelGenerationMushroomOnlineDDPG and ModelGenerationMushroomOnlineGPOMDP.
The Class ModelGenerationMushroomOnline inherits from the Class ModelGeneration.
The Classes ModelGenerationMushroomOnlineDQN, ModelGenerationMushroomOnlineAC and ModelGenerationMushroomOnlineGPOMDP inherit
from the Class ModelGenerationMushroomOnline.
The Classes ModelGenerationMushroomOnlinePPO, ModelGenerationMushroomOnlineSAC and ModelGenerationMushroomOnlineDDPG inherit
from the Class ModelGenerationMushroomOnlineAC.
"""
import copy
import numpy as np
from abc import abstractmethod
import matplotlib.pyplot as plt
from mushroom_rl.utils.spaces import Discrete
from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy
from mushroom_rl.policy import OrnsteinUhlenbeckPolicy
from mushroom_rl.utils.parameters import LinearParameter
from mushroom_rl.algorithms.value.dqn import DQN
from mushroom_rl.algorithms.actor_critic.deep_actor_critic import PPO, SAC, DDPG
from mushroom_rl.algorithms.policy_search import GPOMDP
from mushroom_rl.utils.replay_memory import ReplayMemory
from mushroom_rl.approximators.parametric import TorchApproximator
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.utils.optimizers import AdaptiveOptimizer
from mushroom_rl.core import Core
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ARLO.block.block_output import BlockOutput
from ARLO.block.model_generation import ModelGeneration
from ARLO.hyperparameter.hyperparameter import Real, Integer, Categorical
class ModelGenerationMushroomOnline(ModelGeneration):
    """
    This Class is used to contain all the common methods for the online model generation algorithms that are implemented in
    MushroomRL.
    """
    def __repr__(self):
        #Dumps every parameter and non-parameter member of the block: used for logging and for re-loading objects.
        return str(self.__class__.__name__)+'('+'eval_metric='+str(self.eval_metric)+', obj_name='+str(self.obj_name)\
               +', seeder='+ str(self.seeder)+', local_prng='+ str(self.local_prng)+', model='+str(self.model)\
               +', algo_params='+str(self.algo_params)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', deterministic_output_policy='+str(self.deterministic_output_policy)\
               +', works_on_online_rl='+str(self.works_on_online_rl)+', works_on_offline_rl='+str(self.works_on_offline_rl)\
               +', works_on_box_action_space='+str(self.works_on_box_action_space)\
               +', works_on_discrete_action_space='+str(self.works_on_discrete_action_space)\
               +', works_on_box_observation_space='+str(self.works_on_box_observation_space)\
               +', works_on_discrete_observation_space='+str(self.works_on_discrete_observation_space)\
               +', pipeline_type='+str(self.pipeline_type)+', is_learn_successful='+str(self.is_learn_successful)\
               +', is_parametrised='+str(self.is_parametrised)+', block_eval='+str(self.block_eval)\
               +', algo_params_upon_instantiation='+str(self.algo_params_upon_instantiation)\
               +', logger='+str(self.logger)+', fully_instantiated='+str(self.fully_instantiated)\
               +', info_MDP='+str(self.info_MDP)+')'
    def learn(self, train_data=None, env=None):
        """
        Parameters
        ----------
        train_data: This can be a dataset that will be used for training. It must be an object of a Class inheriting from Class
                    BaseDataSet.
                    The default is None.
        env: This must be a simulator/environment. It must be an object of a Class inheriting from Class BaseEnvironment.
             The default is None.
        Returns
        -------
        res: This is an object of Class BlockOutput containing the learnt policy. If something went wrong in the execution of the
             method the object of Class BlockOutput is empty.
        This method alternates between learning the RL algorithm and evaluating it.
        """
        #resets is_learn_successful to False, checks pipeline_type, checks the types of train_data and env, and makes sure that
        #they are not both None and selects the right inputs:
        starting_train_data_and_env = super().learn(train_data=train_data, env=env)
        #if super().learn() returned something that is of Class BlockOutput it means that up in the chain there was an error and
        #i need to return here the empty object of Class BlockOutput
        if(isinstance(starting_train_data_and_env, BlockOutput)):
            return BlockOutput(obj_name=self.obj_name)
        #since this is an online block we only have an environment, which is the second element of the list
        #starting_train_data_and_env
        starting_env = starting_train_data_and_env[1]
        #if i have a method called _default_network() it means I am using a PyTorch network. This is ok: MushroomRL does not allow
        #the use of other deep learning frameworks so there are not going to be issues:
        if(hasattr(self, '_default_network')):
            #sets torch number of threads
            torch.set_num_threads(self.n_jobs)
        #create core object with starting_env:
        self._create_core(env=starting_env)
        #dict_of_evals maps cumulative environment steps -> list of single-episode evaluations (see update_dict_of_evals):
        self.dict_of_evals = {}
        #if the algorithm has a replay buffer i fill it randomly:
        if('initial_replay_size' in list(self.algo_params.keys())):
            #fill replay memory with random dataset
            self.core.learn(n_steps=self.algo_params['initial_replay_size'].current_actual_value,
                            n_steps_per_fit=self.algo_params['initial_replay_size'].current_actual_value,
                            quiet=True)
        #evaluation step: evaluate the untrained agent to get a baseline before the first epoch
        res = BlockOutput(obj_name=str(self.obj_name)+'_result', log_mode=self.log_mode,
                          checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity,
                          policy=self.construct_policy(policy=self.algo_object.policy, regressor_type=self.regressor_type))
        if(self.deterministic_output_policy):
            #If this method is called then in the metric DiscountedReward you can use batch_eval
            res.make_policy_deterministic()
        starting_eval = self.eval_metric.evaluate(block_res=res, env=starting_env)
        #update dict_of_evals:
        self.update_dict_of_evals(current_epoch=0, single_episodes_eval=self.eval_metric.single_episode_evaluations,
                                  env=starting_env)
        self.logger.info(msg='Starting evaluation: '+str(starting_eval))
        for n_epoch in range(self.algo_params['n_epochs'].current_actual_value):
            self.logger.info(msg='Epoch: '+str(n_epoch))
            #learning step: either the n_steps* or the n_episodes* pair is expected to be set, the other pair is None
            self.core.learn(n_steps=self.algo_params['n_steps'].current_actual_value,
                            n_steps_per_fit=self.algo_params['n_steps_per_fit'].current_actual_value,
                            n_episodes=self.algo_params['n_episodes'].current_actual_value,
                            n_episodes_per_fit=self.algo_params['n_episodes_per_fit'].current_actual_value,
                            quiet=True)
            #evaluation step: wrap the current policy in a fresh BlockOutput and score it
            res = BlockOutput(obj_name=str(self.obj_name)+'_result', log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity,
                              policy=self.construct_policy(policy=self.algo_object.policy, regressor_type=self.regressor_type))
            if(self.deterministic_output_policy):
                #If this method is called then in the metric DiscountedReward you can use batch_eval
                res.make_policy_deterministic()
            tmp_eval = self.eval_metric.evaluate(block_res=res, env=starting_env)
            self.logger.info(msg='Current evaluation: '+str(tmp_eval))
            #update dict_of_evals
            self.update_dict_of_evals(current_epoch=n_epoch+1, single_episodes_eval=self.eval_metric.single_episode_evaluations,
                                      env=starting_env)
        self.is_learn_successful = True
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object learnt successfully!')
        return res
    def plot_dict_of_evals(self):
        """
        This method plots the dict_of_evals of the block: the mean discounted reward (with a standard deviation band, when
        each evaluation contains more than one episode) against the number of environment steps.
        Raises
        ------
        ValueError: if the dict_of_evals is empty (i.e: the learn method was not called yet).
        """
        x = np.array(list(self.dict_of_evals.keys()))
        if(len(x) == 0):
            exc_msg = 'The \'dict_of_evals\' is empty!'
            self.logger.exception(msg=exc_msg)
            raise ValueError(exc_msg)
        evals_values = list(self.dict_of_evals.values())
        y = np.array([np.mean(evals_values[i]) for i in range(len(evals_values))])
        std_dev = np.array([np.std(evals_values[i]) for i in range(len(evals_values))])
        plt.figure()
        plt.xlabel('Environment Steps')
        plt.ylabel('Average Discounted Reward')
        plt.title('Average Discounted Reward and Standard Deviation for '+str(self.obj_name))
        plt.grid(True)
        plt.plot(x, y, color='#FF9860')
        if(len(evals_values[0]) > 1):
            #the shaded std-dev band is meaningful only when each evaluation contains more than one episode:
            plt.fill_between(x, y-std_dev, y+std_dev, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9860')
        plt.show()
    def update_dict_of_evals(self, current_epoch, single_episodes_eval, env):
        """
        Parameters
        ----------
        current_epoch: This is a non-negative integer and it represents the current epoch.
        single_episodes_eval: This is a list of floats containing the evaluation of the agent over the single episodes, for as
                              many episodes as specified by the eval_metric.
        env: This is the environment in which we are acting. It must be an object of a Class inheriting from the Class
             BaseEnvironmnet.
        This method updates the dict_of_evals: the key is the cumulative number of environment steps and the value is the list
        of single episode evaluations.
        """
        number_of_steps = self.algo_params['n_steps'].current_actual_value
        #when the agent learns by episodes, n_steps is None: approximate the steps per epoch with horizon*n_episodes
        if(number_of_steps is None):
            number_of_steps = env.horizon*self.algo_params['n_episodes'].current_actual_value
        new_dict = {current_epoch*number_of_steps: single_episodes_eval}
        if(len(list(self.dict_of_evals.keys())) == 0):
            self.dict_of_evals = new_dict
        else:
            #merge: later epochs are appended to the existing evaluations
            self.dict_of_evals = {**self.dict_of_evals, **new_dict}
    def _create_core(self, env):
        """
        Parameters
        ---------
        env: This is the environment in which we are acting. It must be an object of a Class inheriting from the Class
             BaseEnvironmnet.
        This method updates the value of the core member by creating an object of Class mushroom_rl.core.Core.
        """
        self.core = Core(agent=self.algo_object, mdp=env)
    def analyse(self):
        """
        This method is not yet implemented.
        """
        raise NotImplementedError
    def save(self):
        """
        This method saves to a pickle file the object. Before saving it the core and the algo_object are cleared since these two
        can weigh quite a bit.
        """
        #clean up the core and algo_object: these two, in algorithms that have ReplayMemory, are going to make the output file,
        #created when calling the method save, be very heavy.
        #I need to clean these in a deep copy: otherwise erasing algo_object I cannot call twice in a row the learn method
        #because the algo_object is set in the method set_params
        copy_to_save = copy.deepcopy(self)
        copy_to_save.core = None
        copy_to_save.algo_object = None
        #calls method save() implemented in base Class ModelGeneration of the instance copy_to_save
        super(ModelGenerationMushroomOnline, copy_to_save).save()
class ModelGenerationMushroomOnlineDQN(ModelGenerationMushroomOnline):
    """
    This Class implements a specific online model generation algorithm: DQN. This Class wraps the DQN method implemented in
    MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/value/dqn/dqn.py
    This Class inherits from the Class ModelGenerationMushroomOnline.
    """
    def __init__(self, eval_metric, obj_name, regressor_type='q_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'epsilon': LinearParameter(value=1, threshold_value=0.01, n=1000000)
                     'policy': EpsGreedy(epsilon=LinearParameter(value=1, threshold_value=0.01, n=1000000)),
                     'approximator': TorchApproximator,
                     'network': two hidden layers, 16 neurons each,
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': self.info_MDP.action_space.n,
                     'output_shape': (self.info_MDP.action_space.n,),
                     'optimizer': Adam,
                     'lr': 0.0001,
                     'critic_loss': smooth_l1_loss,
                     'batch_size': 32,
                     'target_update_frequency': 250,
                     'replay_memory': ReplayMemory,
                     'initial_replay_size': 50000,
                     'max_replay_size': 1000000,
                     'clip_reward': False,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50,
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'q_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to DQN, which is the Class of MushroomRL implementing DQN.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        #capability flags: DQN is an online algorithm working on discrete action spaces only
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = False
        self.works_on_discrete_action_space = True
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        self.model = DQN
        self.core = None
        #seeds torch
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
    def _default_network(self):
        """
        This method creates a default Network with 2 hidden layers of 16 neurons each and ReLU activation functions.
        Returns
        -------
        Network: the Class wrapper representing the default network.
        """
        class Network(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super().__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                #architecture: n_input -> 16 -> 16 -> n_output
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, action=None):
                h = F.relu(self.hl0(state.float()))
                h = F.relu(self.hl1(h))
                q = self.hl2(h)
                #with no action: return Q-values for all actions; with an action: return only the taken action's Q-value
                if action is None:
                    return q
                else:
                    q_acted = torch.squeeze(q.gather(1, action.long()))
                    return q_acted
        return Network
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        #build the default hyperparameters only if the user did not supply any:
        if(self.algo_params is None):
            approximator = Categorical(hp_name='approximator', obj_name='approximator_'+str(self.model.__name__),
                                        current_actual_value=TorchApproximator)
            network = Categorical(hp_name='network', obj_name='network_'+str(self.model.__name__),
                                  current_actual_value=self._default_network())
            optimizer_class = Categorical(hp_name='class', obj_name='optimizer_class_'+str(self.model.__name__),
                                          current_actual_value=optim.Adam)
            lr = Real(hp_name='lr', obj_name='optimizer_lr_'+str(self.model.__name__),
                      current_actual_value=0.0001, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_loss = Categorical(hp_name='critic_loss', obj_name='critic_loss_'+str(self.model.__name__),
                                      current_actual_value=F.smooth_l1_loss)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=32, range_of_values=[16, 128], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            target_update_frequency = Integer(hp_name='target_update_frequency', current_actual_value=250,
                                              range_of_values=[100,1000], to_mutate=True,
                                              obj_name='target_update_frequency_'+str(self.model.__name__), seeder=self.seeder,
                                              log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                              verbosity=self.verbosity)
            initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                          range_of_values=[10000, 100000],
                                          obj_name='initial_replay_size_'+str(self.model.__name__),
                                          to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                                          checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000, range_of_values=[10000, 1000000],
                                      obj_name='max_replay_size_'+str(self.model.__name__), to_mutate=True, seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            #NOTE(review): the ReplayMemory is built here with the current size values; mutating the size hyperparameters
            #afterwards does not resize this already-built buffer -- confirm this is intended.
            replay_memory = Categorical(hp_name='replay_memory', obj_name='replay_memory_'+str(self.model.__name__),
                                        current_actual_value=ReplayMemory(initial_size=initial_replay_size.current_actual_value,
                                                                          max_size=max_replay_size.current_actual_value))
            clip_reward = Categorical(hp_name='clip_reward', obj_name='clip_reward_'+str(self.model.__name__),
                                      current_actual_value=False, possible_values=[True, False], to_mutate=True,
                                      seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #by default DQN learns by episodes: the n_steps pair is None and the n_episodes pair is set
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None,
                                      to_mutate=False, obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                                 checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,100],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            epsilon = Categorical(hp_name='epsilon', obj_name='epsilon_'+str(self.model.__name__),
                                  current_actual_value=LinearParameter(value=1, threshold_value=0.01, n=1000000))
            dict_of_params = {'approximator': approximator,
                              'network': network,
                              'class': optimizer_class,
                              'lr': lr,
                              'loss': critic_loss,
                              'batch_size': batch_size,
                              'target_update_frequency': target_update_frequency,
                              'replay_memory': replay_memory,
                              'initial_replay_size': initial_replay_size,
                              'max_replay_size': max_replay_size,
                              'clip_reward': clip_reward,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit,
                              'epsilon': epsilon
                             }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
    def set_params(self, new_params):
        """
        Parameters
        ----------
        new_params: The new parameters to be used in the specific model generation algorithm. It must be a dictionary that does
                    not contain any dictionaries(i.e: all parameters must be at the same level).
                    We need to create the dictionary in the right form for MushroomRL. Then it needs to update self.algo_params.
                    Then it needs to update the object self.algo_object: to this we need to pass the actual values and not
                    the Hyperparameter objects.
                    We call _select_current_actual_value_from_hp_classes: to this method we need to pass the dictionary already
                    in its final form.
        Returns
        -------
        bool: This method returns True if new_params is set correctly, and False otherwise.
        """
        if(new_params is not None):
            mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
                                   current_actual_value=self.info_MDP)
            input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
                                      current_actual_value=self.info_MDP.observation_space.shape)
            #output_shape and n_actions depend on the kind of MushroomRL regressor picked:
            if(self.regressor_type == 'action_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(1,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'q_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(self.info_MDP.action_space.n,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'generic_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=self.info_MDP.action_space.shape)
                #to have a generic regressor I must not specify n_actions
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=None)
            #the exploration policy wraps the (possibly scheduled) epsilon hyperparameter:
            policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__),
                                 current_actual_value=EpsGreedy(new_params['epsilon'].current_actual_value))
            tmp_structured_algo_params = {'mdp_info': mdp_info,
                                          'policy': policy,
                                          'approximator_params': {'input_shape': input_shape,
                                                                  'n_actions': n_actions,
                                                                  'output_shape': output_shape,
                                                                  'optimizer': {'class': None, 'params': {'lr': None}},
                                                                  }
                                          }
            #route every flat key of new_params into the nested structure that MushroomRL's DQN expects:
            for tmp_key in list(new_params.keys()):
                #i do not want to change mdp_info or policy
                if(tmp_key in ['approximator', 'batch_size', 'target_update_frequency', 'replay_memory',
                               'initial_replay_size', 'max_replay_size', 'clip_reward']):
                    tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
                if(tmp_key in ['network', 'loss']):
                    tmp_structured_algo_params['approximator_params'].update({tmp_key: new_params[tmp_key]})
                if(tmp_key in ['class']):
                    tmp_structured_algo_params['approximator_params']['optimizer'].update({tmp_key: new_params[tmp_key]})
                if(tmp_key in ['lr']):
                    new_dict_to_add = {tmp_key: new_params[tmp_key]}
                    tmp_structured_algo_params['approximator_params']['optimizer']['params'].update(new_dict_to_add)
            structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                          tmp_structured_algo_params)
            #i need to un-pack structured_dict_of_values for DQN
            self.algo_object = DQN(**structured_dict_of_values)
            final_dict_of_params = tmp_structured_algo_params
            #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
            dict_to_add = {'n_epochs': new_params['n_epochs'],
                           'n_steps': new_params['n_steps'],
                           'n_steps_per_fit': new_params['n_steps_per_fit'],
                           'n_episodes': new_params['n_episodes'],
                           'n_episodes_per_fit': new_params['n_episodes_per_fit'],
                           'epsilon': new_params['epsilon']
                          }
            final_dict_of_params = {**final_dict_of_params, **dict_to_add}
            self.algo_params = final_dict_of_params
            tmp_new_params = self.get_params()
            if(tmp_new_params is not None):
                self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
            else:
                self.logger.error(msg='There was an error getting the parameters!')
                return False
            return True
        else:
            self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
            return False
class ModelGenerationMushroomOnlineAC(ModelGenerationMushroomOnline):
    """
    This Class is used as base Class for actor critic methods implemented in MushroomRL. Specifically is used to contain some
    common methods that would have the same implementation across different actor critic methods.
    This Class inherits from the Class ModelGenerationMushroomOnline.
    """
    @abstractmethod
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        #Sub-Classes (PPO, SAC, DDPG, ...) must build here their algorithm-specific structured parameter dictionary and
        #the dictionary of extra training parameters; see the usage in set_params below.
        raise NotImplementedError
    def set_params(self, new_params):
        """
        Parameters
        ----------
        new_params: The new parameters to be used in the specific model generation algorithm. It must be a dictionary that does
                    not contain any dictionaries(i.e: all parameters must be at the same level).
                    We need to create the dictionary in the right form for MushroomRL. Then it needs to update self.algo_params.
                    Then it needs to update the object self.algo_object: to this we need to pass the actual values and not
                    the Hyperparameter objects.
                    We call _select_current_actual_value_from_hp_classes: to this method we need to pass the dictionary already in
                    its final form.
        Returns
        -------
        bool: This method returns True if new_params is set correctly, and False otherwise.
        """
        if(new_params is not None):
            mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
                                   current_actual_value=self.info_MDP)
            input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
                                      current_actual_value=self.info_MDP.observation_space.shape)
            #output_shape and n_actions depend on the kind of MushroomRL regressor picked:
            if(self.regressor_type == 'action_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(1,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'q_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(self.info_MDP.action_space.n,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'generic_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=self.info_MDP.action_space.shape)
                #to have a generic regressor I must not specify n_actions
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=None)
            #delegate the algorithm-specific construction (networks, optimizers, algo_object) to the sub-Class:
            tmp_structured_algo_params, dict_to_add = self.model_specific_set_params(new_params=new_params, mdp_info=mdp_info,
                                                                                     input_shape=input_shape,
                                                                                     output_shape=output_shape,
                                                                                     n_actions=n_actions)
            final_dict_of_params = {**tmp_structured_algo_params, **dict_to_add}
            self.algo_params = final_dict_of_params
            tmp_new_params = self.get_params()
            if(tmp_new_params is not None):
                self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
            else:
                self.logger.error(msg='There was an error getting the parameters!')
                return False
            return True
        else:
            self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
            return False
class ModelGenerationMushroomOnlinePPO(ModelGenerationMushroomOnlineAC):
    """
    This Class implements a specific online model generation algorithm: PPO. This Class wraps the PPO method
    implemented in MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/ppo.py
    This Class inherits from the Class ModelGenerationMushroomOnlineAC.
    """
    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'policy': either BoltzmannTorchPolicy(beta=0.001) or GaussianTorchPolicy(std_0=5),
                     'network': one hidden layer, 16 neurons,
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'actor_class': Adam,
                     'actor_lr': 3e-4,
                     'critic_class': Adam,
                     'critic_lr': 3e-4,
                     'loss': F.mse_loss,
                     'n_epochs_policy': 10,
                     'batch_size': 64,
                     'eps_ppo': 0.2,
                     'lam': 0.95,
                     'ent_coeff': 0,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'generic_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It contains the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to PPO, which is the Class of MushroomRL implementing PPO.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = True
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        #deep-copied so later in-place mutation of algo_params does not corrupt the saved original
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        self.model = PPO
        self.core = None
        #seeds torch (CPU and CUDA RNGs) for reproducibility
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #cache CUDA availability: it is forwarded as use_cuda when the torch policy is built in model_specific_set_params
        if torch.cuda.is_available():
            self.can_use_cuda = True
        else:
            self.can_use_cuda = False
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
    def _default_network(self):
        """
        This method creates a default Network with 1 hidden layer and ReLU activation functions.
        Returns
        -------
        Network: the Class wrapper representing the default network.
        """
        class Network(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super(Network, self).__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                #two 16-unit hidden layers, Xavier-initialised with the ReLU gain
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, **kwargs):
                h = F.relu(self.hl0(state.float()))
                h = F.relu(self.hl1(h))
                return self.hl2(h)
        return Network
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        #only build the default hyper-parameter set when the caller supplied none
        if(self.algo_params is None):
            network = Categorical(hp_name='network', obj_name='network_'+str(self.model.__name__),
                                  current_actual_value=self._default_network())
            actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                      current_actual_value=optim.Adam)
            actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                            current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                            log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                       current_actual_value=optim.Adam)
            critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                             current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                               current_actual_value=F.mse_loss)
            n_epochs_policy = Integer(hp_name='n_epochs_policy', obj_name='n_epochs_policy_'+str(self.model.__name__),
                                      current_actual_value=10, range_of_values=[1, 100], to_mutate=True, seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=64, range_of_values=[8, 64], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            eps_ppo = Real(hp_name='eps_ppo', obj_name='eps_ppo_'+str(self.model.__name__), current_actual_value=0.2,
                           range_of_values=[0.08,0.35], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                           checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            lam = Real(hp_name='lam', obj_name='lam_'+str(self.model.__name__), current_actual_value=0.95,
                       range_of_values=[0.85, 0.99], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                       checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            ent_coeff = Real(hp_name='ent_coeff', obj_name='ent_coeff_'+str(self.model.__name__), current_actual_value=0,
                             range_of_values=[0, 0.02], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                             checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                 verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            dict_of_params = {'actor_class': actor_class,
                              'actor_lr': actor_lr,
                              'network': network,
                              'critic_class': critic_class,
                              'critic_lr': critic_lr,
                              'loss': loss,
                              'n_epochs_policy': n_epochs_policy,
                              'batch_size': batch_size,
                              'eps_ppo': eps_ppo,
                              'lam': lam,
                              'ent_coeff': ent_coeff,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                              }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            #NOTE(review): this message renders as "of a'ClassName'" — a space after 'a' seems to be missing
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                    HyperParameter.
        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
                  and gamma and the horizon of the MDP.
        input_shape: The shape of the observation space.
        output_shape: The shape of the action space.
        n_actions: If the space is Discrete this is the number of actions.
        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.
        dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                     algorithm, like the number of epochs and the number of episodes.
        """
        #discrete action spaces get a Boltzmann policy, continuous (Box) ones get a Gaussian policy
        if(isinstance(self.info_MDP.action_space, Discrete)):
            #check if there is the beta parameter for the BoltzmannTorchPolicy
            if('beta' not in list(new_params.keys())):
                new_params['beta'] = Real(hp_name='beta', obj_name='beta_'+str(self.model.__name__), current_actual_value=0.001,
                                          range_of_values=[0.0001, 0.9], to_mutate=False, seeder=self.seeder,
                                          log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                          verbosity=self.verbosity)
            o_policy = BoltzmannTorchPolicy(network=new_params['network'].current_actual_value,
                                            input_shape=input_shape.current_actual_value,
                                            output_shape=output_shape.current_actual_value,
                                            beta=new_params['beta'].current_actual_value, use_cuda=self.can_use_cuda,
                                            n_actions=n_actions.current_actual_value, n_models=None)
        else:
            #check if there is the std deviation parameter for the GaussianTorchPolicy
            if('std' not in list(new_params.keys())):
                new_params['std'] = Real(hp_name='std', obj_name='std_'+str(self.model.__name__), current_actual_value=5,
                                         range_of_values=[0.1, 20], to_mutate=True, seeder=self.seeder,
                                         log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                         verbosity=self.verbosity)
            o_policy = GaussianTorchPolicy(network=new_params['network'].current_actual_value,
                                           input_shape=input_shape.current_actual_value,
                                           output_shape=output_shape.current_actual_value,
                                           std_0=new_params['std'].current_actual_value, use_cuda=self.can_use_cuda,
                                           n_actions=n_actions.current_actual_value, n_models=None)
        policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__), current_actual_value=o_policy)
        #the optimizer class/lr slots are placeholders here; they are filled from new_params in the loop below
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'policy': policy,
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': input_shape,
                                                        'n_actions': n_actions,
                                                        'output_shape': output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                        }
                                      }
        for tmp_key in list(new_params.keys()):
            #i do not want to change mdp_info or policy
            if(tmp_key in ['n_epochs_policy', 'batch_size', 'eps_ppo', 'lam', 'ent_coeff']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
            if(tmp_key in ['network', 'loss']):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)
        #i need to un-pack structured_dict_of_values for PPO
        self.algo_object = PPO(**structured_dict_of_values)
        #now that i have created the PPO object i can resolve the conflict between the 'actor_class', 'actor_lr',
        #'critic_class' and 'critic_lr'. To resolve it, i need to change their keys from generic 'class' and 'lr', that are
        #needed for MushroomRL, to 'actor_class', 'actor_lr', 'critic_class' and 'critic_lr':
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']
        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']
        #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit']
                       }
        if(isinstance(self.info_MDP.action_space, Discrete)):
            dict_to_add.update({'beta': new_params['beta']})
        else:
            dict_to_add.update({'std': new_params['std']})
        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineSAC(ModelGenerationMushroomOnlineAC):
    """
    This Class implements a specific online model generation algorithm: SAC. This Class wraps the SAC method
    implemented in MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/sac.py
    This Class inherits from the Class ModelGenerationMushroomOnlineAC.
    """
    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'actor_network': one hidden layer, 16 neurons,
                     'actor_class': Adam,
                     'actor_lr': 3e-4,
                     'critic_network': one hidden layer, 16 neurons,
                     'critic_class': Adam,
                     'critic_lr': 3e-4,
                     'loss': F.mse_loss,
                     'batch_size': 256,
                     'initial_replay_size': 50000,
                     'max_replay_size': 1000000,
                     'warmup_transitions': 100,
                     'tau': 0.005,
                     'lr_alpha': 3e-4,
                     'log_std_min': -20,
                     'log_std_max': 2,
                     'target_entropy': None,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'generic_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It contains the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to SAC, which is the Class of MushroomRL implementing SAC.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        #SAC only supports continuous (Box) action spaces
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = False
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        #deep-copied so later in-place mutation of algo_params does not corrupt the saved original
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        self.model = SAC
        self.core = None
        #seeds torch (CPU and CUDA RNGs) for reproducibility
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #NOTE(review): unlike the PPO sibling Class, no self.can_use_cuda flag is set here — confirm SAC never reads it
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
    def _default_network(self):
        """
        This method creates a default CriticNetwork with 1 hidden layer and ReLU activation functions and a default ActorNetwork
        with 1 hidden layer and ReLU activation functions.
        Returns
        -------
        CriticNetwork, ActorNetwork: the Class wrappers representing the default CriticNetwork and ActorNetwork.
        """
        class CriticNetwork(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super().__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                #two 16-unit hidden layers, Xavier-initialised with the ReLU gain
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, action, **kwargs):
                #the critic scores (state, action) pairs: the two inputs are concatenated feature-wise
                state_action = torch.cat((state.float(), action.float()), dim=1)
                h = F.relu(self.hl0(state_action))
                h = F.relu(self.hl1(h))
                q = self.hl2(h)
                return torch.squeeze(q)
        class ActorNetwork(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super(ActorNetwork, self).__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, **kwargs):
                h = F.relu(self.hl0(torch.squeeze(state, 1).float()))
                h = F.relu(self.hl1(h))
                return self.hl2(h)
        return CriticNetwork, ActorNetwork
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        #only build the default hyper-parameter set when the caller supplied none
        if(self.algo_params is None):
            critic, actor = self._default_network()
            #actor:
            actor_network_mu = Categorical(hp_name='actor_network_mu', obj_name='actor_network_mu_'+str(self.model.__name__),
                                           current_actual_value=actor)
            #the sigma head is a deep copy so mu and sigma do not share parameters
            actor_network_sigma = Categorical(hp_name='actor_network_sigma',
                                              obj_name='actor_network_sigma_'+str(self.model.__name__),
                                              current_actual_value=copy.deepcopy(actor))
            actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                      current_actual_value=optim.Adam)
            actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                            current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                            log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #critic:
            critic_network = Categorical(hp_name='critic_network', obj_name='critic_network_'+str(self.model.__name__),
                                         current_actual_value=critic)
            critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                       current_actual_value=optim.Adam)
            critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                             current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                                      current_actual_value=F.mse_loss)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=256, range_of_values=[8, 256], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                          obj_name='initial_replay_size_'+str(self.model.__name__))
            max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000,
                                      obj_name='max_replay_size_'+str(self.model.__name__))
            warmup_transitions = Integer(hp_name='warmup_transitions', current_actual_value=100,
                                         obj_name='warmup_transitions_'+str(self.model.__name__))
            tau = Real(hp_name='tau', current_actual_value=0.005, obj_name='tau_'+str(self.model.__name__))
            lr_alpha = Real(hp_name='lr_alpha', current_actual_value=3e-4, obj_name='lr_alpha_'+str(self.model.__name__))
            log_std_min = Real(hp_name='log_std_min', current_actual_value=-20, obj_name='log_std_min_'+str(self.model.__name__))
            log_std_max = Real(hp_name='log_std_max', current_actual_value=2, obj_name='log_std_max_'+str(self.model.__name__))
            target_entropy = Real(hp_name='target_entropy', current_actual_value=None,
                                  obj_name='target_entropy_'+str(self.model.__name__))
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                 verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            dict_of_params = {'actor_network_mu': actor_network_mu,
                              'actor_network_sigma': actor_network_sigma,
                              'actor_class': actor_class,
                              'actor_lr': actor_lr,
                              'critic_network': critic_network,
                              'critic_class': critic_class,
                              'critic_lr': critic_lr,
                              'loss': critic_loss,
                              'batch_size': batch_size,
                              'initial_replay_size': initial_replay_size,
                              'max_replay_size': max_replay_size,
                              'warmup_transitions': warmup_transitions,
                              'tau': tau,
                              'lr_alpha': lr_alpha,
                              'log_std_min': log_std_min,
                              'log_std_max': log_std_max,
                              'target_entropy': target_entropy,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                              }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            #NOTE(review): this message renders as "of a'ClassName'" — a space after 'a' seems to be missing
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                    HyperParameter.
        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
                  and gamma and the horizon of the MDP.
        input_shape: The shape of the observation space.
        output_shape: The shape of the action space.
        n_actions: If the space is Discrete this is the number of actions.
        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.
        dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                     algorithm, like the number of epochs and the number of episodes.
        """
        #the critic consumes the concatenation of state and action, so its input width is obs dims + action dims
        critic_input_shape = Categorical(hp_name='critic_input_shape', obj_name='critic_input_shape_'+str(self.model.__name__),
                                         current_actual_value=(input_shape.current_actual_value[0]+
                                                               self.info_MDP.action_space.shape[0],))
        #the critic outputs a single Q-value
        critic_output_shape = Categorical(hp_name='critic_output_shape', current_actual_value=(1,),
                                          obj_name='critic_output_shape_'+str(self.model.__name__))
        #the network/optimizer slots are placeholders here; they are filled from new_params in the loop below
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'actor_mu_params': {'input_shape': input_shape,
                                                          'n_actions': n_actions,
                                                          'output_shape': output_shape
                                                          },
                                      'actor_sigma_params': {'input_shape': input_shape,
                                                             'n_actions': n_actions,
                                                             'output_shape': output_shape
                                                             },
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': critic_input_shape,
                                                        'output_shape': critic_output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                        }
                                      }
        for tmp_key in list(new_params.keys()):
            #i do not want to change mdp_info
            if(tmp_key in ['batch_size', 'initial_replay_size', 'max_replay_size', 'warmup_transitions', 'tau', 'lr_alpha',
                           'log_std_min', 'log_std_max', 'target_entropy']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'loss'):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'critic_network'):
                tmp_structured_algo_params['critic_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
            if(tmp_key == 'actor_network_mu'):
                tmp_structured_algo_params['actor_mu_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'actor_network_sigma'):
                tmp_structured_algo_params['actor_sigma_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)
        #i need to un-pack structured_dict_of_values for SAC
        self.algo_object = SAC(**structured_dict_of_values)
        #now that i have created the SAC object i can resolve the conflict between the 'actor_class', 'actor_lr', 'actor_network',
        #'critic_class', 'critic_lr' and 'critic_network'. To resolve it, i need to change their keys from generic 'class'
        #'lr' and 'network', that are needed for MushroomRL, to 'actor_class', 'actor_lr', 'actor_network', 'critic_class',
        #critic_lr' and 'critic_network':
        tmp_structured_algo_params['critic_params']['critic_network'] = tmp_structured_algo_params['critic_params']['network']
        del tmp_structured_algo_params['critic_params']['network']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        new_val = tmp_structured_algo_params['actor_mu_params']['network']
        tmp_structured_algo_params['actor_mu_params']['actor_network_mu'] = new_val
        del tmp_structured_algo_params['actor_mu_params']['network']
        new_val = tmp_structured_algo_params['actor_sigma_params']['network']
        tmp_structured_algo_params['actor_sigma_params']['actor_network_sigma'] = new_val
        del tmp_structured_algo_params['actor_sigma_params']['network']
        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']
        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']
        #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit']
                       }
        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineDDPG(ModelGenerationMushroomOnlineAC):
    """
    This Class implements a specific online model generation algorithm: DDPG. This Class wraps the DDPG method
    implemented in MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/ddpg.py
    This Class inherits from the Class ModelGenerationMushroomOnlineAC.
    """
    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'policy': OrnsteinUhlenbeckPolicy(sigma=0.2*np.ones(1), theta=0.15, dt=1e-2)
                     'actor_network': one hidden layer, 16 neurons,
                     'actor_class': Adam,
                     'actor_lr': 1e-3,
                     'critic_network': one hidden layer, 16 neurons,
                     'critic_class': Adam,
                     'critic_lr': 1e-3,
                     'loss': F.mse_loss,
                     'batch_size': 100,
                     'initial_replay_size': 50000,
                     'max_replay_size': 1000000,
                     'tau': 0.005,
                     'policy_delay': 1,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'generic_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to DDPG, which is the Class of MushroomRL implementing DDPG.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        #capability flags: DDPG is an online algorithm working on continuous (box) action spaces only
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = False
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        #two stage initialisation: the block becomes fully instantiated only once info_MDP is known
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        #deep copy of the original algo_params: needed for re-loading objects
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        #MushroomRL Class implementing DDPG, used by set_params in the generic Class ModelGenerationMushroomOnline
        self.model = DDPG
        self.core = None
        #seeds torch (CPU and CUDA) so that network weight initialisation is reproducible
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
    def _default_network(self):
        """
        This method creates a default CriticNetwork with 1 hidden layer and ReLU activation functions and a default ActorNetwork
        with 1 hidden layer and ReLU activation functions.
        Returns
        -------
        CriticNetwork, ActorNetwork: the Class wrappers representing the default CriticNetwork and ActorNetwork.
        """
        class CriticNetwork(nn.Module):
            #Q-network: maps the concatenation of state and action to a scalar value
            def __init__(self, input_shape, output_shape, **kwargs):
                super().__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                #Xavier initialisation with the gain recommended for the ReLU activations used in forward()
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, action, **kwargs):
                #the critic input is the state-action pair, concatenated along the feature dimension
                state_action = torch.cat((state.float(), action.float()), dim=1)
                h = F.relu(self.hl0(state_action))
                h = F.relu(self.hl1(h))
                q = self.hl2(h)
                return torch.squeeze(q)
        class ActorNetwork(nn.Module):
            #policy network: maps a state to an action
            def __init__(self, input_shape, output_shape, **kwargs):
                super(ActorNetwork, self).__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
            def forward(self, state, **kwargs):
                #squeeze removes a possible singleton dimension at axis 1 before the first layer
                h = F.relu(self.hl0(torch.squeeze(state, 1).float()))
                h = F.relu(self.hl1(h))
                return self.hl2(h)
        return CriticNetwork, ActorNetwork
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        if(self.algo_params is None):
            #default hyperparameters: exploration policy class and its noise parameters sigma, theta, dt
            policy_class = Categorical(hp_name='policy_class', obj_name='policy_class_'+str(self.model.__name__),
                                        current_actual_value=OrnsteinUhlenbeckPolicy)
            sigma = Real(hp_name='sigma', current_actual_value=0.2, obj_name='sigma_'+str(self.model.__name__))
            theta = Real(hp_name='theta', current_actual_value=0.15, obj_name='theta_'+str(self.model.__name__))
            dt = Real(hp_name='dt', current_actual_value=1e-2, obj_name='dt_'+str(self.model.__name__))
            critic, actor = self._default_network()
            #actor:
            actor_network = Categorical(hp_name='actor_network', obj_name='actor_network_'+str(self.model.__name__),
                                        current_actual_value=actor)
            actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                      current_actual_value=optim.Adam)
            actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                            current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                            log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #critic:
            critic_network = Categorical(hp_name='critic_network', obj_name='critic_network_'+str(self.model.__name__),
                                         current_actual_value=critic)
            critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                       current_actual_value=optim.Adam)
            critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                             current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                                      current_actual_value=F.mse_loss)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=100, range_of_values=[8, 128], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                          range_of_values=[1000, 10000], to_mutate=True, seeder=self.seeder,
                                          log_mode=self.log_mode, obj_name='initial_replay_size_'+str(self.model.__name__))
            max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000, range_of_values=[10000, 1000000],
                                      to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                                      obj_name='max_replay_size_'+str(self.model.__name__))
            tau = Real(hp_name='tau', current_actual_value=0.005, obj_name='tau_'+str(self.model.__name__))
            policy_delay = Integer(hp_name='policy_delay', current_actual_value=1,
                                   obj_name='policy_delay_'+str(self.model.__name__))
            #learning-loop hyperparameters used by learn(), not by the DDPG constructor:
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, obj_name='n_steps_'+str(self.model.__name__),
                              seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                              verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                 verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            dict_of_params = {'policy_class': policy_class,
                              'sigma': sigma,
                              'theta': theta,
                              'dt': dt,
                              'actor_network': actor_network,
                              'actor_class': actor_class,
                              'actor_lr': actor_lr,
                              'critic_network': critic_network,
                              'critic_class': critic_class,
                              'critic_lr': critic_lr,
                              'loss': critic_loss,
                              'batch_size': batch_size,
                              'initial_replay_size': initial_replay_size,
                              'max_replay_size': max_replay_size,
                              'tau': tau,
                              'policy_delay': policy_delay,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                             }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                    HyperParameter.
        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
                  and gamma and the horizon of the MDP.
        input_shape: The shape of the observation space.
        output_shape: The shape of the action space.
        n_actions: If the space is Discrete this is the number of actions.
        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.
        dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                     algorithm, like the number of epochs and the number of episodes.
        """
        #the critic observes the concatenation of state and action, hence its input shape is the sum of the two sizes
        critic_input_shape = Categorical(hp_name='critic_input_shape', obj_name='critic_input_shape_'+str(self.model.__name__),
                                         current_actual_value=(input_shape.current_actual_value[0]+
                                                               self.info_MDP.action_space.shape[0],))
        critic_output_shape = Categorical(hp_name='critic_output_shape', current_actual_value=(1,),
                                          obj_name='critic_output_shape_'+str(self.model.__name__))
        #skeleton of the structured dictionary expected by the MushroomRL DDPG constructor; the None placeholders for the
        #optimizer class and learning rate are filled in by the loop below:
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'actor_params': {'input_shape': input_shape,
                                                       'n_actions': n_actions,
                                                       'output_shape': output_shape
                                                      },
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': critic_input_shape,
                                                        'output_shape': critic_output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                       }
                                     }
        #either np.ones(1) or np.ones(self.info_MDP.action_space.shape[0])
        new_sigma = np.ones(1)*new_params['sigma'].current_actual_value
        policy_params_dict = dict(sigma=new_sigma, theta=new_params['theta'].current_actual_value,
                                  dt=new_params['dt'].current_actual_value)
        policy_params = Categorical(hp_name='policy_params', current_actual_value=policy_params_dict,
                                    obj_name='policy_params_'+str(self.model.__name__))
        new_params.update({'policy_params': policy_params})
        #route every flat hyperparameter into its place in the structured dictionary (mdp_info is left untouched):
        for tmp_key in list(new_params.keys()):
            #I do not want to change mdp_info
            if(tmp_key in ['policy_class', 'policy_params', 'batch_size', 'initial_replay_size', 'max_replay_size', 'tau',
                           'policy_delay']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'loss'):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'critic_network'):
                tmp_structured_algo_params['critic_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
            if(tmp_key == 'actor_network'):
                tmp_structured_algo_params['actor_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)
        #I need to un-pack structured_dict_of_values for DDPG
        self.algo_object = DDPG(**structured_dict_of_values)
        #now that I have created the DDPG object I can resolve the conflict between the 'actor_class', 'actor_lr',
        #'actor_network', 'critic_class', 'critic_lr' and 'critic_network'. To resolve it, I need to change their keys from
        #generic 'class', 'lr' and 'network', that are needed for MushroomRL, to 'actor_class', 'actor_lr', 'actor_network',
        #'critic_class', critic_lr' and 'critic_network':
        tmp_structured_algo_params['critic_params']['critic_network'] = tmp_structured_algo_params['critic_params']['network']
        del tmp_structured_algo_params['critic_params']['network']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        new_val = tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_params']['actor_network'] = new_val
        del tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']
        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']
        #delete policy_params: this is constructed new each time here:
        del tmp_structured_algo_params['policy_params']
        #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit, sigma, theta, dt:
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit'],
                       'sigma': new_params['sigma'],
                       'theta': new_params['theta'],
                       'dt': new_params['dt']
                      }
        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineGPOMDP(ModelGenerationMushroomOnline):
    """
    This Class implements a specific online model generation algorithm: GPOMDP. This Class wraps the GPOMDP method implemented in
    MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/policy_search/policy_gradient/gpomdp.py
    This Class inherits from the Class ModelGenerationMushroomOnline.
    """
    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'policy': StateStdGaussianPolicy,
                     'approximator': LinearApproximator,
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'optimizer': AdaptiveOptimizer,
                     'eps': 1e-2,
                     'maximize': True,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'generic_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to GPOMDP, which is the Class of MushroomRL implementing GPOMDP.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        #capability flags: GPOMDP is an online policy-gradient algorithm for continuous (box) action spaces
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = False
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        #two stage initialisation: the block becomes fully instantiated only once info_MDP is known
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        #deep copy of the original algo_params: needed for re-loading objects
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        #MushroomRL Class implementing GPOMDP, used by set_params
        self.model = GPOMDP
        self.core = None
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        if(self.algo_params is None):
            #default hyperparameters: the gradient optimizer class and its parameters eps and maximize
            optimizer = Categorical(hp_name='optimizer', obj_name='optimizer_'+str(self.model.__name__),
                                    current_actual_value=AdaptiveOptimizer)
            eps = Real(hp_name='eps', obj_name='eps_'+str(self.model.__name__), current_actual_value=1e-2,
                       range_of_values=[1e-4, 1e-1], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                       checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            to_maximize = Categorical(hp_name='maximize', obj_name='maximize_'+str(self.model.__name__),
                                      current_actual_value=True, to_mutate=False, seeder=self.seeder, log_mode=self.log_mode,
                                      checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #learning-loop hyperparameters used by learn(), not by the GPOMDP constructor:
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                                 checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,100],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            dict_of_params = {'optimizer': optimizer,
                              'eps': eps,
                              'maximize': to_maximize,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                             }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
    def _create_policy(self, input_shape, n_actions, output_shape):
        """
        Parameters
        ----------
        input_shape: The shape of the observation space.
        n_actions: If the space is Discrete this is the number of actions.
        output_shape: The shape of the action space.
        Returns
        -------
        policy: This is an object of Class Categorical and in the current_actual_value it contains a mushroom_rl policy object.
        """
        #linear approximator for the mean of the Gaussian policy
        approximator_value = Regressor(LinearApproximator, input_shape=input_shape.current_actual_value,
                                       output_shape=output_shape.current_actual_value,
                                       n_actions=n_actions.current_actual_value)
        approximator = Categorical(hp_name='approximator', obj_name='approximator_'+str(self.model.__name__),
                                   current_actual_value=approximator_value)
        #linear approximator for the (state-dependent) standard deviation of the Gaussian policy
        sigma_value = Regressor(LinearApproximator, input_shape=input_shape.current_actual_value,
                                output_shape=output_shape.current_actual_value, n_actions=n_actions.current_actual_value)
        sigma = Categorical(hp_name='sigma', obj_name='sigma_'+str(self.model.__name__), current_actual_value=sigma_value)
        #initialise all std weights to 0.25:
        sigma_weights = 0.25*np.ones(sigma.current_actual_value.weights_size)
        sigma.current_actual_value.set_weights(sigma_weights)
        policy_value = StateStdGaussianPolicy(mu=approximator.current_actual_value, std=sigma.current_actual_value)
        policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__), current_actual_value=policy_value)
        return policy
    def set_params(self, new_params):
        """
        Parameters
        ----------
        new_params: The new parameters to be used in the specific model generation algorithm. It must be a dictionary that does
                    not contain any dictionaries(i.e: all parameters must be at the same level).
                    We need to create the dictionary in the right form for MushroomRL. Then it needs to update self.algo_params.
                    Then it needs to update the object self.algo_object: to this we need to pass the actual values and not
                    the Hyperparameter objects.
                    We call _select_current_actual_value_from_hp_classes: to this method we need to pass the dictionary already
                    in its final form.
        Returns
        -------
        bool: This method returns True if new_params is set correctly, and False otherwise.
        """
        if(new_params is not None):
            mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
                                    current_actual_value=self.info_MDP)
            #the regressor type determines output_shape and n_actions:
            input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
                                      current_actual_value=self.info_MDP.observation_space.shape)
            if(self.regressor_type == 'action_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(1,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'q_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(self.info_MDP.action_space.n,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'generic_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=self.info_MDP.action_space.shape)
                #to have a generic regressor I must not specify n_actions
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=None)
            tmp_structured_algo_params = {'mdp_info': mdp_info}
            #By subclassing this Class and changing the method _create_policy() one can specify a specific policy:
            policy = self._create_policy(input_shape=input_shape, n_actions=n_actions, output_shape=output_shape)
            tmp_structured_algo_params.update({'policy': policy})
            #split the flat parameters: 'optimizer' goes into the structured dict, its constructor arguments
            #(e.g. eps, maximize) are collected in opt_params:
            opt_params = {}
            for tmp_key in list(new_params.keys()):
                #I do not want to change mdp_info
                if(tmp_key == 'optimizer'):
                    tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
                if(tmp_key not in ['mdp_info', 'policy', 'optimizer', 'n_epochs', 'n_steps', 'n_steps_per_fit', 'n_episodes',
                                   'n_episodes_per_fit']):
                    opt_params.update({tmp_key: new_params[tmp_key]})
            optimizer_vals = self._select_current_actual_value_from_hp_classes(params_structured_dict=opt_params)
            #instantiate the optimizer: MushroomRL needs the optimizer object, not its Class
            opt = tmp_structured_algo_params['optimizer'].current_actual_value
            tmp_structured_algo_params['optimizer'].current_actual_value = opt(**optimizer_vals)
            structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                          tmp_structured_algo_params)
            #I need to un-pack structured_dict_of_values for GPOMDP
            self.algo_object = GPOMDP(**structured_dict_of_values)
            final_dict_of_params = tmp_structured_algo_params
            #remove the optimizer object (that is needed for MushroomRL) and insert the optimizer Class instead:
            final_dict_of_params['optimizer'].current_actual_value = opt
            #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
            dict_to_add = {'n_epochs': new_params['n_epochs'],
                           'n_steps': new_params['n_steps'],
                           'n_steps_per_fit': new_params['n_steps_per_fit'],
                           'n_episodes': new_params['n_episodes'],
                           'n_episodes_per_fit': new_params['n_episodes_per_fit']
                          }
            final_dict_of_params = {**final_dict_of_params, **dict_to_add, **opt_params}
            self.algo_params = final_dict_of_params
            tmp_new_params = self.get_params()
            if(tmp_new_params is not None):
                self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
            else:
                self.logger.error(msg='There was an error getting the parameters!')
                return False
            return True
        else:
            self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
            return False
| [
"mushroom_rl.algorithms.actor_critic.deep_actor_critic.DDPG",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.squeeze",
"mushroom_rl.core.Core",
"mushroom_rl.algorithms.actor_critic.deep_actor_critic.SAC",
"... | [((9796, 9808), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9806, 9808), True, 'import matplotlib.pyplot as plt\n'), ((9817, 9848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Environment Steps"""'], {}), "('Environment Steps')\n", (9827, 9848), True, 'import matplotlib.pyplot as plt\n'), ((9857, 9896), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Discounted Reward"""'], {}), "('Average Discounted Reward')\n", (9867, 9896), True, 'import matplotlib.pyplot as plt\n'), ((9999, 10013), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10007, 10013), True, 'import matplotlib.pyplot as plt\n'), ((10022, 10053), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""#FF9860"""'}), "(x, y, color='#FF9860')\n", (10030, 10053), True, 'import matplotlib.pyplot as plt\n'), ((10207, 10217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10215, 10217), True, 'import matplotlib.pyplot as plt\n'), ((11840, 11877), 'mushroom_rl.core.Core', 'Core', ([], {'agent': 'self.algo_object', 'mdp': 'env'}), '(agent=self.algo_object, mdp=env)\n', (11844, 11877), False, 'from mushroom_rl.core import Core\n'), ((12670, 12689), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (12683, 12689), False, 'import copy\n'), ((18742, 18773), 'copy.deepcopy', 'copy.deepcopy', (['self.algo_params'], {}), '(self.algo_params)\n', (18755, 18773), False, 'import copy\n'), ((18879, 18909), 'torch.manual_seed', 'torch.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (18896, 18909), False, 'import torch\n'), ((18918, 18953), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (18940, 18953), False, 'import torch\n'), ((19168, 19195), 'numpy.random.seed', 'np.random.seed', (['self.seeder'], {}), '(self.seeder)\n', (19182, 19195), True, 'import numpy as np\n'), ((45557, 45588), 'copy.deepcopy', 'copy.deepcopy', (['self.algo_params'], {}), '(self.algo_params)\n', (45570, 
45588), False, 'import copy\n'), ((45687, 45717), 'torch.manual_seed', 'torch.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (45704, 45717), False, 'import torch\n'), ((45726, 45761), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (45748, 45761), False, 'import torch\n'), ((45782, 45807), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (45805, 45807), False, 'import torch\n'), ((46122, 46149), 'numpy.random.seed', 'np.random.seed', (['self.seeder'], {}), '(self.seeder)\n', (46136, 46149), True, 'import numpy as np\n'), ((60506, 60538), 'mushroom_rl.algorithms.actor_critic.deep_actor_critic.PPO', 'PPO', ([], {}), '(**structured_dict_of_values)\n', (60509, 60538), False, 'from mushroom_rl.algorithms.actor_critic.deep_actor_critic import PPO, SAC, DDPG\n'), ((68402, 68433), 'copy.deepcopy', 'copy.deepcopy', (['self.algo_params'], {}), '(self.algo_params)\n', (68415, 68433), False, 'import copy\n'), ((68532, 68562), 'torch.manual_seed', 'torch.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (68549, 68562), False, 'import torch\n'), ((68571, 68606), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (68593, 68606), False, 'import torch\n'), ((68829, 68856), 'numpy.random.seed', 'np.random.seed', (['self.seeder'], {}), '(self.seeder)\n', (68843, 68856), True, 'import numpy as np\n'), ((84798, 84830), 'mushroom_rl.algorithms.actor_critic.deep_actor_critic.SAC', 'SAC', ([], {}), '(**structured_dict_of_values)\n', (84801, 84830), False, 'from mushroom_rl.algorithms.actor_critic.deep_actor_critic import PPO, SAC, DDPG\n'), ((93208, 93239), 'copy.deepcopy', 'copy.deepcopy', (['self.algo_params'], {}), '(self.algo_params)\n', (93221, 93239), False, 'import copy\n'), ((93343, 93373), 'torch.manual_seed', 'torch.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (93360, 93373), False, 'import torch\n'), ((93382, 93417), 'torch.cuda.manual_seed', 
'torch.cuda.manual_seed', (['self.seeder'], {}), '(self.seeder)\n', (93404, 93417), False, 'import torch\n'), ((93640, 93667), 'numpy.random.seed', 'np.random.seed', (['self.seeder'], {}), '(self.seeder)\n', (93654, 93667), True, 'import numpy as np\n'), ((109431, 109464), 'mushroom_rl.algorithms.actor_critic.deep_actor_critic.DDPG', 'DDPG', ([], {}), '(**structured_dict_of_values)\n', (109435, 109464), False, 'from mushroom_rl.algorithms.actor_critic.deep_actor_critic import PPO, SAC, DDPG\n'), ((117448, 117479), 'copy.deepcopy', 'copy.deepcopy', (['self.algo_params'], {}), '(self.algo_params)\n', (117461, 117479), False, 'import copy\n'), ((117772, 117799), 'numpy.random.seed', 'np.random.seed', (['self.seeder'], {}), '(self.seeder)\n', (117786, 117799), True, 'import numpy as np\n'), ((122794, 122968), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'input_shape.current_actual_value', 'output_shape': 'output_shape.current_actual_value', 'n_actions': 'n_actions.current_actual_value'}), '(LinearApproximator, input_shape=input_shape.current_actual_value,\n output_shape=output_shape.current_actual_value, n_actions=n_actions.\n current_actual_value)\n', (122803, 122968), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((123270, 123444), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'input_shape.current_actual_value', 'output_shape': 'output_shape.current_actual_value', 'n_actions': 'n_actions.current_actual_value'}), '(LinearApproximator, input_shape=input_shape.current_actual_value,\n output_shape=output_shape.current_actual_value, n_actions=n_actions.\n current_actual_value)\n', (123279, 123444), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((123781, 123878), 'mushroom_rl.policy.StateStdGaussianPolicy', 'StateStdGaussianPolicy', ([], {'mu': 'approximator.current_actual_value', 'std': 
'sigma.current_actual_value'}), '(mu=approximator.current_actual_value, std=sigma.\n current_actual_value)\n', (123803, 123878), False, 'from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy\n'), ((5180, 5215), 'ARLO.block.block_output.BlockOutput', 'BlockOutput', ([], {'obj_name': 'self.obj_name'}), '(obj_name=self.obj_name)\n', (5191, 5215), False, 'from ARLO.block.block_output import BlockOutput\n'), ((5754, 5788), 'torch.set_num_threads', 'torch.set_num_threads', (['self.n_jobs'], {}), '(self.n_jobs)\n', (5775, 5788), False, 'import torch\n'), ((10104, 10207), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y - std_dev)', '(y + std_dev)'], {'alpha': '(0.5)', 'edgecolor': '"""#CC4F1B"""', 'facecolor': '"""#FF9860"""'}), "(x, y - std_dev, y + std_dev, alpha=0.5, edgecolor=\n '#CC4F1B', facecolor='#FF9860')\n", (10120, 10207), True, 'import matplotlib.pyplot as plt\n'), ((34075, 34107), 'mushroom_rl.algorithms.value.dqn.DQN', 'DQN', ([], {}), '(**structured_dict_of_values)\n', (34078, 34107), False, 'from mushroom_rl.algorithms.value.dqn import DQN\n'), ((56546, 56860), 'mushroom_rl.policy.BoltzmannTorchPolicy', 'BoltzmannTorchPolicy', ([], {'network': "new_params['network'].current_actual_value", 'input_shape': 'input_shape.current_actual_value', 'output_shape': 'output_shape.current_actual_value', 'beta': "new_params['beta'].current_actual_value", 'use_cuda': 'self.can_use_cuda', 'n_actions': 'n_actions.current_actual_value', 'n_models': 'None'}), "(network=new_params['network'].current_actual_value,\n input_shape=input_shape.current_actual_value, output_shape=output_shape\n .current_actual_value, beta=new_params['beta'].current_actual_value,\n use_cuda=self.can_use_cuda, n_actions=n_actions.current_actual_value,\n n_models=None)\n", (56566, 56860), False, 'from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy\n'), ((57638, 57951), 
'mushroom_rl.policy.GaussianTorchPolicy', 'GaussianTorchPolicy', ([], {'network': "new_params['network'].current_actual_value", 'input_shape': 'input_shape.current_actual_value', 'output_shape': 'output_shape.current_actual_value', 'std_0': "new_params['std'].current_actual_value", 'use_cuda': 'self.can_use_cuda', 'n_actions': 'n_actions.current_actual_value', 'n_models': 'None'}), "(network=new_params['network'].current_actual_value,\n input_shape=input_shape.current_actual_value, output_shape=output_shape\n .current_actual_value, std_0=new_params['std'].current_actual_value,\n use_cuda=self.can_use_cuda, n_actions=n_actions.current_actual_value,\n n_models=None)\n", (57657, 57951), False, 'from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy\n'), ((106860, 106870), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (106867, 106870), True, 'import numpy as np\n'), ((123638, 123686), 'numpy.ones', 'np.ones', (['sigma.current_actual_value.weights_size'], {}), '(sigma.current_actual_value.weights_size)\n', (123645, 123686), True, 'import numpy as np\n'), ((128671, 128706), 'mushroom_rl.algorithms.policy_search.GPOMDP', 'GPOMDP', ([], {}), '(**structured_dict_of_values)\n', (128677, 128706), False, 'from mushroom_rl.algorithms.policy_search import GPOMDP\n'), ((9621, 9645), 'numpy.mean', 'np.mean', (['evals_values[i]'], {}), '(evals_values[i])\n', (9628, 9645), True, 'import numpy as np\n'), ((9719, 9742), 'numpy.std', 'np.std', (['evals_values[i]'], {}), '(evals_values[i])\n', (9725, 9742), True, 'import numpy as np\n'), ((19744, 19766), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (19753, 19766), True, 'import torch.nn as nn\n'), ((19794, 19811), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (19803, 19811), True, 'import torch.nn as nn\n'), ((19839, 19862), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (19848, 19862), True, 
'import torch.nn as nn\n'), ((35022, 35051), 'copy.deepcopy', 'copy.deepcopy', (['tmp_new_params'], {}), '(tmp_new_params)\n', (35035, 35051), False, 'import copy\n'), ((39586, 39615), 'copy.deepcopy', 'copy.deepcopy', (['tmp_new_params'], {}), '(tmp_new_params)\n', (39599, 39615), False, 'import copy\n'), ((46696, 46718), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (46705, 46718), True, 'import torch.nn as nn\n'), ((46746, 46763), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (46755, 46763), True, 'import torch.nn as nn\n'), ((46791, 46814), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (46800, 46814), True, 'import torch.nn as nn\n'), ((69550, 69572), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (69559, 69572), True, 'import torch.nn as nn\n'), ((69600, 69617), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (69609, 69617), True, 'import torch.nn as nn\n'), ((69645, 69668), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (69654, 69668), True, 'import torch.nn as nn\n'), ((70269, 70285), 'torch.squeeze', 'torch.squeeze', (['q'], {}), '(q)\n', (70282, 70285), False, 'import torch\n'), ((70587, 70609), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (70596, 70609), True, 'import torch.nn as nn\n'), ((70637, 70654), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (70646, 70654), True, 'import torch.nn as nn\n'), ((70682, 70705), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (70691, 70705), True, 'import torch.nn as nn\n'), ((94361, 94383), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (94370, 94383), True, 'import torch.nn as nn\n'), ((94411, 94428), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (94420, 94428), True, 'import torch.nn as nn\n'), 
((94456, 94479), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (94465, 94479), True, 'import torch.nn as nn\n'), ((95080, 95096), 'torch.squeeze', 'torch.squeeze', (['q'], {}), '(q)\n', (95093, 95096), False, 'import torch\n'), ((95398, 95420), 'torch.nn.Linear', 'nn.Linear', (['n_input', '(16)'], {}), '(n_input, 16)\n', (95407, 95420), True, 'import torch.nn as nn\n'), ((95448, 95465), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(16)'], {}), '(16, 16)\n', (95457, 95465), True, 'import torch.nn as nn\n'), ((95493, 95516), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'n_output'], {}), '(16, n_output)\n', (95502, 95516), True, 'import torch.nn as nn\n'), ((129798, 129827), 'copy.deepcopy', 'copy.deepcopy', (['tmp_new_params'], {}), '(tmp_new_params)\n', (129811, 129827), False, 'import copy\n'), ((24300, 24418), 'mushroom_rl.utils.replay_memory.ReplayMemory', 'ReplayMemory', ([], {'initial_size': 'initial_replay_size.current_actual_value', 'max_size': 'max_replay_size.current_actual_value'}), '(initial_size=initial_replay_size.current_actual_value,\n max_size=max_replay_size.current_actual_value)\n', (24312, 24418), False, 'from mushroom_rl.utils.replay_memory import ReplayMemory\n'), ((27004, 27061), 'mushroom_rl.utils.parameters.LinearParameter', 'LinearParameter', ([], {'value': '(1)', 'threshold_value': '(0.01)', 'n': '(1000000)'}), '(value=1, threshold_value=0.01, n=1000000)\n', (27019, 27061), False, 'from mushroom_rl.utils.parameters import LinearParameter\n'), ((31854, 31907), 'mushroom_rl.policy.EpsGreedy', 'EpsGreedy', (["new_params['epsilon'].current_actual_value"], {}), "(new_params['epsilon'].current_actual_value)\n", (31863, 31907), False, 'from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy\n'), ((72379, 72399), 'copy.deepcopy', 'copy.deepcopy', (['actor'], {}), '(actor)\n', (72392, 72399), False, 'import copy\n'), ((19942, 19972), 'torch.nn.init.calculate_gain', 
'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (19964, 19972), True, 'import torch.nn as nn\n'), ((20036, 20066), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (20058, 20066), True, 'import torch.nn as nn\n'), ((20130, 20160), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (20152, 20160), True, 'import torch.nn as nn\n'), ((46894, 46924), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (46916, 46924), True, 'import torch.nn as nn\n'), ((46988, 47018), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (47010, 47018), True, 'import torch.nn as nn\n'), ((47082, 47112), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (47104, 47112), True, 'import torch.nn as nn\n'), ((69748, 69778), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (69770, 69778), True, 'import torch.nn as nn\n'), ((69842, 69872), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (69864, 69872), True, 'import torch.nn as nn\n'), ((69936, 69966), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (69958, 69966), True, 'import torch.nn as nn\n'), ((70785, 70815), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (70807, 70815), True, 'import torch.nn as nn\n'), ((70879, 70909), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (70901, 70909), True, 'import torch.nn as nn\n'), ((70973, 71003), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (70995, 71003), True, 'import torch.nn as nn\n'), ((94559, 94589), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", 
(94581, 94589), True, 'import torch.nn as nn\n'), ((94653, 94683), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (94675, 94683), True, 'import torch.nn as nn\n'), ((94747, 94777), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (94769, 94777), True, 'import torch.nn as nn\n'), ((95596, 95626), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (95618, 95626), True, 'import torch.nn as nn\n'), ((95690, 95720), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (95712, 95720), True, 'import torch.nn as nn\n'), ((95784, 95814), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (95806, 95814), True, 'import torch.nn as nn\n'), ((71103, 71126), 'torch.squeeze', 'torch.squeeze', (['state', '(1)'], {}), '(state, 1)\n', (71116, 71126), False, 'import torch\n'), ((95914, 95937), 'torch.squeeze', 'torch.squeeze', (['state', '(1)'], {}), '(state, 1)\n', (95927, 95937), False, 'import torch\n')] |
import numpy as np
from scipy.optimize import curve_fit
def power(x, l):
return np.exp(x/l)
def calc_gurevich_len_1(data, z_cut):
data_e = data[data["particle"] == 11] # choose electrons
data_e = data_e[np.logical_and(data_e["z"]> -z_cut, data_e["z"]<z_cut)]
indx = np.isin(data_e["id"], data["parent_id"])
data_e = data_e[indx]
z = np.sort(data_e["z"])
Ne = np.arange(1,z.size+1)
popt, pcov = curve_fit(power, z, Ne, sigma=Ne**0.5)
return popt, pcov
| [
"scipy.optimize.curve_fit",
"numpy.logical_and",
"numpy.sort",
"numpy.isin",
"numpy.exp",
"numpy.arange"
] | [((86, 99), 'numpy.exp', 'np.exp', (['(x / l)'], {}), '(x / l)\n', (92, 99), True, 'import numpy as np\n'), ((287, 327), 'numpy.isin', 'np.isin', (["data_e['id']", "data['parent_id']"], {}), "(data_e['id'], data['parent_id'])\n", (294, 327), True, 'import numpy as np\n'), ((363, 383), 'numpy.sort', 'np.sort', (["data_e['z']"], {}), "(data_e['z'])\n", (370, 383), True, 'import numpy as np\n'), ((393, 417), 'numpy.arange', 'np.arange', (['(1)', '(z.size + 1)'], {}), '(1, z.size + 1)\n', (402, 417), True, 'import numpy as np\n'), ((432, 472), 'scipy.optimize.curve_fit', 'curve_fit', (['power', 'z', 'Ne'], {'sigma': '(Ne ** 0.5)'}), '(power, z, Ne, sigma=Ne ** 0.5)\n', (441, 472), False, 'from scipy.optimize import curve_fit\n'), ((218, 275), 'numpy.logical_and', 'np.logical_and', (["(data_e['z'] > -z_cut)", "(data_e['z'] < z_cut)"], {}), "(data_e['z'] > -z_cut, data_e['z'] < z_cut)\n", (232, 275), True, 'import numpy as np\n')] |
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image, ImageOps
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
    """Pantie patcher for the character "Ramne" (ラムネ).

    Warps the pantie image with a perspective transform followed by a
    piecewise-affine warp, masks it, mirrors it side by side, and pastes
    the result (plus an optional "immoral sign") onto the body image.
    """

    def __init__(self, body='./body/body_ramne.png', **options):
        super().__init__(name='ラムネ', body=body, pantie_position=[412, 835], **options)
        self.mask = io.imread('./mask/mask_ramne.png')
        self.sign_position = [844, 666]
        # 'add_sign' may be absent from options; fall back to asking the user.
        try:
            self.add_sign = self.options['add_sign']
        except Exception:
            self.add_sign = self.ask(question='Add immoral sign?', default=False)
        if self.add_sign:
            # Use a user-supplied sign image when given, else the bundled one.
            try:
                sign = Image.open(self.options['fsign'])
            except Exception:
                sign = Image.open('./material/anna_sign.png')
            left = ImageOps.mirror(sign)
            margin = 25
            # Compose the sign and its mirror image side by side with a gap.
            self.sign = Image.new("RGBA", (sign.size[0] * 2 + margin, sign.size[1]))
            self.sign.paste(sign, (sign.size[0] + int(margin / 2), 0))
            self.sign.paste(left, (0, 0))

    def convert(self, image):
        """Warp the pantie into this body's pose.

        Returns a double-width, horizontally mirrored RGBA PIL image.
        """
        pantie = np.array(image)
        # Rear to front: flip a strip from the rear bottom-right and paste it
        # near the top-left, then shift the whole image down by 100 rows.
        patch = np.copy(pantie[-110:-5, 548:, :])[::-1, ::-1, :]
        [pr, pc, d] = patch.shape
        pantie[105:105 + pr, :pc, :] = patch
        pantie = pantie[:-100, :, :]
        pantie = np.pad(pantie, [(100, 0), (0, 0), (0, 0)], mode='constant')
        pantie = perspective_transform(pantie, np.matrix('1, 0.01, 0; 0, 1, 0; -0.0008,0,1'))
        # Piecewise-affine warp over a 10x10 control grid whose points are
        # displaced by smoothed sine-shaped row/column offsets.
        [r, c, d] = pantie.shape
        src_cols = np.linspace(0, c, 10)
        src_rows = np.linspace(0, r, 10)
        src_rows, src_cols = np.meshgrid(src_rows, src_cols)
        src = np.dstack([src_cols.flat, src_rows.flat])[0]
        shifter_row = np.zeros(src.shape[0])
        shifter_col = np.zeros(src.shape[0])
        shifter_row = (np.sin(np.linspace(0, 1 * np.pi, src.shape[0]) - np.pi / 4) * 40)
        shifter_col = -np.sin(np.linspace(0, 1 * np.pi, src.shape[0]) + np.pi / 8) * 20
        shifter_row[shifter_row < 0] = 0
        # Smooth the offset profiles with a 10-tap moving average, then
        # resample to the 100 (= 10x10) control points.
        shifter_row = np.convolve(shifter_row, np.ones(10) / 10, mode='valid')
        shifter_row = skt.resize(shifter_row, (100, 1), anti_aliasing=True, mode='reflect')[:, 0]
        shifter_col = np.convolve(shifter_col, np.ones(10) / 10, mode='valid')
        shifter_col = skt.resize(shifter_col, (100, 1), anti_aliasing=True, mode='reflect')[:, 0]
        dst_rows = src[:, 1] + shifter_row
        dst_cols = src[:, 0] + shifter_col
        dst = np.vstack([dst_cols, dst_rows]).T
        affin = skt.PiecewiseAffineTransform()
        affin.estimate(src, dst)
        pantie = skt.warp(pantie, affin)
        # Crop, rescale to this body's proportions, apply the alpha mask,
        # then mirror into a double-width image.
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the drop-in replacement.
        pantie = pantie[25:290, 19:430, :]
        pantie = skt.resize(pantie, (int(pantie.shape[0] * 1.47), int(pantie.shape[1] * 1.49)), anti_aliasing=True, mode='reflect')
        pantie = np.bitwise_and(np.uint8(pantie[7:, :, :] * 255), self.mask)
        [r, c, d] = pantie.shape
        npantie = np.zeros((r, c * 2, d), dtype=np.uint8)
        npantie[:, c:, :] = pantie
        npantie[:, :c, :] = pantie[:, ::-1, :]
        return Image.fromarray(npantie)

    def patch(self, image, transparent=False):
        """Convert the pantie and paste it (and the optional sign) onto the body."""
        image = self.convert(image)
        if transparent:
            patched = Image.new("RGBA", self.body_size)
        else:
            patched = self.body.copy()
        if self.add_sign:
            self.paste(patched, self.sign, self.sign_position)
        patched = self.paste(patched, image, self.pantie_position)
        return patched
| [
"numpy.uint8",
"PIL.Image.new",
"numpy.array",
"skimage.transform.PiecewiseAffineTransform",
"numpy.linspace",
"numpy.vstack",
"numpy.meshgrid",
"numpy.matrix",
"PIL.ImageOps.mirror",
"numpy.ones",
"skimage.io.imread",
"skimage.transform.resize",
"numpy.int",
"numpy.copy",
"PIL.Image.fro... | [((382, 416), 'skimage.io.imread', 'io.imread', (['"""./mask/mask_ramne.png"""'], {}), "('./mask/mask_ramne.png')\n", (391, 416), True, 'import skimage.io as io\n'), ((1112, 1127), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1120, 1127), True, 'import numpy as np\n'), ((1359, 1418), 'numpy.pad', 'np.pad', (['pantie', '[(100, 0), (0, 0), (0, 0)]'], {'mode': '"""constant"""'}), "(pantie, [(100, 0), (0, 0), (0, 0)], mode='constant')\n", (1365, 1418), True, 'import numpy as np\n'), ((1601, 1622), 'numpy.linspace', 'np.linspace', (['(0)', 'c', '(10)'], {}), '(0, c, 10)\n', (1612, 1622), True, 'import numpy as np\n'), ((1642, 1663), 'numpy.linspace', 'np.linspace', (['(0)', 'r', '(10)'], {}), '(0, r, 10)\n', (1653, 1663), True, 'import numpy as np\n'), ((1693, 1724), 'numpy.meshgrid', 'np.meshgrid', (['src_rows', 'src_cols'], {}), '(src_rows, src_cols)\n', (1704, 1724), True, 'import numpy as np\n'), ((1806, 1828), 'numpy.zeros', 'np.zeros', (['src.shape[0]'], {}), '(src.shape[0])\n', (1814, 1828), True, 'import numpy as np\n'), ((1851, 1873), 'numpy.zeros', 'np.zeros', (['src.shape[0]'], {}), '(src.shape[0])\n', (1859, 1873), True, 'import numpy as np\n'), ((2596, 2626), 'skimage.transform.PiecewiseAffineTransform', 'skt.PiecewiseAffineTransform', ([], {}), '()\n', (2624, 2626), True, 'import skimage.transform as skt\n'), ((2677, 2700), 'skimage.transform.warp', 'skt.warp', (['pantie', 'affin'], {}), '(pantie, affin)\n', (2685, 2700), True, 'import skimage.transform as skt\n'), ((3031, 3070), 'numpy.zeros', 'np.zeros', (['(r, c * 2, d)'], {'dtype': 'np.uint8'}), '((r, c * 2, d), dtype=np.uint8)\n', (3039, 3070), True, 'import numpy as np\n'), ((3169, 3193), 'PIL.Image.fromarray', 'Image.fromarray', (['npantie'], {}), '(npantie)\n', (3184, 3193), False, 'from PIL import Image, ImageOps\n'), ((822, 843), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['sign'], {}), '(sign)\n', (837, 843), False, 'from PIL import Image, ImageOps\n'), ((892, 
952), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(sign.size[0] * 2 + margin, sign.size[1])'], {}), "('RGBA', (sign.size[0] * 2 + margin, sign.size[1]))\n", (901, 952), False, 'from PIL import Image, ImageOps\n'), ((1177, 1210), 'numpy.copy', 'np.copy', (['pantie[-110:-5, 548:, :]'], {}), '(pantie[-110:-5, 548:, :])\n', (1184, 1210), True, 'import numpy as np\n'), ((1466, 1511), 'numpy.matrix', 'np.matrix', (['"""1, 0.01, 0; 0, 1, 0; -0.0008,0,1"""'], {}), "('1, 0.01, 0; 0, 1, 0; -0.0008,0,1')\n", (1475, 1511), True, 'import numpy as np\n'), ((1739, 1780), 'numpy.dstack', 'np.dstack', (['[src_cols.flat, src_rows.flat]'], {}), '([src_cols.flat, src_rows.flat])\n', (1748, 1780), True, 'import numpy as np\n'), ((2193, 2262), 'skimage.transform.resize', 'skt.resize', (['shifter_row', '(100, 1)'], {'anti_aliasing': '(True)', 'mode': '"""reflect"""'}), "(shifter_row, (100, 1), anti_aliasing=True, mode='reflect')\n", (2203, 2262), True, 'import skimage.transform as skt\n'), ((2370, 2439), 'skimage.transform.resize', 'skt.resize', (['shifter_col', '(100, 1)'], {'anti_aliasing': '(True)', 'mode': '"""reflect"""'}), "(shifter_col, (100, 1), anti_aliasing=True, mode='reflect')\n", (2380, 2439), True, 'import skimage.transform as skt\n'), ((2546, 2577), 'numpy.vstack', 'np.vstack', (['[dst_cols, dst_rows]'], {}), '([dst_cols, dst_rows])\n', (2555, 2577), True, 'import numpy as np\n'), ((2935, 2967), 'numpy.uint8', 'np.uint8', (['(pantie[7:, :, :] * 255)'], {}), '(pantie[7:, :, :] * 255)\n', (2943, 2967), True, 'import numpy as np\n'), ((3332, 3365), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'self.body_size'], {}), "('RGBA', self.body_size)\n", (3341, 3365), False, 'from PIL import Image, ImageOps\n'), ((687, 720), 'PIL.Image.open', 'Image.open', (["self.options['fsign']"], {}), "(self.options['fsign'])\n", (697, 720), False, 'from PIL import Image, ImageOps\n'), ((2139, 2150), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (2146, 2150), True, 'import numpy as 
np\n'), ((2316, 2327), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (2323, 2327), True, 'import numpy as np\n'), ((2802, 2832), 'numpy.int', 'np.int', (['(pantie.shape[0] * 1.47)'], {}), '(pantie.shape[0] * 1.47)\n', (2808, 2832), True, 'import numpy as np\n'), ((2834, 2864), 'numpy.int', 'np.int', (['(pantie.shape[1] * 1.49)'], {}), '(pantie.shape[1] * 1.49)\n', (2840, 2864), True, 'import numpy as np\n'), ((764, 802), 'PIL.Image.open', 'Image.open', (['"""./material/anna_sign.png"""'], {}), "('./material/anna_sign.png')\n", (774, 802), False, 'from PIL import Image, ImageOps\n'), ((1904, 1943), 'numpy.linspace', 'np.linspace', (['(0)', '(1 * np.pi)', 'src.shape[0]'], {}), '(0, 1 * np.pi, src.shape[0])\n', (1915, 1943), True, 'import numpy as np\n'), ((1993, 2032), 'numpy.linspace', 'np.linspace', (['(0)', '(1 * np.pi)', 'src.shape[0]'], {}), '(0, 1 * np.pi, src.shape[0])\n', (2004, 2032), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import det_curve
from sklearn.metrics import plot_det_curve
@pytest.fixture(scope="module")
def data():
    """Iris features and targets as an (X, y) pair."""
    X, y = load_iris(return_X_y=True)
    return X, y
@pytest.fixture(scope="module")
def data_binary(data):
    """Binary subset of iris: only the first two classes."""
    X, y = data
    mask = y < 2
    return X[mask], y[mask]
@pytest.mark.parametrize(
    "response_method", ["predict_proba", "decision_function"]
)
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_plot_det_curve(
    pyplot,
    response_method,
    data_binary,
    with_sample_weight,
    with_strings
):
    """Check plot_det_curve against det_curve and the matplotlib artists."""
    X, y = data_binary

    # Optionally relabel targets as strings, making "c" the positive class.
    if with_strings:
        y = np.array(["c", "b"])[y]
        pos_label = "c"
    else:
        pos_label = None

    sample_weight = None
    if with_sample_weight:
        rng = np.random.RandomState(42)
        sample_weight = rng.randint(1, 4, size=X.shape[0])

    lr = LogisticRegression()
    lr.fit(X, y)
    viz = plot_det_curve(lr, X, y, alpha=0.8, sample_weight=sample_weight)

    # Recompute the curve directly and compare with what was plotted.
    y_pred = getattr(lr, response_method)(X)
    if y_pred.ndim == 2:
        y_pred = y_pred[:, 1]
    fpr, fnr, _ = det_curve(
        y, y_pred, sample_weight=sample_weight, pos_label=pos_label
    )
    assert_allclose(viz.fpr, fpr)
    assert_allclose(viz.fnr, fnr)
    assert viz.estimator_name == "LogisticRegression"

    # Safe to import here: the pyplot fixture configured the backend.
    import matplotlib as mpl

    assert isinstance(viz.line_, mpl.lines.Line2D)
    assert viz.line_.get_alpha() == 0.8
    assert isinstance(viz.ax_, mpl.axes.Axes)
    assert isinstance(viz.figure_, mpl.figure.Figure)
    assert viz.line_.get_label() == "LogisticRegression"

    expected_pos_label = pos_label if pos_label is not None else 1
    assert viz.ax_.get_ylabel() == (
        f"False Negative Rate (Positive label: {expected_pos_label})"
    )
    assert viz.ax_.get_xlabel() == (
        f"False Positive Rate (Positive label: {expected_pos_label})"
    )
| [
"sklearn.datasets.load_iris",
"sklearn.metrics.plot_det_curve",
"numpy.testing.assert_allclose",
"sklearn.linear_model.LogisticRegression",
"pytest.mark.parametrize",
"numpy.array",
"numpy.random.RandomState",
"pytest.fixture",
"sklearn.metrics.det_curve"
] | [((252, 282), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (266, 282), False, 'import pytest\n'), ((336, 366), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (350, 366), False, 'import pytest\n'), ((439, 525), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""response_method"""', "['predict_proba', 'decision_function']"], {}), "('response_method', ['predict_proba',\n 'decision_function'])\n", (462, 525), False, 'import pytest\n'), ((529, 589), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_sample_weight"""', '[True, False]'], {}), "('with_sample_weight', [True, False])\n", (552, 589), False, 'import pytest\n'), ((591, 645), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_strings"""', '[True, False]'], {}), "('with_strings', [True, False])\n", (614, 645), False, 'import pytest\n'), ((306, 332), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (315, 332), False, 'from sklearn.datasets import load_iris\n'), ((1069, 1089), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1087, 1089), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1118, 1182), 'sklearn.metrics.plot_det_curve', 'plot_det_curve', (['lr', 'X', 'y'], {'alpha': '(0.8)', 'sample_weight': 'sample_weight'}), '(lr, X, y, alpha=0.8, sample_weight=sample_weight)\n', (1132, 1182), False, 'from sklearn.metrics import plot_det_curve\n'), ((1318, 1388), 'sklearn.metrics.det_curve', 'det_curve', (['y', 'y_pred'], {'sample_weight': 'sample_weight', 'pos_label': 'pos_label'}), '(y, y_pred, sample_weight=sample_weight, pos_label=pos_label)\n', (1327, 1388), False, 'from sklearn.metrics import det_curve\n'), ((1409, 1438), 'numpy.testing.assert_allclose', 'assert_allclose', (['viz.fpr', 'fpr'], {}), '(viz.fpr, fpr)\n', (1424, 1438), False, 'from numpy.testing import 
assert_allclose\n'), ((1443, 1472), 'numpy.testing.assert_allclose', 'assert_allclose', (['viz.fnr', 'fnr'], {}), '(viz.fnr, fnr)\n', (1458, 1472), False, 'from numpy.testing import assert_allclose\n'), ((933, 958), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (954, 958), True, 'import numpy as np\n'), ((843, 863), 'numpy.array', 'np.array', (["['c', 'b']"], {}), "(['c', 'b'])\n", (851, 863), True, 'import numpy as np\n')] |
# Real-time face recognition demo: grab frames from a camera (or a video
# file), detect and encode faces on every PROCESS_FRAME_RATE-th frame,
# match them against a trained model, and draw labelled boxes on screen.
import cv2
import myface.face as face
import myface.utils.utils as utils
from myface.classes.test import Face_test
import numpy as np
MODEL_PATH = '../model/'
# openCv video capture : webcam or video (device 0 = default webcam)
video_capture = cv2.VideoCapture(0)
# video_capture = cv2.VideoCapture('/Users/zane/Movies/video/ET/ET.mp4')
# Load Trained model
model_name = 'A1'
trained_model = utils.load_model(MODEL_PATH, model_name)
Test = Face_test(trained_model)
PROCESS_FRAME_RATE = 2  # run detection only every 2nd frame to save CPU
SCALE_FRAME = 2  # downscale factor for detection; boxes are scaled back up
frame_cnt = 0
encoded_faces = []
recognize_result = {}
TOLERANCE = 0.55  # match threshold passed to the recognizer — presumably a max distance; confirm in Face_test
while True:
    ret, frame = video_capture.read()
    # NOTE(review): `ret` is never checked — if the capture fails or the
    # video ends, `frame` is None and cv2.resize below will raise.
    if frame_cnt == 0:
        # begin process frame: detect on a downscaled copy for speed
        small_frame = cv2.resize(
            frame, (0, 0), fx=1 / SCALE_FRAME, fy=1 / SCALE_FRAME)
        # find all faces and encode
        detect_result = face.detect_face_and_encode(small_frame)
        encoded_faces = detect_result['encoded_faces']
        # recognize all faces
        recognize_result = Test.predict_with_encode_faces(
            encoded_faces, TOLERANCE)
        # print out recognized result
        # print('!!!',recognize_result)
    # count the frames (cycles 0 .. PROCESS_FRAME_RATE - 1)
    frame_cnt = frame_cnt + 1 if frame_cnt < PROCESS_FRAME_RATE - 1 else 0
    # display the results; on skipped frames the boxes from the most
    # recently processed frame are reused
    for rect, name in zip(detect_result['detected_faces'], recognize_result):
        # rect comes from the downscaled frame, so scale its coordinates
        # back to the full-size frame
        top = rect.top() * SCALE_FRAME
        bottom = rect.bottom() * SCALE_FRAME
        left = rect.left() * SCALE_FRAME
        right = rect.right() * SCALE_FRAME
        label = ''
        if name['posibility'].size:
            # pick the candidate with the smallest value (closest match)
            # and show "label : value"
            target_index = np.argmin(name['posibility'])
            target_label = name['label'][target_index]
            target_distance = name['posibility'][target_index]
            label = str(target_label) + ' : ' + str(target_distance.round(2))
        else:
            label = 'Unknown'
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom),
                      (right, bottom + 35), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, label, (left + 6, bottom + 25),
                    font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"myface.face.detect_face_and_encode",
"myface.utils.utils.load_model",
"cv2.imshow",
"myface.classes.test.Face_test",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.argmin",
"cv2.resize",
"cv2.waitKey"
] | [((218, 237), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (234, 237), False, 'import cv2\n'), ((367, 407), 'myface.utils.utils.load_model', 'utils.load_model', (['MODEL_PATH', 'model_name'], {}), '(MODEL_PATH, model_name)\n', (383, 407), True, 'import myface.utils.utils as utils\n'), ((416, 440), 'myface.classes.test.Face_test', 'Face_test', (['trained_model'], {}), '(trained_model)\n', (425, 440), False, 'from myface.classes.test import Face_test\n'), ((2492, 2515), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2513, 2515), False, 'import cv2\n'), ((2313, 2339), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (2323, 2339), False, 'import cv2\n'), ((682, 747), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(1 / SCALE_FRAME)', 'fy': '(1 / SCALE_FRAME)'}), '(frame, (0, 0), fx=1 / SCALE_FRAME, fy=1 / SCALE_FRAME)\n', (692, 747), False, 'import cv2\n'), ((822, 862), 'myface.face.detect_face_and_encode', 'face.detect_face_and_encode', (['small_frame'], {}), '(small_frame)\n', (849, 862), True, 'import myface.face as face\n'), ((1893, 1959), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (1906, 1959), False, 'import cv2\n'), ((2019, 2107), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, bottom)', '(right, bottom + 35)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left, bottom), (right, bottom + 35), (0, 0, 255), cv2\n .FILLED)\n', (2032, 2107), False, 'import cv2\n'), ((2172, 2258), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(left + 6, bottom + 25)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, label, (left + 6, bottom + 25), font, 1.0, (255, 255, \n 255), 1)\n', (2183, 2258), False, 'import cv2\n'), ((1578, 1607), 'numpy.argmin', 'np.argmin', (["name['posibility']"], {}), "(name['posibility'])\n", (1587, 1607), True, 'import numpy as 
np\n'), ((2387, 2401), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2398, 2401), False, 'import cv2\n')] |
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from common.python import session
from common.python.federation import roles_to_parties
from common.python.utils import log_utils
from kernel.base.statics import MultivariateStatisticalSummary
from kernel.components.binning.core import binning_util
from kernel.components.binning.core.base_binning import Binning
from kernel.components.binning.horzfeaturebinning.param import HorzFeatureBinningParam
from kernel.transfer.framework import weights
from kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable import HorzBinningTransferVariable
from kernel.utils import consts
# Module-level logger shared by all classes in this module.
LOGGER = log_utils.get_logger()
class SplitPointNode(object):
    """A candidate split-point value searched by bisection in [min_value, max_value].

    A node becomes ``fixed`` once another bisection step can no longer move the
    candidate value by more than a small tolerance.
    """

    def __init__(self, value, min_value, max_value, aim_rank=None, allow_error_rank=0, last_rank=-1):
        self.value = value
        self.min_value = min_value
        self.max_value = max_value
        self.aim_rank = aim_rank
        self.allow_error_rank = allow_error_rank
        self.last_rank = last_rank
        self.fixed = False

    def set_aim_rank(self, rank):
        """Record the rank this split point is trying to reach."""
        self.aim_rank = rank

    def create_right_new(self):
        """Bisect toward max_value; freeze and return self when the interval collapses."""
        midpoint = (self.value + self.max_value) / 2
        if np.fabs(midpoint - self.value) <= consts.FLOAT_ZERO * 0.1:
            # The step is below tolerance: no progress possible, mark as converged.
            self.fixed = True
            return self
        return SplitPointNode(midpoint, self.value, self.max_value, self.aim_rank, self.allow_error_rank)

    def create_left_new(self):
        """Bisect toward min_value; freeze and return self when the interval collapses."""
        midpoint = (self.value + self.min_value) / 2
        if np.fabs(midpoint - self.value) <= consts.FLOAT_ZERO * 0.1:
            self.fixed = True
            return self
        return SplitPointNode(midpoint, self.min_value, self.max_value, self.aim_rank, self.allow_error_rank)
class RankArray(object):
    """Per-feature rank vector with element-wise convergence tracking.

    A position is marked fixed once its rank moved by less than ``error_rank``
    relative to ``last_rank_array``; fixed positions stop accumulating in
    ``+`` / ``+=``. ``all_fix`` flips to True once every position is fixed.
    """

    def __init__(self, rank_array, error_rank, last_rank_array=None):
        self.rank_array = rank_array
        self.last_rank_array = last_rank_array
        self.error_rank = error_rank
        self.all_fix = False
        # Nothing is fixed until a comparison against a previous round exists.
        self.fixed_array = np.zeros(len(self.rank_array), dtype=bool)
        self._compare()

    def _compare(self):
        """Mark positions whose rank changed by less than ``error_rank`` as fixed."""
        if self.last_rank_array is None:
            return
        self.fixed_array = abs(self.rank_array - self.last_rank_array) < self.error_rank
        assert isinstance(self.fixed_array, np.ndarray)
        # FIX: idiomatic ndarray truth test instead of `(arr == True).all()`.
        if self.fixed_array.all():
            self.all_fix = True

    def __iadd__(self, other: 'RankArray'):
        # Accumulate only the not-yet-converged positions in place.
        for idx, is_fixed in enumerate(self.fixed_array):
            if not is_fixed:
                self.rank_array[idx] += other.rank_array[idx]
        self._compare()
        return self

    def __add__(self, other: 'RankArray'):
        # Same accumulation as __iadd__, but producing a new RankArray.
        res_array = []
        for idx, is_fixed in enumerate(self.fixed_array):
            if not is_fixed:
                res_array.append(self.rank_array[idx] + other.rank_array[idx])
            else:
                res_array.append(self.rank_array[idx])
        return RankArray(np.array(res_array), self.error_rank, self.last_rank_array)
class Server(Binning):
    """Server-side coordinator for federated (horizontal) feature binning.
    Sums the models/tables uploaded by the client parties via ``self.aggregator``
    and broadcasts each aggregated result back to them.
    """
    def fit_split_points(self, data_instances):
        # Split points are fitted on the client side; the server only aggregates.
        pass
    def __init__(self, params=None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        self.aggregator = None
        self.transfer_variable = HorzBinningTransferVariable()
        self.suffix = None
    def set_suffix(self, suffix):
        # The suffix namespaces each federation round so transfers do not collide.
        self.suffix = suffix
    def set_transfer_variable(self, variable):
        self.transfer_variable = variable
    def set_aggregator(self, aggregator):
        self.aggregator = aggregator
    def get_total_count(self):
        """Sum the clients' instance counts and send the total back to them."""
        total_count = self.aggregator.sum_model(suffix=(self.suffix, 'total_count'))
        LOGGER.debug(f'total_count={total_count}')
        self.aggregator.send_aggregated_model(total_count, suffix=(self.suffix, 'total_count'))
        return total_count
    def get_missing_count(self):
        """Sum the clients' missing-value counts and send the result back."""
        missing_count = self.aggregator.sum_model(suffix=(self.suffix, 'missing_count'))
        self.aggregator.send_aggregated_model(missing_count, suffix=(self.suffix, 'missing_count'))
        return missing_count
    def get_min_max(self, clients=None):
        """Collect each party's local (max, min) lists and broadcast the global
        element-wise extremes back to the same parties.
        :param clients: optional iterable of role names; when None, defaults to
            the promoter and provider parties.
        :return: tuple ``(min_values, max_values)`` of element-wise extremes.
        """
        if clients is not None:
            members = roles_to_parties(list(clients))
        else:
            members = roles_to_parties([consts.PROMOTER, consts.PROVIDER])
        LOGGER.debug(f'member_id={members}')
        # Each party sends a (local_max, local_min) pair of per-column values.
        local_values = self.transfer_variable.local_static_values.get_parties(parties=members,
                                                                              suffix=(self.suffix, "min-max"))
        LOGGER.debug(f'local_min_max={local_values}')
        max_array, min_array = [], []
        for local_max, local_min in local_values:
            max_array.append(local_max)
            min_array.append(local_min)
        max_values = np.max(max_array, axis=0)
        min_values = np.min(min_array, axis=0)
        self.transfer_variable.global_static_values.remote_parties((max_values, min_values), parties=members,
                                                                   suffix=(self.suffix, "min-max"))
        return min_values, max_values
    def query_values(self):
        """Aggregate the rank tables uploaded by clients and broadcast the result."""
        rank_weight = self.aggregator.aggregate_tables(suffix=(self.suffix, 'rank'))
        self.aggregator.send_aggregated_tables(rank_weight, suffix=(self.suffix, 'rank'))
    def calc_event(self):
        """Add up the clients' event-count tables and broadcast the merged table."""
        merge_event = self.aggregator.add_tables(suffix=(self.suffix, 'event_count'))
        self.aggregator.send_aggregated_tables(merge_event, suffix=(self.suffix, 'event_count'))
class Client(Binning):
    """Client-side participant in federated (horizontal) feature binning.
    Computes local statistics (counts, min/max, ranks, event counts) and
    exchanges them with the server through ``self.aggregator`` and the
    binning transfer variables.
    """
    def fit_split_points(self, data_instances):
        # Split-point fitting is orchestrated externally; nothing to do here.
        pass
    def __init__(self, params: HorzFeatureBinningParam = None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        self.aggregator = None
        self.transfer_variable = HorzBinningTransferVariable()
        # Cached global extremes so repeated get_min_max calls skip a new round.
        self.max_values, self.min_values = None, None
        self.suffix = None
        self.total_count = 0
    def aggregator_bin_counts(self, result_counts):
        """Securely sum per-bin counts across all parties.
        :param result_counts: dict mapping feature name -> list of per-bin
            count pairs (each entry a 2-element sequence).
        :return: dict of the same shape with counts summed over all parties.
        """
        transform_result_counts = []
        for feature, result_count in result_counts.items():
            # Flatten the per-bin pairs into a single 1-D array so the
            # aggregator can sum them element-wise.
            counts = []
            for result in result_count:
                counts.extend(result)
            transform_result_counts.append((feature, np.array(counts)))
        LOGGER.info(f'result_counts_tables={transform_result_counts}')
        result_count_tables = session.parallelize(transform_result_counts, partition=10, include_key=True,
                                                  need_send=True)
        LOGGER.info(f'result_count_tables={result_count_tables.first()}')
        self.aggregator.send_table(result_count_tables, suffix=(self.suffix, 'event_count'))
        merge_result_counts_tables = self.aggregator.get_aggregated_table(suffix=(self.suffix, 'event_count'))
        LOGGER.info(f'merge_result_counts_tables={list(merge_result_counts_tables.collect())}')
        new_result_counts = {}
        for value in list(merge_result_counts_tables.collect()):
            feature_name = value[0]
            all_event_counts = value[1].tolist()
            # Aggregated sums may come back as floats; round them back to ints.
            all_event_counts = [round(x) for x in all_event_counts]
            LOGGER.debug(f'feature_name={feature_name},all_event_counts={all_event_counts}')
            # Re-group the flat list back into consecutive 2-element pairs.
            new_result_counts[feature_name] = [all_event_counts[i: i + 2] for i in range(0, len(all_event_counts), 2)]
        LOGGER.info(f'result_counts={result_counts}, new_result_counts={new_result_counts}')
        return new_result_counts
    def set_suffix(self, suffix):
        # The suffix namespaces each federation round so transfers do not collide.
        self.suffix = suffix
    def set_transfer_variable(self, variable):
        self.transfer_variable = variable
    def set_aggregator(self, aggregator):
        self.aggregator = aggregator
    def get_total_count(self, data_inst):
        """Send the local row count; return the federated total."""
        count = data_inst.count()
        count_weight = weights.NumericWeights(count)
        self.aggregator.send_model(count_weight, suffix=(self.suffix, 'total_count'))
        total_count = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'total_count')).unboxed
        return total_count
    def get_missing_count(self, summary_table):
        """Send local per-column missing-value counts; return the federated sums."""
        missing_table = summary_table.mapValues(lambda x: x.missing_count)
        missing_value_counts = dict(missing_table.collect())
        LOGGER.info(f'missing_value_counts={missing_value_counts}')
        missing_weight = weights.DictWeights(missing_value_counts)
        self.aggregator.send_model(missing_weight, suffix=(self.suffix, 'missing_count'))
        missing_counts = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'missing_count')).unboxed
        return missing_counts
    def get_min_max(self, data_inst):
        """
        Get the global max and min values of each selected column.
        Computes local per-column extremes, sends them to the server, and caches
        the aggregated global extremes it receives back.
        Returns:
            max_values, min_values
            NOTE(review): these are the values returned by the transfer variable,
            aligned with ``bin_inner_param.bin_names`` — confirm whether callers
            expect dicts as the original note suggested.
        """
        if self.max_values and self.min_values:
            # Already fetched during an earlier round; reuse the cache.
            return self.max_values, self.min_values
        statistic_obj = MultivariateStatisticalSummary(data_inst,
                                                      cols_index=self.bin_inner_param.bin_indexes,
                                                      abnormal_list=self.abnormal_list)
        max_values = statistic_obj.get_max()
        min_values = statistic_obj.get_min()
        # Order the per-column extremes consistently with bin_names before sending.
        max_list = [max_values[x] for x in self.bin_inner_param.bin_names]
        min_list = [min_values[x] for x in self.bin_inner_param.bin_names]
        local_min_max_values = (max_list, min_list)
        self.transfer_variable.local_static_values.remote(local_min_max_values,
                                                          suffix=(self.suffix, "min-max"))
        self.max_values, self.min_values = self.transfer_variable.global_static_values.get(
            idx=0, suffix=(self.suffix, "min-max"))
        return self.max_values, self.min_values
    def query_values(self, summary_table, query_points):
        """Join local summaries with the query points, send the local ranks,
        and return the aggregated global ranks as int arrays."""
        local_ranks = summary_table.join(query_points, binning_util.query_table)
        LOGGER.debug(f'local_ranks={local_ranks.first()}')
        self.aggregator.send_table(local_ranks, suffix=(self.suffix, 'rank'))
        global_rank = self.aggregator.get_aggregated_table(suffix=(self.suffix, 'rank'))
        # Aggregation may yield floats; cast ranks back to integers.
        global_rank = global_rank.mapValues(lambda x: np.array(x, dtype=int))
        return global_rank
| [
"kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable.HorzBinningTransferVariable",
"numpy.fabs",
"kernel.base.statics.MultivariateStatisticalSummary",
"kernel.transfer.framework.weights.DictWeights",
"numpy.max",
"numpy.array",
"kernel.transfer.framework.weights.NumericWeight... | [((1830, 1852), 'common.python.utils.log_utils.get_logger', 'log_utils.get_logger', ([], {}), '()\n', (1850, 1852), False, 'from common.python.utils import log_utils\n'), ((4533, 4562), 'kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable.HorzBinningTransferVariable', 'HorzBinningTransferVariable', ([], {}), '()\n', (4560, 4562), False, 'from kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable import HorzBinningTransferVariable\n'), ((6078, 6103), 'numpy.max', 'np.max', (['max_array'], {'axis': '(0)'}), '(max_array, axis=0)\n', (6084, 6103), True, 'import numpy as np\n'), ((6125, 6150), 'numpy.min', 'np.min', (['min_array'], {'axis': '(0)'}), '(min_array, axis=0)\n', (6131, 6150), True, 'import numpy as np\n'), ((7096, 7125), 'kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable.HorzBinningTransferVariable', 'HorzBinningTransferVariable', ([], {}), '()\n', (7123, 7125), False, 'from kernel.transfer.variables.transfer_class.horz_feature_binning_transfer_variable import HorzBinningTransferVariable\n'), ((7661, 7757), 'common.python.session.parallelize', 'session.parallelize', (['transform_result_counts'], {'partition': '(10)', 'include_key': '(True)', 'need_send': '(True)'}), '(transform_result_counts, partition=10, include_key=True,\n need_send=True)\n', (7680, 7757), False, 'from common.python import session\n'), ((9099, 9128), 'kernel.transfer.framework.weights.NumericWeights', 'weights.NumericWeights', (['count'], {}), '(count)\n', (9121, 9128), False, 'from kernel.transfer.framework import weights\n'), ((9624, 9665), 'kernel.transfer.framework.weights.DictWeights', 'weights.DictWeights', (['missing_value_counts'], {}), '(missing_value_counts)\n', (9643, 9665), False, 'from kernel.transfer.framework import weights\n'), ((10239, 10364), 'kernel.base.statics.MultivariateStatisticalSummary', 'MultivariateStatisticalSummary', 
(['data_inst'], {'cols_index': 'self.bin_inner_param.bin_indexes', 'abnormal_list': 'self.abnormal_list'}), '(data_inst, cols_index=self.bin_inner_param.\n bin_indexes, abnormal_list=self.abnormal_list)\n', (10269, 10364), False, 'from kernel.base.statics import MultivariateStatisticalSummary\n'), ((2386, 2413), 'numpy.fabs', 'np.fabs', (['(value - self.value)'], {}), '(value - self.value)\n', (2393, 2413), True, 'import numpy as np\n'), ((2722, 2749), 'numpy.fabs', 'np.fabs', (['(value - self.value)'], {}), '(value - self.value)\n', (2729, 2749), True, 'import numpy as np\n'), ((4216, 4235), 'numpy.array', 'np.array', (['res_array'], {}), '(res_array)\n', (4224, 4235), True, 'import numpy as np\n'), ((5531, 5583), 'common.python.federation.roles_to_parties', 'roles_to_parties', (['[consts.PROMOTER, consts.PROVIDER]'], {}), '([consts.PROMOTER, consts.PROVIDER])\n', (5547, 5583), False, 'from common.python.federation import roles_to_parties\n'), ((11544, 11566), 'numpy.array', 'np.array', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (11552, 11566), True, 'import numpy as np\n'), ((7541, 7557), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (7549, 7557), True, 'import numpy as np\n')] |
import sys
from timeit import default_timer
import numpy as np
import keras
class EvaluateInputTensor(keras.callbacks.Callback):
    """ Validate a model which does not expect external numpy data during training.

    Keras does not expect external numpy data at training time, and thus cannot
    accept numpy arrays for validation when all of a Keras Model's
    `Input(input_tensor)` layers are provided an `input_tensor` parameter,
    and the call to `Model.compile(target_tensors)` defines all `target_tensors`.
    Instead, create a second model configured with input tensors for validation
    and add it to the `EvaluateInputTensor` callback to perform validation.

    It is recommended that this callback be the first in the list of callbacks
    because it defines the validation variables required by many other callbacks,
    and Callbacks are made in order.

    #TODO(ahundt) replace when https://github.com/keras-team/keras/pull/9105 is resolved

    # Arguments
        model: Keras model on which to call model.evaluate().
        steps: Integer or `None`.
            Total number of steps (batches of samples)
            before declaring the evaluation round finished.
            Ignored with the default value of `None`.
        metrics_prefix: string prepended to each metric name written into `logs`.
        verbose: verbosity forwarded to `model.evaluate()` and result printing.
    """

    def __init__(self, model, steps, metrics_prefix='val', verbose=1):
        super(EvaluateInputTensor, self).__init__()
        self.val_model = model
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: `logs={}` was a mutable default argument mutated below, so
        # entries would leak between calls made without an explicit `logs`.
        logs = logs if logs is not None else {}
        # Mirror the freshly-trained weights into the validation model first.
        self.val_model.set_weights(self.model.get_weights())
        results = self.val_model.evaluate(None, None, steps=int(self.num_steps),
                                          verbose=self.verbose)
        metrics_str = '\n'
        for result, name in zip(results, self.val_model.metrics_names):
            metric_name = self.metrics_prefix + '_' + name
            # Expose each validation metric to subsequent callbacks via `logs`.
            logs[metric_name] = result
            if self.verbose > 0:
                metrics_str = metrics_str + metric_name + ': ' + str(result) + ' '
        if self.verbose > 0:
            print(metrics_str)
class EvaluateInputGenerator(keras.callbacks.Callback):
    """ Validate a model with a data generator at the end of every epoch.

    Runs `model.evaluate_generator` on the supplied generator and writes each
    resulting metric into `logs` under `metrics_prefix + '_' + name`, making
    the values available to later callbacks in the callback list.

    It is recommended that this callback be the first in the list of callbacks
    because it defines the validation variables required by many other callbacks,
    and Callbacks are made in order.

    #TODO(ahundt) replace when https://github.com/keras-team/keras/pull/9105 is available

    # Arguments
        generator: data generator passed to model.evaluate_generator().
        steps: Integer or `None`.
            Total number of steps (batches of samples)
            before declaring the evaluation round finished.
            Ignored with the default value of `None`.
        metrics_prefix: string prepended to each metric name written into `logs`.
        verbose: int. 0: quiet, >0: print the evaluated metrics.
    """

    def __init__(self, generator, steps, metrics_prefix='test', verbose=1):
        super(EvaluateInputGenerator, self).__init__()
        self.generator = generator
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: `logs={}` was a mutable default argument mutated below, so
        # entries would leak between calls made without an explicit `logs`.
        logs = logs if logs is not None else {}
        results = self.model.evaluate_generator(self.generator, steps=int(self.num_steps))
        metrics_str = '\n'
        for result, name in zip(results, self.model.metrics_names):
            metric_name = self.metrics_prefix + '_' + name
            # Expose each evaluation metric to subsequent callbacks via `logs`.
            logs[metric_name] = result
            if self.verbose > 0:
                metrics_str = metrics_str + metric_name + ': ' + str(result) + ' '
        if self.verbose > 0:
            print(metrics_str)
class PrintLogsCallback(keras.callbacks.Callback):
    """ Prints the log data at the end of each epoch. """

    def on_epoch_end(self, epoch, logs=None):
        # FIX: replaced the mutable default argument `logs={}` with the
        # None-sentinel idiom; mutable defaults are shared across calls.
        logs = logs if logs is not None else {}
        print('')
        print('logs: ' + str(logs))
class FineTuningCallback(keras.callbacks.Callback):
    """ Switch to fine tuning mode at the specified epoch
    Unlocks layers to make them trainable and resets the learning rate
    to a new initial value.

    # TODO(ahundt) update when https://github.com/keras-team/keras/issues/9477 is resolved.

    # Arguments
        epoch: The epoch at which to enable fine tuning
        layers: Integer for the min index in model.layers which will
           have trainable set to True, along with all layers after it.
        learning_rate: The new fine tuning learning rate to reset to.
        verbose: int. 0: quiet, 1: update messages.
        output_file: NOTE(review): stored below but never used in this class —
            confirm whether diagnostics were meant to go here instead of stdout.
    """
    def __init__(self, epoch=100, layers=0, learning_rate=0.0001, verbose=1, output_file=sys.stderr):
        super(FineTuningCallback, self).__init__()
        self.epoch = epoch
        self.layers = layers
        self.learning_rate = learning_rate
        self.verbose = verbose
        self.output_file = output_file
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Record the current learning rate so later callbacks/loggers can see it.
        logs['lr'] = keras.backend.get_value(self.model.optimizer.lr)
        # fine_tuning = epoch >= self.epoch
        # logs['fine_tuning'] = fine_tuning
        if epoch == self.epoch:
            if self.verbose > 0:
                print('\n\n--------------------------------------------------------\n'
                      'Epoch %05d Fine tuning initialized with a new '
                      'learning rate of %s.' % (epoch + 1, self.learning_rate))
            # Unlock the tail of the network, then re-compile so the change
            # in `trainable` actually takes effect.
            for layer in self.model.layers[self.layers:]:
                layer.trainable = True
            self.model.compile(self.model.optimizer, self.model.loss, self.model.metrics)
            if self.verbose > 1:
                print('\n\nepoch:' + str(epoch) + ' self.epoch: ' + str(self.epoch) + ' lr: ' + str(logs['lr']) +
                      ' self.learning_rate: ' + str(self.learning_rate) + ' float(K.get_value(self.model.optimizer.lr)): ' +
                      str(float(keras.backend.get_value(self.model.optimizer.lr))) + ' what is going on?0\n\n')
            # Apply the new fine-tuning learning rate to the (re-compiled) optimizer.
            keras.backend.set_value(self.model.optimizer.lr, self.learning_rate)
            if self.verbose > 1:
                print('\n\nepoch:' + str(epoch) + ' self.epoch: ' + str(self.epoch) + ' lr: ' + str(logs['lr']) +
                      ' self.learning_rate: ' + str(self.learning_rate) + ' float(K.get_value(self.model.optimizer.lr)): ' +
                      str(float(keras.backend.get_value(self.model.optimizer.lr))) + ' what is going on?1\n\n')
class SlowModelStopping(keras.callbacks.Callback):
    """ Stop a model from training if it runs too slowly.

    Tracks batch durations during the first `max_epoch_to_check_batch_time`
    epochs and raises if the mean batch time exceeds
    `max_batch_time_seconds`; also stops training at epoch end when the mean
    batch time is over the limit.

    NOTE(review): `max_epoch_time_seconds` is stored but never referenced in
    this class — confirm whether an epoch-duration check was intended.
    """
    def __init__(
            self,
            min_batch_patience=30,
            max_batch_time_seconds=1,
            max_epoch_time_seconds=7000,
            max_epoch_to_check_batch_time=2,
            verbose=0):
        self._min_batch_patience = min_batch_patience
        self._max_batch_time_seconds = max_batch_time_seconds
        self._max_epoch_time_seconds = max_epoch_time_seconds
        self._epoch_elapsed = 0
        self.verbose = verbose
        self.stopped_epoch = 0
        self.current_epoch = 0
        self.max_epoch_to_check_batch_time = max_epoch_to_check_batch_time
        # start timers on init just in case of
        # atypical situations like 0 training steps
        self._epoch_start = default_timer()
        self._batch_start = default_timer()
        self._batch_elapsed = []
    def on_epoch_begin(self, epoch, logs=None):
        self._epoch_start = default_timer()
        self.current_epoch = epoch
    def on_batch_begin(self, batch, logs=None):
        self._batch_start = default_timer()
        # NOTE(review): the elapsed list is cleared at every batch *begin*, so
        # the means below only ever cover the most recent batch — confirm
        # whether a per-epoch reset (in on_epoch_begin) was intended.
        self._batch_elapsed = []
    def on_batch_end(self, batch, logs=None):
        batch_elapsed = default_timer() - self._batch_start
        self._batch_elapsed.append(batch_elapsed)
        # Only check timing in the first few epochs, after a warm-up of
        # `_min_batch_patience` batches, within a 10-batch sampling window.
        if(self._min_batch_patience is not None and
                self._max_batch_time_seconds is not None and
                self.current_epoch < self.max_epoch_to_check_batch_time and
                batch > self._min_batch_patience and
                batch < self._min_batch_patience + 10):
            mean_batch = np.mean(self._batch_elapsed)
            if mean_batch > self._max_batch_time_seconds:
                raise ValueError('Batches took too long: ' + str(mean_batch) +
                                 ' vs max allowed: ' + str(self._max_batch_time_seconds))
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        # Only log results if there are more than 0 train steps
        if self._batch_elapsed:
            self._epoch_elapsed = default_timer() - self._epoch_start
            logs['epoch_elapsed'] = np.array(self._epoch_elapsed)
            mean_batch = np.mean(self._batch_elapsed)
            # keras will check for value.item(), so don't cast this to a float, leave it as a numpy array
            logs['mean_batch_elapsed'] = mean_batch
            if mean_batch > self._max_batch_time_seconds:
                # Request a graceful stop instead of raising mid-training.
                self.model.stop_training = True
                self.stopped_epoch = epoch
    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0 and self.verbose > 0:
            print('Epoch %05d: slow model, stopping early' % (self.stopped_epoch + 1))
class InaccurateModelStopping(keras.callbacks.Callback):
    """Abort training when the tracked metric collapses toward a degenerate
    prediction (e.g. always 0 or always 1).

    After ``min_batch_patience`` batches within an epoch, the running mean of
    ``metric`` must stay inside ``[min_pred, max_pred]`` or a ``ValueError``
    is raised. The running values reset at the end of every epoch.
    """

    def __init__(self, min_batch_patience=300, min_pred=0.05, max_pred=0.95, metric='mean_pred', verbose=0):
        self._min_batch_patience = min_batch_patience
        self._max_pred = max_pred
        self._min_pred = min_pred
        self._epoch_elapsed = 0
        self.verbose = verbose
        self.stopped_epoch = 0
        self.metric = metric
        self.metric_values = []

    def on_batch_end(self, batch, logs=None):
        # Track the metric whenever this batch reported it.
        if self.metric in logs:
            self.metric_values.append(logs[self.metric])
        checks_enabled = (self._min_batch_patience is not None and
                          self._max_pred is not None)
        if checks_enabled and batch > self._min_batch_patience:
            mean_value = np.mean(self.metric_values)
            if mean_value > self._max_pred or mean_value < self._min_pred:
                raise ValueError(f'{self.metric} was inaccurate: {mean_value}'
                                 f' vs allowed range [ {self._min_pred}'
                                 f', {self._max_pred}]')

    def on_epoch_end(self, epoch, logs=None):
        # Start each epoch's accuracy window fresh.
        self.metric_values = []
| [
"numpy.mean",
"keras.backend.get_value",
"timeit.default_timer",
"keras.backend.set_value",
"numpy.array"
] | [((5588, 5636), 'keras.backend.get_value', 'keras.backend.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (5611, 5636), False, 'import keras\n'), ((7893, 7908), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (7906, 7908), False, 'from timeit import default_timer\n'), ((7937, 7952), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (7950, 7952), False, 'from timeit import default_timer\n'), ((8063, 8078), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (8076, 8078), False, 'from timeit import default_timer\n'), ((8191, 8206), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (8204, 8206), False, 'from timeit import default_timer\n'), ((6611, 6679), 'keras.backend.set_value', 'keras.backend.set_value', (['self.model.optimizer.lr', 'self.learning_rate'], {}), '(self.model.optimizer.lr, self.learning_rate)\n', (6634, 6679), False, 'import keras\n'), ((8311, 8326), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (8324, 8326), False, 'from timeit import default_timer\n'), ((8701, 8729), 'numpy.mean', 'np.mean', (['self._batch_elapsed'], {}), '(self._batch_elapsed)\n', (8708, 8729), True, 'import numpy as np\n'), ((9254, 9283), 'numpy.array', 'np.array', (['self._epoch_elapsed'], {}), '(self._epoch_elapsed)\n', (9262, 9283), True, 'import numpy as np\n'), ((9309, 9337), 'numpy.mean', 'np.mean', (['self._batch_elapsed'], {}), '(self._batch_elapsed)\n', (9316, 9337), True, 'import numpy as np\n'), ((9182, 9197), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (9195, 9197), False, 'from timeit import default_timer\n'), ((10708, 10735), 'numpy.mean', 'np.mean', (['self.metric_values'], {}), '(self.metric_values)\n', (10715, 10735), True, 'import numpy as np\n'), ((6969, 7017), 'keras.backend.get_value', 'keras.backend.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (6992, 7017), False, 'import keras\n'), ((6519, 6567), 'keras.backend.get_value', 
'keras.backend.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (6542, 6567), False, 'import keras\n')] |
import streamlit as st
from PIL import Image
import pandas as pd
import numpy as np
import pickle
import plotly.express as px
import shap
# Use the full browser width for the dashboard layout.
st.set_page_config(layout="wide")
### Functions ###
# Discrete color pairs for the two TARGET classes; `_r` is the reversed order.
COLOR_BR_r = ['#00CC96', '#e03838']
COLOR_BR =['#e03838', '#00CC96']
@st.cache
def histogram(df, x='str', legend=True, client=None):
    """Histogram of column *x* split by TARGET, with an optional client marker.

    client = [df_test, input_client]: when given, draw a dashed vertical line
    at the selected client's value of *x*.
    """
    shared_kwargs = dict(x=x,
                         color="TARGET",
                         width=300,
                         category_orders={"TARGET": [1, 0]},
                         color_discrete_sequence=COLOR_BR,
                         marginal='box')
    if x == "TARGET":
        fig = px.histogram(df, height=300, **shared_kwargs)
        fig.update_xaxes(showticklabels=False)
    else:
        fig = px.histogram(df,
                           height=250,
                           barmode="group",
                           histnorm='percent',
                           **shared_kwargs)
    fig.update_layout(margin=dict(l=10, r=10, t=10, b=10))
    if legend == True:
        fig.update_layout(legend=dict(yanchor="top",xanchor="right"))
    else:
        fig.update_layout(showlegend=False)
    if client:
        selected_row = client[0][client[0].SK_ID_CURR == client[1]]
        marker_x = selected_row[x].to_numpy()[0]
        fig.add_vline(x=marker_x, line_width=3, line_dash="dash", line_color="black")
    return fig
### Data ###
# NOTE(review): relative paths — the app presumably must run from the project root.
train = pd.read_csv('Datasets/reduced_train.csv')
test = pd.read_csv('Datasets/reduced_test.csv')
test_ID = test['SK_ID_CURR']
test_features = test.drop(columns=['SK_ID_CURR'])
# Load the pre-trained scoring model from disk.
with open('Model/Final_Model.pkl', 'rb') as file:
    Final_Model = pickle.load(file)
### Title principal + input client ###
col1, col2 = st.columns((252,1024))
symbol = Image.open('Images/Symbol.png')
hcg = Image.open('Images/Home Credit Group.png')
col1.image(symbol, use_column_width=True)
col2.image(hcg, use_column_width=True)
st.write('''
# Client's Scoring
Machine learning model to predict how capable each applicant is
of repaying a loan from [**Home Credit Default Risk**]
(https://www.kaggle.com/c/home-credit-default-risk/data).
''')
st.markdown(':arrow_upper_left: Click on the left sidebar to adjust most important variables and improve the prediction score.')
st.write(' *** ')
col1, col2 = st.columns(2)
input_client = col1.selectbox('Select Client ID', test_ID)
### Prediction ###
# Single-row frame for the selected client (feature columns only).
data_for_prediction = test_features[test['SK_ID_CURR']==input_client]
y_prob = Final_Model.predict_proba(data_for_prediction)
# Flatten to the two class probabilities, paired with 'Success'/'Failure' below.
y_prob = [y_prob.flatten()[0], y_prob.flatten()[1]]
fig = px.pie(values=y_prob, names=['Success', 'Failure '], color=[0,1], color_discrete_sequence=COLOR_BR_r,
             width=230, height=230)
fig.update_layout(margin=dict(l=0, r=0, t=30, b=0))
# fig.update_layout(legend=dict(yanchor="top",xanchor="right"))
col2.plotly_chart(fig, use_container_width=True)
# Headline: whichever outcome carries the larger probability.
if y_prob[1] < y_prob[0]:
    col2.subheader('**Successful payment probability.**')
else:
    col2.subheader('**Failure payment probability.**')
### Summary plot SHAP Values ###
# SHAP's force plot expects a 2-D (1, n_features) array.
data_for_prediction_array = data_for_prediction.values.reshape(1, -1)
def st_shap(plot, height=None):
    """Embed a SHAP javascript plot inside the Streamlit page."""
    head = "<head>" + shap.getjs() + "</head>"
    body = "<body>" + plot.html() + "</body>"
    st.components.v1.html(head + body, height=height)
shap.initjs()
explainer = shap.TreeExplainer(Final_Model)
st.set_option('deprecation.showPyplotGlobalUse', False)
shap_values = explainer.shap_values(test_features)
# Rank features by mean absolute SHAP value over the test set.
shap_sum = np.abs(shap_values[0]).mean(axis=0)
importance_df = pd.DataFrame([test_features.columns.tolist(), shap_sum.tolist()]).T
importance_df.columns = ['column_name', 'shap_importance']
importance_df = importance_df.sort_values('shap_importance', ascending=False)
# Top-3 features; also drives the sidebar sliders defined below.
most_important_var = importance_df['column_name'][0:3].tolist()
st.write(''' *** ''')
st.subheader('''**Summary plot with SHAP Values:**''')
st.pyplot(shap.summary_plot(shap_values[1], test_features, max_display=10))
st.write(''' *** ''')
st.subheader('''**Most important variables:**''')
# One class-split histogram per important feature, marking the client's value.
for x in most_important_var:
    st.plotly_chart(histogram(train, x=x, client=[test, input_client]), use_container_width=True)
st.write(''' *** ''')
st.subheader('''**Force plot with SHAP Values:**''')
# Recompute SHAP values for the single selected client only.
shap_values = explainer.shap_values(data_for_prediction_array)
st_shap(shap.force_plot(explainer.expected_value[1], shap_values[1], data_for_prediction, plot_cmap=COLOR_BR_r))
st.write(''' *** ''')
def adjusted_variables():
var_1 = st.sidebar.slider(most_important_var[0], train[most_important_var[0]].min(), train[most_important_var[0]].max(), float(data_for_prediction[most_important_var[0]]))
var_2 = st.sidebar.slider(most_important_var[1], train[most_important_var[1]].min(), train[most_important_var[1]].max(), float(data_for_prediction[most_important_var[1]]))
var_3 = st.sidebar.slider(most_important_var[2], train[most_important_var[2]].min(), train[most_important_var[2]].max(), float(data_for_prediction[most_important_var[2]]))
#var_4 = st.sidebar.slider(most_important_var[3], train[most_important_var[3]].min(), train[most_important_var[3]].max(), float(data_for_prediction[most_important_var[3]]))
#var_5 = st.sidebar.slider(most_important_var[4], float(train[most_important_var[4]].min()), float(train[most_important_var[4]].max()), float(data_for_prediction[most_important_var[4]]))
dict = {most_important_var[0] : [var_1],
most_important_var[1] : [var_2],
most_important_var[2] : [var_3]}
#most_important_var[3] : [var_4],
#most_important_var[4] : [var_5]}
data_adjusted = data_for_prediction.copy()
for key,value in dict.items():
data_adjusted[key] = value
return data_adjusted
### Adjusted prediction ###
# Re-score the client with the sidebar-adjusted feature values.
adj = adjusted_variables()
y_prob_adj = Final_Model.predict_proba(adj)
# Flatten to the two class probabilities, paired with 'Success'/'Failure' below.
y_prob_adj = [y_prob_adj.flatten()[0], y_prob_adj.flatten()[1]]
st.sidebar.write(''' *** ''')
st.sidebar.write('# Result on predictions:')
fig = px.pie(values=y_prob_adj, names=['Success', 'Failure '], color=[0,1], color_discrete_sequence=COLOR_BR_r,
             width=230, height=230)
fig.update_layout(margin=dict(l=0, r=0, t=30, b=0))
# fig.update_layout(legend=dict(yanchor="top",xanchor="right"))
st.sidebar.plotly_chart(fig, use_container_width=True)
# Headline: whichever adjusted outcome carries the larger probability.
if y_prob_adj[1] < y_prob_adj[0]:
    st.sidebar.subheader('**Successful payment probability after adjusting variables.**')
else:
    st.sidebar.subheader('**Failure payment probability after adjusting variables.**')
| [
"pandas.read_csv",
"shap.summary_plot",
"shap.force_plot",
"shap.TreeExplainer",
"streamlit.components.v1.html",
"plotly.express.pie",
"streamlit.sidebar.write",
"streamlit.set_page_config",
"streamlit.set_option",
"streamlit.columns",
"numpy.abs",
"streamlit.markdown",
"plotly.express.histo... | [((139, 172), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (157, 172), True, 'import streamlit as st\n'), ((1643, 1684), 'pandas.read_csv', 'pd.read_csv', (['"""Datasets/reduced_train.csv"""'], {}), "('Datasets/reduced_train.csv')\n", (1654, 1684), True, 'import pandas as pd\n'), ((1692, 1732), 'pandas.read_csv', 'pd.read_csv', (['"""Datasets/reduced_test.csv"""'], {}), "('Datasets/reduced_test.csv')\n", (1703, 1732), True, 'import pandas as pd\n'), ((1953, 1976), 'streamlit.columns', 'st.columns', (['(252, 1024)'], {}), '((252, 1024))\n', (1963, 1976), True, 'import streamlit as st\n'), ((1986, 2017), 'PIL.Image.open', 'Image.open', (['"""Images/Symbol.png"""'], {}), "('Images/Symbol.png')\n", (1996, 2017), False, 'from PIL import Image\n'), ((2024, 2066), 'PIL.Image.open', 'Image.open', (['"""Images/Home Credit Group.png"""'], {}), "('Images/Home Credit Group.png')\n", (2034, 2066), False, 'from PIL import Image\n'), ((2149, 2373), 'streamlit.write', 'st.write', (['"""\n# Client\'s Scoring\n\nMachine learning model to predict how capable each applicant is\nof repaying a loan from [**Home Credit Default Risk**]\n(https://www.kaggle.com/c/home-credit-default-risk/data).\n"""'], {}), '(\n """\n# Client\'s Scoring\n\nMachine learning model to predict how capable each applicant is\nof repaying a loan from [**Home Credit Default Risk**]\n(https://www.kaggle.com/c/home-credit-default-risk/data).\n"""\n )\n', (2157, 2373), True, 'import streamlit as st\n'), ((2365, 2503), 'streamlit.markdown', 'st.markdown', (['""":arrow_upper_left: Click on the left sidebar to adjust most important variables and improve the prediction score."""'], {}), "(\n ':arrow_upper_left: Click on the left sidebar to adjust most important variables and improve the prediction score.'\n )\n", (2376, 2503), True, 'import streamlit as st\n'), ((2495, 2512), 'streamlit.write', 'st.write', (['""" *** """'], {}), "(' *** 
')\n", (2503, 2512), True, 'import streamlit as st\n'), ((2526, 2539), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (2536, 2539), True, 'import streamlit as st\n'), ((2806, 2935), 'plotly.express.pie', 'px.pie', ([], {'values': 'y_prob', 'names': "['Success', 'Failure ']", 'color': '[0, 1]', 'color_discrete_sequence': 'COLOR_BR_r', 'width': '(230)', 'height': '(230)'}), "(values=y_prob, names=['Success', 'Failure '], color=[0, 1],\n color_discrete_sequence=COLOR_BR_r, width=230, height=230)\n", (2812, 2935), True, 'import plotly.express as px\n'), ((3506, 3519), 'shap.initjs', 'shap.initjs', ([], {}), '()\n', (3517, 3519), False, 'import shap\n'), ((3532, 3563), 'shap.TreeExplainer', 'shap.TreeExplainer', (['Final_Model'], {}), '(Final_Model)\n', (3550, 3563), False, 'import shap\n'), ((3565, 3620), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (3578, 3620), True, 'import streamlit as st\n'), ((4008, 4025), 'streamlit.write', 'st.write', (['""" *** """'], {}), "(' *** ')\n", (4016, 4025), True, 'import streamlit as st\n'), ((4031, 4081), 'streamlit.subheader', 'st.subheader', (['"""**Summary plot with SHAP Values:**"""'], {}), "('**Summary plot with SHAP Values:**')\n", (4043, 4081), True, 'import streamlit as st\n'), ((4164, 4181), 'streamlit.write', 'st.write', (['""" *** """'], {}), "(' *** ')\n", (4172, 4181), True, 'import streamlit as st\n'), ((4187, 4232), 'streamlit.subheader', 'st.subheader', (['"""**Most important variables:**"""'], {}), "('**Most important variables:**')\n", (4199, 4232), True, 'import streamlit as st\n'), ((4366, 4383), 'streamlit.write', 'st.write', (['""" *** """'], {}), "(' *** ')\n", (4374, 4383), True, 'import streamlit as st\n'), ((4389, 4437), 'streamlit.subheader', 'st.subheader', (['"""**Force plot with SHAP Values:**"""'], {}), "('**Force plot with SHAP Values:**')\n", (4401, 4437), True, 'import streamlit as 
st\n'), ((4620, 4637), 'streamlit.write', 'st.write', (['""" *** """'], {}), "(' *** ')\n", (4628, 4637), True, 'import streamlit as st\n'), ((4660, 4704), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""# Adjustable parameters:"""'], {}), "('# Adjustable parameters:')\n", (4676, 4704), True, 'import streamlit as st\n'), ((6169, 6194), 'streamlit.sidebar.write', 'st.sidebar.write', (['""" *** """'], {}), "(' *** ')\n", (6185, 6194), True, 'import streamlit as st\n'), ((6200, 6244), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""# Result on predictions:"""'], {}), "('# Result on predictions:')\n", (6216, 6244), True, 'import streamlit as st\n'), ((6252, 6385), 'plotly.express.pie', 'px.pie', ([], {'values': 'y_prob_adj', 'names': "['Success', 'Failure ']", 'color': '[0, 1]', 'color_discrete_sequence': 'COLOR_BR_r', 'width': '(230)', 'height': '(230)'}), "(values=y_prob_adj, names=['Success', 'Failure '], color=[0, 1],\n color_discrete_sequence=COLOR_BR_r, width=230, height=230)\n", (6258, 6385), True, 'import plotly.express as px\n'), ((6497, 6551), 'streamlit.sidebar.plotly_chart', 'st.sidebar.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (6520, 6551), True, 'import streamlit as st\n'), ((1881, 1898), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1892, 1898), False, 'import pickle\n'), ((3457, 3504), 'streamlit.components.v1.html', 'st.components.v1.html', (['shap_html'], {'height': 'height'}), '(shap_html, height=height)\n', (3478, 3504), True, 'import streamlit as st\n'), ((4097, 4161), 'shap.summary_plot', 'shap.summary_plot', (['shap_values[1]', 'test_features'], {'max_display': '(10)'}), '(shap_values[1], test_features, max_display=10)\n', (4114, 4161), False, 'import shap\n'), ((4514, 4621), 'shap.force_plot', 'shap.force_plot', (['explainer.expected_value[1]', 'shap_values[1]', 'data_for_prediction'], {'plot_cmap': 'COLOR_BR_r'}), '(explainer.expected_value[1], shap_values[1],\n 
data_for_prediction, plot_cmap=COLOR_BR_r)\n', (4529, 4621), False, 'import shap\n'), ((6591, 6681), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""**Successful payment probability after adjusting variables.**"""'], {}), "(\n '**Successful payment probability after adjusting variables.**')\n", (6611, 6681), True, 'import streamlit as st\n'), ((6687, 6774), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""**Failure payment probability after adjusting variables.**"""'], {}), "(\n '**Failure payment probability after adjusting variables.**')\n", (6707, 6774), True, 'import streamlit as st\n'), ((407, 561), 'plotly.express.histogram', 'px.histogram', (['df'], {'x': 'x', 'color': '"""TARGET"""', 'width': '(300)', 'height': '(300)', 'category_orders': "{'TARGET': [1, 0]}", 'color_discrete_sequence': 'COLOR_BR', 'marginal': '"""box"""'}), "(df, x=x, color='TARGET', width=300, height=300,\n category_orders={'TARGET': [1, 0]}, color_discrete_sequence=COLOR_BR,\n marginal='box')\n", (419, 561), True, 'import plotly.express as px\n'), ((856, 1047), 'plotly.express.histogram', 'px.histogram', (['df'], {'x': 'x', 'color': '"""TARGET"""', 'width': '(300)', 'height': '(250)', 'category_orders': "{'TARGET': [1, 0]}", 'color_discrete_sequence': 'COLOR_BR', 'barmode': '"""group"""', 'histnorm': '"""percent"""', 'marginal': '"""box"""'}), "(df, x=x, color='TARGET', width=300, height=250,\n category_orders={'TARGET': [1, 0]}, color_discrete_sequence=COLOR_BR,\n barmode='group', histnorm='percent', marginal='box')\n", (868, 1047), True, 'import plotly.express as px\n'), ((3684, 3706), 'numpy.abs', 'np.abs', (['shap_values[0]'], {}), '(shap_values[0])\n', (3690, 3706), True, 'import numpy as np\n'), ((3405, 3417), 'shap.getjs', 'shap.getjs', ([], {}), '()\n', (3415, 3417), False, 'import shap\n')] |
from atom.api import Atom, List, ContainerList, Coerced, Delegator
import numpy as np
class TestClass(Atom):
l1 = ContainerList()
tc = TestClass()
tc.l1 = [1,23,3]
tc.l1 = np.array([]) | [
"numpy.array",
"atom.api.ContainerList"
] | [((182, 194), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (190, 194), True, 'import numpy as np\n'), ((119, 134), 'atom.api.ContainerList', 'ContainerList', ([], {}), '()\n', (132, 134), False, 'from atom.api import Atom, List, ContainerList, Coerced, Delegator\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# <NAME> — March 2014
"""
Qt adaptation of <NAME>'s tutorial to integrate Matplotlib
http://docs.enthought.com/traitsui/tutorials/traits_ui_scientific_app.html#extending-traitsui-adding-a-matplotlib-figure-to-our-application
based on Qt-based code shared by <NAME>, May 2012
http://markmail.org/message/z3hnoqruk56g2bje
adapted and tested to work with PySide from Anaconda in March 2014
"""
from pyface.qt import QtGui, QtCore
import matplotlib
# We want matplotlib to use a QT backend
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
from traits.api import Any, Instance
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
class _MPLFigureEditor(Editor):
scrollable = True
def init(self, parent):
self.control = self._create_canvas(parent)
self.set_tooltip()
def update_editor(self):
pass
def _create_canvas(self, parent):
""" Create the MPL canvas. """
# matplotlib commands to create a canvas
frame = QtGui.QWidget()
mpl_canvas = FigureCanvas(self.value)
mpl_canvas.setParent(frame)
mpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(mpl_canvas)
vbox.addWidget(mpl_toolbar)
frame.setLayout(vbox)
return frame
class MPLFigureEditor(BasicEditorFactory):
klass = _MPLFigureEditor
if __name__ == "__main__":
# Create a window to demo the editor
from traits.api import HasTraits, Int, Float, on_trait_change
from traitsui.api import View, Item
from numpy import sin, cos, linspace, pi
class Test(HasTraits):
figure = Instance(Figure, ())
n = Int(11)
a = Float(0.5)
view = View(Item('figure', editor=MPLFigureEditor(),
show_label=False),
Item('n'),
Item('a'),
width=400,
height=300,
resizable=True)
def __init__(self):
super(Test, self).__init__()
axes = self.figure.add_subplot(111)
self._t = linspace(0, 2*pi, 200)
self.plot()
@on_trait_change('n,a')
def plot(self):
t = self._t
a = self.a
n = self.n
axes = self.figure.axes[0]
if not axes.lines:
axes.plot(sin(t)*(1+a*cos(n*t)), cos(t)*(1+a*cos(n*t)))
else:
l = axes.lines[0]
l.set_xdata(sin(t)*(1+a*cos(n*t)))
l.set_ydata(cos(t)*(1+a*cos(n*t)))
canvas = self.figure.canvas
if canvas is not None:
canvas.draw()
t = Test()
t.configure_traits()
| [
"matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg",
"traits.api.Instance",
"traits.api.on_trait_change",
"matplotlib.use",
"pyface.qt.QtGui.QWidget",
"pyface.qt.QtGui.QVBoxLayout",
"numpy.sin",
"traitsui.api.Item",
"numpy.linspace",
"numpy.cos",
"traits.api.Int",
"matplotlib.backends.backe... | [((532, 556), 'matplotlib.use', 'matplotlib.use', (['"""Qt4Agg"""'], {}), "('Qt4Agg')\n", (546, 556), False, 'import matplotlib\n'), ((1225, 1240), 'pyface.qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (1238, 1240), False, 'from pyface.qt import QtGui, QtCore\n'), ((1261, 1285), 'matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.value'], {}), '(self.value)\n', (1273, 1285), True, 'from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n'), ((1342, 1381), 'matplotlib.backends.backend_qt4agg.NavigationToolbar2QT', 'NavigationToolbar2QT', (['mpl_canvas', 'frame'], {}), '(mpl_canvas, frame)\n', (1362, 1381), False, 'from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT\n'), ((1396, 1415), 'pyface.qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (1413, 1415), False, 'from pyface.qt import QtGui, QtCore\n'), ((1869, 1889), 'traits.api.Instance', 'Instance', (['Figure', '()'], {}), '(Figure, ())\n', (1877, 1889), False, 'from traits.api import Any, Instance\n'), ((1901, 1908), 'traits.api.Int', 'Int', (['(11)'], {}), '(11)\n', (1904, 1908), False, 'from traits.api import HasTraits, Int, Float, on_trait_change\n'), ((1920, 1930), 'traits.api.Float', 'Float', (['(0.5)'], {}), '(0.5)\n', (1925, 1930), False, 'from traits.api import HasTraits, Int, Float, on_trait_change\n'), ((2408, 2430), 'traits.api.on_trait_change', 'on_trait_change', (['"""n,a"""'], {}), "('n,a')\n", (2423, 2430), False, 'from traits.api import HasTraits, Int, Float, on_trait_change\n'), ((2061, 2070), 'traitsui.api.Item', 'Item', (['"""n"""'], {}), "('n')\n", (2065, 2070), False, 'from traitsui.api import View, Item\n'), ((2091, 2100), 'traitsui.api.Item', 'Item', (['"""a"""'], {}), "('a')\n", (2095, 2100), False, 'from traitsui.api import View, Item\n'), ((2346, 2370), 'numpy.linspace', 'linspace', (['(0)', '(2 * pi)', '(200)'], {}), '(0, 2 * pi, 200)\n', (2354, 2370), False, 'from 
numpy import sin, cos, linspace, pi\n'), ((2615, 2621), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (2618, 2621), False, 'from numpy import sin, cos, linspace, pi\n'), ((2638, 2644), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (2641, 2644), False, 'from numpy import sin, cos, linspace, pi\n'), ((2740, 2746), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (2743, 2746), False, 'from numpy import sin, cos, linspace, pi\n'), ((2791, 2797), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (2794, 2797), False, 'from numpy import sin, cos, linspace, pi\n'), ((2627, 2637), 'numpy.cos', 'cos', (['(n * t)'], {}), '(n * t)\n', (2630, 2637), False, 'from numpy import sin, cos, linspace, pi\n'), ((2650, 2660), 'numpy.cos', 'cos', (['(n * t)'], {}), '(n * t)\n', (2653, 2660), False, 'from numpy import sin, cos, linspace, pi\n'), ((2752, 2762), 'numpy.cos', 'cos', (['(n * t)'], {}), '(n * t)\n', (2755, 2762), False, 'from numpy import sin, cos, linspace, pi\n'), ((2803, 2813), 'numpy.cos', 'cos', (['(n * t)'], {}), '(n * t)\n', (2806, 2813), False, 'from numpy import sin, cos, linspace, pi\n')] |
"""High level coarsened grid function."""
import logging
log = logging.getLogger(__name__)
import os
import numpy as np
import resqpy.crs as rqc
import resqpy.grid as grr
import resqpy.model as rq
import resqpy.olio.fine_coarse as fc
import resqpy.olio.uuid as bu
import resqpy.olio.xml_et as rqet
import resqpy.property as rqp
from resqpy.derived_model._common import _write_grid
def coarsened_grid(epc_file,
source_grid,
fine_coarse,
inherit_properties = False,
inherit_realization = None,
inherit_all_realizations = False,
set_parent_window = None,
infill_missing_geometry = True,
new_grid_title = None,
new_epc_file = None):
"""Generates a coarsened version of an unsplit source grid, todo: optionally inheriting properties.
arguments:
epc_file (string): file name to rewrite the model's xml to; if source grid is None, model is loaded from this file
source_grid (grid.Grid object, optional): if None, the epc_file is loaded and it should contain one ijk grid object
(or one 'ROOT' grid) which is used as the source grid
fine_coarse (resqpy.olio.fine_coarse.FineCoarse object): the mapping between cells in the fine (source) and
coarse (output) grids
inherit_properties (boolean, default False): if True, the new grid will have a copy of any properties associated
with the source grid, with values upscaled or sampled
inherit_realization (int, optional): realization number for which properties will be inherited; ignored if
inherit_properties is False
inherit_all_realizations (boolean, default False): if True (and inherit_realization is None), properties for all
realizations will be inherited; if False, only properties with a realization of None are inherited; ignored if
inherit_properties is False or inherit_realization is not None
set_parent_window (boolean or str, optional): if True or 'parent', the coarsened grid has its parent window attribute
set; if False, the parent window is not set; if None, the default will be True if new_epc_file is None or False
otherwise; if 'grandparent' then an intervening parent window with no refinement or coarsening will be skipped
and its box used in the parent window for the new grid, relating directly to the original grid
infill_missing_geometry (boolean, default True): if True, an attempt is made to generate grid geometry in the
source grid wherever it is undefined; if False, any undefined geometry will result in an assertion failure
new_grid_title (string): used as the citation title text for the new grid object
new_epc_file (string, optional): if None, the source epc_file is extended with the new grid object; if present,
a new epc file (& associated h5 file) is created to contain the refined grid (& crs)
returns:
new grid object being the coarsened grid; the epc and hdf5 files are written to as an intentional side effect
note:
this function coarsens an entire grid; to coarsen a local area of a grid, first use the extract_box function
and then use this function on the extracted grid; in such a case, using a value of 'grandparent' for the
set_parent_window argument will relate the coarsened grid back to the original
"""
new_epc_file, model, source_grid = _establish_files_and_model(epc_file, new_epc_file, source_grid)
if set_parent_window is None:
set_parent_window = (new_epc_file is None)
assert fine_coarse is not None and isinstance(fine_coarse, fc.FineCoarse)
assert not source_grid.has_split_coordinate_lines, 'coarsening only available for unsplit grids: use other functions to heal faults first'
if infill_missing_geometry and (not source_grid.geometry_defined_for_all_cells() or
not source_grid.geometry_defined_for_all_pillars()):
log.debug('attempting infill of geometry missing in source grid')
source_grid.set_geometry_is_defined(treat_as_nan = None,
treat_dots_as_nan = True,
complete_partial_pillars = True,
nullify_partial_pillars = False,
complete_all = True)
assert source_grid.geometry_defined_for_all_pillars(), 'coarsening requires geometry to be defined for all pillars'
assert source_grid.geometry_defined_for_all_cells(), 'coarsening requires geometry to be defined for all cells'
assert not source_grid.k_gaps, 'coarsening of grids with k gaps not currently supported'
assert tuple(fine_coarse.fine_extent_kji) == tuple(source_grid.extent_kji), \
'fine_coarse mapping fine extent does not match that of source grid'
fine_coarse.assert_valid()
source_grid.cache_all_geometry_arrays()
source_points = source_grid.points_ref().reshape((source_grid.nk + 1), (source_grid.nj + 1) * (source_grid.ni + 1),
3)
# create a new, empty grid object
grid = grr.Grid(model)
# inherit attributes from source grid
grid.grid_representation = 'IjkGrid'
grid.extent_kji = fine_coarse.coarse_extent_kji
grid.nk, grid.nj, grid.ni = grid.extent_kji[0], grid.extent_kji[1], grid.extent_kji[2]
grid.k_direction_is_down = source_grid.k_direction_is_down
grid.grid_is_right_handed = source_grid.grid_is_right_handed
grid.pillar_shape = source_grid.pillar_shape
grid.has_split_coordinate_lines = False
grid.split_pillars_count = None
# inherit the coordinate reference system used by the grid geometry
grid.crs_uuid = source_grid.crs_uuid
if source_grid.model is not model:
model.duplicate_node(source_grid.model.root_for_uuid(grid.crs_uuid), add_as_part = True)
grid.crs = rqc.Crs(model, grid.crs_uuid)
coarsened_points = np.empty(
(grid.nk + 1, (grid.nj + 1) * (grid.ni + 1), 3)) # note: gets reshaped after being populated
k_ratio_constant = fine_coarse.constant_ratios[0]
if k_ratio_constant:
k_indices = None
else:
k_indices = np.empty(grid.nk + 1, dtype = int)
k_indices[0] = 0
for k in range(grid.nk):
k_indices[k + 1] = k_indices[k] + fine_coarse.vector_ratios[0][k]
assert k_indices[-1] == source_grid.nk
for cjp in range(grid.nj + 1):
for cji in range(grid.ni + 1):
natural_coarse_pillar = cjp * (grid.ni + 1) + cji
natural_fine_pillar = fine_coarse.fine_for_coarse_natural_pillar_index(natural_coarse_pillar)
if k_ratio_constant:
coarsened_points[:, natural_coarse_pillar, :] = source_points[0:source_grid.nk + 1:k_ratio_constant,
natural_fine_pillar, :]
else:
coarsened_points[:, natural_coarse_pillar, :] = source_points[k_indices, natural_fine_pillar, :]
grid.points_cached = coarsened_points.reshape(((grid.nk + 1), (grid.nj + 1), (grid.ni + 1), 3))
grid.geometry_defined_for_all_pillars_cached = True
grid.geometry_defined_for_all_cells_cached = True
grid.array_cell_geometry_is_defined = np.full(tuple(grid.extent_kji), True, dtype = bool)
collection = None
if inherit_properties:
source_collection = source_grid.extract_property_collection()
if source_collection is not None:
collection = rqp.GridPropertyCollection()
collection.set_grid(grid)
collection.extend_imported_list_copying_properties_from_other_grid_collection(
source_collection,
coarsening = fine_coarse,
realization = inherit_realization,
copy_all_realizations = inherit_all_realizations)
_set_parent_window_in_grid(set_parent_window, source_grid, grid, fine_coarse)
# write grid
if new_grid_title is None or len(new_grid_title) == 0:
new_grid_title = 'grid coarsened from ' + str(rqet.citation_title_for_node(source_grid.root))
model.h5_release()
if new_epc_file:
_write_grid(new_epc_file, grid, property_collection = collection, grid_title = new_grid_title, mode = 'w')
else:
ext_uuid, _ = model.h5_uuid_and_path_for_node(rqet.find_nested_tags(source_grid.root, ['Geometry', 'Points']),
'Coordinates')
_write_grid(epc_file,
grid,
ext_uuid = ext_uuid,
property_collection = collection,
grid_title = new_grid_title,
mode = 'a')
return grid
def _set_parent_window_in_grid(set_parent_window, source_grid, grid, fine_coarse):
if set_parent_window:
pw_grid_uuid = source_grid.uuid
if isinstance(set_parent_window, str):
if set_parent_window == 'grandparent':
assert fine_coarse.within_fine_box is None or (np.all(fine_coarse.within_fine_box[0] == 0) and
np.all(fine_coarse.within_fine_box[1]) == source_grid.extent_kji - 1), \
'attempt to set grandparent window for grid when parent window is present'
source_fine_coarse = source_grid.parent_window
if source_fine_coarse is not None and (source_fine_coarse.within_fine_box is not None or
source_fine_coarse.within_coarse_box is not None):
assert source_fine_coarse.fine_extent_kji == source_fine_coarse.coarse_extent_kji, 'parentage involves refinement or coarsening'
if source_fine_coarse.within_fine_box is not None:
fine_coarse.within_fine_box = source_fine_coarse.within_fine_box
else:
fine_coarse.within_fine_box = source_fine_coarse.within_coarse_box
pw_grid_uuid = bu.uuid_from_string(
rqet.find_nested_tags_text(source_grid.root, ['ParentWindow', 'ParentGrid', 'UUID']))
else:
assert set_parent_window == 'parent', 'set_parent_window value not recognized: ' + set_parent_window
grid.set_parent(pw_grid_uuid, False, fine_coarse)
def _establish_files_and_model(epc_file, new_epc_file, source_grid):
assert epc_file or new_epc_file, 'epc file name not specified'
if new_epc_file and epc_file and (
(new_epc_file == epc_file) or
(os.path.exists(new_epc_file) and os.path.exists(epc_file) and os.path.samefile(new_epc_file, epc_file))):
new_epc_file = None
assert epc_file or source_grid is not None, 'neither epc file name nor source grid supplied'
if epc_file:
model = rq.Model(epc_file)
if source_grid is None:
source_grid = model.grid() # requires there to be exactly one grid in model (or one named ROOT)
else:
model = source_grid.model
assert source_grid.grid_representation == 'IjkGrid'
assert model is not None
return new_epc_file, model, source_grid
| [
"logging.getLogger",
"resqpy.property.GridPropertyCollection",
"os.path.exists",
"os.path.samefile",
"resqpy.olio.xml_et.find_nested_tags_text",
"resqpy.model.Model",
"resqpy.grid.Grid",
"resqpy.crs.Crs",
"resqpy.olio.xml_et.citation_title_for_node",
"numpy.empty",
"numpy.all",
"resqpy.olio.xm... | [((65, 92), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (82, 92), False, 'import logging\n'), ((5320, 5335), 'resqpy.grid.Grid', 'grr.Grid', (['model'], {}), '(model)\n', (5328, 5335), True, 'import resqpy.grid as grr\n'), ((6084, 6113), 'resqpy.crs.Crs', 'rqc.Crs', (['model', 'grid.crs_uuid'], {}), '(model, grid.crs_uuid)\n', (6091, 6113), True, 'import resqpy.crs as rqc\n'), ((6138, 6195), 'numpy.empty', 'np.empty', (['(grid.nk + 1, (grid.nj + 1) * (grid.ni + 1), 3)'], {}), '((grid.nk + 1, (grid.nj + 1) * (grid.ni + 1), 3))\n', (6146, 6195), True, 'import numpy as np\n'), ((6385, 6417), 'numpy.empty', 'np.empty', (['(grid.nk + 1)'], {'dtype': 'int'}), '(grid.nk + 1, dtype=int)\n', (6393, 6417), True, 'import numpy as np\n'), ((8389, 8494), 'resqpy.derived_model._common._write_grid', '_write_grid', (['new_epc_file', 'grid'], {'property_collection': 'collection', 'grid_title': 'new_grid_title', 'mode': '"""w"""'}), "(new_epc_file, grid, property_collection=collection, grid_title=\n new_grid_title, mode='w')\n", (8400, 8494), False, 'from resqpy.derived_model._common import _write_grid\n'), ((8702, 8822), 'resqpy.derived_model._common._write_grid', '_write_grid', (['epc_file', 'grid'], {'ext_uuid': 'ext_uuid', 'property_collection': 'collection', 'grid_title': 'new_grid_title', 'mode': '"""a"""'}), "(epc_file, grid, ext_uuid=ext_uuid, property_collection=\n collection, grid_title=new_grid_title, mode='a')\n", (8713, 8822), False, 'from resqpy.derived_model._common import _write_grid\n'), ((11081, 11099), 'resqpy.model.Model', 'rq.Model', (['epc_file'], {}), '(epc_file)\n', (11089, 11099), True, 'import resqpy.model as rq\n'), ((7722, 7750), 'resqpy.property.GridPropertyCollection', 'rqp.GridPropertyCollection', ([], {}), '()\n', (7748, 7750), True, 'import resqpy.property as rqp\n'), ((8560, 8623), 'resqpy.olio.xml_et.find_nested_tags', 'rqet.find_nested_tags', (['source_grid.root', "['Geometry', 'Points']"], 
{}), "(source_grid.root, ['Geometry', 'Points'])\n", (8581, 8623), True, 'import resqpy.olio.xml_et as rqet\n'), ((8288, 8334), 'resqpy.olio.xml_et.citation_title_for_node', 'rqet.citation_title_for_node', (['source_grid.root'], {}), '(source_grid.root)\n', (8316, 8334), True, 'import resqpy.olio.xml_et as rqet\n'), ((10817, 10845), 'os.path.exists', 'os.path.exists', (['new_epc_file'], {}), '(new_epc_file)\n', (10831, 10845), False, 'import os\n'), ((10850, 10874), 'os.path.exists', 'os.path.exists', (['epc_file'], {}), '(epc_file)\n', (10864, 10874), False, 'import os\n'), ((10879, 10919), 'os.path.samefile', 'os.path.samefile', (['new_epc_file', 'epc_file'], {}), '(new_epc_file, epc_file)\n', (10895, 10919), False, 'import os\n'), ((9255, 9298), 'numpy.all', 'np.all', (['(fine_coarse.within_fine_box[0] == 0)'], {}), '(fine_coarse.within_fine_box[0] == 0)\n', (9261, 9298), True, 'import numpy as np\n'), ((10314, 10402), 'resqpy.olio.xml_et.find_nested_tags_text', 'rqet.find_nested_tags_text', (['source_grid.root', "['ParentWindow', 'ParentGrid', 'UUID']"], {}), "(source_grid.root, ['ParentWindow', 'ParentGrid',\n 'UUID'])\n", (10340, 10402), True, 'import resqpy.olio.xml_et as rqet\n'), ((9366, 9404), 'numpy.all', 'np.all', (['fine_coarse.within_fine_box[1]'], {}), '(fine_coarse.within_fine_box[1])\n', (9372, 9404), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import tensorflow_probability as tfp
from tensorflow.python.framework import ops
from tf_agents.policies import policy_step
from tf_agents.policies import tf_policy
from tf_agents.policies.q_policy import QPolicy
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.policies.random_py_policy import RandomPyPolicy
from tf_agents.specs import array_spec, tensor_spec
from tf_agents.utils import nest_utils
class DummyTradePolicy(tf_policy.Base):
def _variables(self):
return []
def _action(self, time_step, policy_state, seed):
step = time_step.observation.numpy()[0][-1]
if step < 15:
a_ = np.random.randint(0, 66)
else:
days_left = np.max([30 - step, 1])
can_sale = time_step.observation.numpy()[0][-3]
can_sale_daily = can_sale / days_left
a_ = -np.random.randint(0, can_sale_daily+1)
a_ += 500 # add shift to start with 0
# print('Observation', time_step.observation.numpy())
# print('Step', step, 'Action:', a_, 'Action value', a_-500)
action_ = ops.convert_to_tensor(np.array([a_], dtype=np.int32))
if time_step is not None:
with tf.control_dependencies(tf.nest.flatten(time_step)):
action_ = tf.nest.map_structure(tf.identity, action_)
return policy_step.PolicyStep(action_, policy_state)
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'DummyTradePolicy does not support distributions'
)
class FilteredRandomTFPolicy(RandomTFPolicy):
def _action(self, time_step, policy_state, seed):
outer_dims = nest_utils.get_outer_shape(
time_step, self._time_step_spec)
observation = time_step.observation.numpy()[0]
amount_now = observation[-3] # can sale
amount_available = observation[-2] # can buy
lower_bound = int(500 - amount_now)
upper_bound = int(amount_available + 1)
actions_available = np.arange(lower_bound, upper_bound)
a_ = np.random.choice(actions_available)
action_ = ops.convert_to_tensor(np.array([a_], dtype=np.int32))
if time_step is not None:
with tf.control_dependencies(tf.nest.flatten(time_step)):
action_ = tf.nest.map_structure(tf.identity, action_)
return policy_step.PolicyStep(action_, policy_state)
class FilteredQPolicy(QPolicy):
def _distribution(self, time_step, policy_state):
q_values, policy_state = self._q_network(
time_step.observation, time_step.step_type, policy_state,
)
q_values.shape.assert_has_rank(2)
if self._action_shape.ndims == 1:
q_values = tf.expand_dims(q_values, -2)
observation = time_step.observation.numpy()[0]
amount_now = observation[-3] # can sale
amount_available = observation[-2] # can buy
q_values_np = q_values.numpy()[0]
lower_bound = int(500 - amount_now)
upper_bound = int(amount_available + 1)
q_values_np[:lower_bound] = -np.inf
q_values_np[upper_bound:] = -np.inf
new_q_values = ops.convert_to_tensor(
np.array([q_values_np], dtype=np.float32))
distribution = tfp.distributions.Categorical(
logits=new_q_values, dtype=self._action_dtype
)
distribution = tf.nest.pack_sequence_as(
self._action_spec, [distribution]
)
return policy_step.PolicyStep(distribution, policy_state)
class FilteredRandomPyPolicy(RandomPyPolicy):
def _action(self, time_step, policy_state):
outer_dims = self._outer_dims
if outer_dims is None:
if self.time_step_spec.observation:
outer_dims = nest_utils.get_outer_array_shape(
time_step.observation,
self.time_step_spec.observation,
)
else:
outer_dims = ()
random_action = array_spec.sample_spec_nest(
self._action_spec,
self._rng,
outer_dims=outer_dims,
)
print('Action rnd', random_action)
return policy_step.PolicyStep(random_action, policy_state)
| [
"tf_agents.utils.nest_utils.get_outer_array_shape",
"tf_agents.policies.policy_step.PolicyStep",
"tensorflow.nest.flatten",
"numpy.random.choice",
"numpy.max",
"numpy.array",
"tensorflow.nest.pack_sequence_as",
"numpy.random.randint",
"tf_agents.specs.array_spec.sample_spec_nest",
"tensorflow.expa... | [((1440, 1485), 'tf_agents.policies.policy_step.PolicyStep', 'policy_step.PolicyStep', (['action_', 'policy_state'], {}), '(action_, policy_state)\n', (1462, 1485), False, 'from tf_agents.policies import policy_step\n'), ((1771, 1830), 'tf_agents.utils.nest_utils.get_outer_shape', 'nest_utils.get_outer_shape', (['time_step', 'self._time_step_spec'], {}), '(time_step, self._time_step_spec)\n', (1797, 1830), False, 'from tf_agents.utils import nest_utils\n'), ((2123, 2158), 'numpy.arange', 'np.arange', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (2132, 2158), True, 'import numpy as np\n'), ((2172, 2207), 'numpy.random.choice', 'np.random.choice', (['actions_available'], {}), '(actions_available)\n', (2188, 2207), True, 'import numpy as np\n'), ((2479, 2524), 'tf_agents.policies.policy_step.PolicyStep', 'policy_step.PolicyStep', (['action_', 'policy_state'], {}), '(action_, policy_state)\n', (2501, 2524), False, 'from tf_agents.policies import policy_step\n'), ((3388, 3464), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'logits': 'new_q_values', 'dtype': 'self._action_dtype'}), '(logits=new_q_values, dtype=self._action_dtype)\n', (3417, 3464), True, 'import tensorflow_probability as tfp\n'), ((3510, 3569), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', (['self._action_spec', '[distribution]'], {}), '(self._action_spec, [distribution])\n', (3534, 3569), True, 'import tensorflow as tf\n'), ((3607, 3657), 'tf_agents.policies.policy_step.PolicyStep', 'policy_step.PolicyStep', (['distribution', 'policy_state'], {}), '(distribution, policy_state)\n', (3629, 3657), False, 'from tf_agents.policies import policy_step\n'), ((4126, 4211), 'tf_agents.specs.array_spec.sample_spec_nest', 'array_spec.sample_spec_nest', (['self._action_spec', 'self._rng'], {'outer_dims': 'outer_dims'}), '(self._action_spec, self._rng, outer_dims=outer_dims\n )\n', (4153, 4211), False, 'from 
tf_agents.specs import array_spec, tensor_spec\n'), ((4312, 4363), 'tf_agents.policies.policy_step.PolicyStep', 'policy_step.PolicyStep', (['random_action', 'policy_state'], {}), '(random_action, policy_state)\n', (4334, 4363), False, 'from tf_agents.policies import policy_step\n'), ((745, 769), 'numpy.random.randint', 'np.random.randint', (['(0)', '(66)'], {}), '(0, 66)\n', (762, 769), True, 'import numpy as np\n'), ((808, 830), 'numpy.max', 'np.max', (['[30 - step, 1]'], {}), '([30 - step, 1])\n', (814, 830), True, 'import numpy as np\n'), ((1218, 1248), 'numpy.array', 'np.array', (['[a_]'], {'dtype': 'np.int32'}), '([a_], dtype=np.int32)\n', (1226, 1248), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.array', 'np.array', (['[a_]'], {'dtype': 'np.int32'}), '([a_], dtype=np.int32)\n', (2256, 2278), True, 'import numpy as np\n'), ((2852, 2880), 'tensorflow.expand_dims', 'tf.expand_dims', (['q_values', '(-2)'], {}), '(q_values, -2)\n', (2866, 2880), True, 'import tensorflow as tf\n'), ((3321, 3362), 'numpy.array', 'np.array', (['[q_values_np]'], {'dtype': 'np.float32'}), '([q_values_np], dtype=np.float32)\n', (3329, 3362), True, 'import numpy as np\n'), ((959, 999), 'numpy.random.randint', 'np.random.randint', (['(0)', '(can_sale_daily + 1)'], {}), '(0, can_sale_daily + 1)\n', (976, 999), True, 'import numpy as np\n'), ((1380, 1423), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.identity', 'action_'], {}), '(tf.identity, action_)\n', (1401, 1423), True, 'import tensorflow as tf\n'), ((2419, 2462), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.identity', 'action_'], {}), '(tf.identity, action_)\n', (2440, 2462), True, 'import tensorflow as tf\n'), ((3904, 3997), 'tf_agents.utils.nest_utils.get_outer_array_shape', 'nest_utils.get_outer_array_shape', (['time_step.observation', 'self.time_step_spec.observation'], {}), '(time_step.observation, self.time_step_spec\n .observation)\n', (3936, 3997), False, 'from tf_agents.utils 
import nest_utils\n'), ((1325, 1351), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['time_step'], {}), '(time_step)\n', (1340, 1351), True, 'import tensorflow as tf\n'), ((2364, 2390), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['time_step'], {}), '(time_step)\n', (2379, 2390), True, 'import tensorflow as tf\n')] |
# Fix NumPy's global RNG before anything else can draw from it (reproducibility).
from numpy.random import seed
seed(8)
import os, logging
# Silence Python-side warnings and C++-side TensorFlow log spam.
logging.disable(logging.WARNING)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow
# Seed TensorFlow's global RNG as well for repeatable weight init / shuffling.
tensorflow.random.set_seed(7)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras import layers
from tensorflow.keras.applications import Xception
import argparse
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.python.ops import math_ops
import numpy as np
import pandas as pd
# Trains an Xception-based 4-class image classifier (CoroNet-style directory
# layout) with a hand-written GradientTape train/validation loop, evaluates it
# on a held-out test directory, and appends metrics to a run-specific log file.
if __name__ == "__main__":
    # ---- Command-line configuration -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_path", type=str, default='coronet_org_data/four_classes')
    parser.add_argument("--image_dim", type=int, default=150)
    parser.add_argument("--bs", type=int, default=10)
    parser.add_argument("--epochs", type=int, default=80)
    parser.add_argument("--lr", type=float, default=0.0001)
    args = parser.parse_args()
    # The log file name encodes the hyper-parameters of this run.
    base_log_name = "coronet_dummy_impl_default_train_test_val_" + str(args.image_dim) + "_" + str(args.epochs) + "_" + str(
        args.bs) + "_" + str(args.lr)
    # Line-buffered append mode, so progress lines survive a mid-run crash.
    log = open(base_log_name + ".txt", 'a', buffering=1)
    BASE_PATH = args.base_path
    DATASET_PATH = BASE_PATH + '/train2'
    VALIDATION_PATH = BASE_PATH + '/val'
    test_dir = BASE_PATH + '/test'
    IMAGE_SIZE = (args.image_dim, args.image_dim)
    BATCH_SIZE = args.bs
    NUM_EPOCHS = args.epochs
    LEARNING_RATE = args.lr
    # ---- Data pipelines --------------------------------------------------
    # NOTE(review): featurewise_center, featurewise_std_normalization and
    # zca_whitening require a datagen.fit(samples) call that never happens
    # here, so Keras will warn and those transforms have no effect -- confirm.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=50,
                                       featurewise_center=True,
                                       featurewise_std_normalization=True,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.25,
                                       zoom_range=0.1,
                                       zca_whitening=True,
                                       channel_shift_range=20,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       validation_split=0.2,
                                       fill_mode='constant')
    train_batches = train_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=IMAGE_SIZE,
                                                      shuffle=True,
                                                      batch_size=BATCH_SIZE,
                                                      seed=42,
                                                      class_mode="categorical")
    # NOTE(review): validation reuses the *augmenting* train_datagen, so val
    # images are randomly rotated/flipped too -- verify this is intended.
    valid_batches = train_datagen.flow_from_directory(VALIDATION_PATH,
                                                      target_size=IMAGE_SIZE,
                                                      shuffle=True,
                                                      batch_size=BATCH_SIZE,
                                                      seed=42,
                                                      class_mode="categorical")
    # ---- Model: Xception backbone (fine-tuned end-to-end) + small head ----
    conv_base = Xception(weights='imagenet',
                         include_top=False,
                         input_shape=(args.image_dim, args.image_dim, 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    train_batch_len = len(train_batches)
    validation_batch_len = len(valid_batches)
    print("Train batch len: " + str(train_batch_len))
    print("Valid batch len: " + str(validation_batch_len))
    accuracy = tf.keras.metrics.CategoricalAccuracy()
    accuracy_val = tf.keras.metrics.CategoricalAccuracy()
    loss_fn = tf.keras.losses.CategoricalCrossentropy()
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    # Class names are taken from the test directory's sub-folder names.
    target_names = os.listdir(test_dir)
    print(target_names)
    # ---- Custom training loop --------------------------------------------
    for epoch in range(NUM_EPOCHS):
        total_train_loss = 0
        for step, (x, y) in enumerate(train_batches):
            with tf.GradientTape() as tape:
                logits = model(x)
                loss_value = loss_fn(y, logits)
                accuracy.update_state(y, logits)
                total_train_loss += loss_value.numpy()
            gradients = tape.gradient(loss_value, model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, model.trainable_weights))
            # NOTE(review): the following copy-out / assign-back of every
            # trainable weight appears to be a no-op (the same values are
            # written straight back) -- looks like leftover scaffolding.
            weight_list = []
            for w in model.trainable_weights:
                weight_list.append(w.numpy())
            i = 0
            for w in model.trainable_weights:
                w.assign(weight_list[i])
                i += 1
            # The directory iterator loops forever; break after one pass.
            if step >= (train_batch_len - 1):
                break
        print("Train Epoch ", epoch, " Loss ", total_train_loss / train_batch_len, " Accuracy ", accuracy.result().numpy())
        log.write("Train Epoch " + str(epoch) + " Loss " + str(total_train_loss / train_batch_len) + " Accuracy " + str(
            accuracy.result().numpy()))
        log.write('\n')
        accuracy.reset_states()
        # ---- Validation pass (no weight updates are applied) -------------
        total_val_loss = 0
        for step, (x, y) in enumerate(valid_batches):
            # Tape is opened but its gradients are never used here.
            with tf.GradientTape() as tape:
                logits = model(x)
                loss_value = loss_fn(y, logits)
                accuracy_val.update_state(y, logits)
                total_val_loss += loss_value.numpy()
            if step >= (validation_batch_len - 1):
                break
        print("Val Epoch ", epoch, " Loss ", total_val_loss / validation_batch_len, " Accuracy ", accuracy_val.result().numpy())
        log.write("Val Epoch " + str(epoch) + " Loss " + str(total_val_loss / validation_batch_len) + " Accuracy " + str(
            accuracy_val.result().numpy()))
        log.write('\n')
        accuracy_val.reset_states()
    # model.save('4-class-Covid19-Mod-Xception.h5')
    # ---- Test evaluation (batch_size=1, fixed order for the reports) ------
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    test_batches = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SIZE, batch_size=1,
                                                    shuffle=False, seed=42, class_mode="categorical")
    test_batch_len = len(test_batches)
    test_batches.reset()
    accuracy_test = tf.keras.metrics.CategoricalAccuracy()
    total_test_loss = 0
    original_label = []
    predicted_label = []
    for step, (x, y) in enumerate(test_batches):
        with tf.GradientTape() as tape:
            logits = model(x)
            loss_value = loss_fn(y, logits)
            # Collect integer class ids for sklearn's report/matrix below.
            original_label.append(math_ops.argmax(y, axis=-1).numpy()[0])
            predicted_label.append(math_ops.argmax(logits, axis=-1).numpy()[0])
            accuracy_test.update_state(y, logits)
            total_test_loss += loss_value.numpy()
        # NOTE(review): unlike the train/val loops this breaks one step late
        # (step > len instead of step >= len - 1), so a couple of samples are
        # seen twice -- confirm whether that is intentional.
        if step > test_batch_len:
            break
    print("Test ", "Loss ", total_test_loss / test_batch_len, " Accuracy ", accuracy_test.result().numpy())
    # NOTE(review): the label below says "Val" but these are test metrics.
    log.write("Val " + "Loss " + str(total_test_loss / test_batch_len) + " Accuracy " + str(accuracy_test.result().numpy()))
    log.write('\n')
    accuracy_test.reset_states()
    # ---- sklearn summary: confusion matrix + per-class report -------------
    conf_matrix = confusion_matrix(original_label, predicted_label)
    class_report = classification_report(original_label, predicted_label, target_names=target_names, output_dict=True)
    print('Confusion Matrix')
    print(target_names)
    print(conf_matrix)
    print('Classification Report')
    print(class_report)
    log.write("Confusion Matrix")
    log.write('\n')
    for name in target_names:
        log.write(name + " ")
    log.write('\n')
    log.write(np.array2string(conf_matrix, separator=', '))
    log.write('\n')
    # Per-class metrics as CSV (rows = classes, cols = precision/recall/...).
    pd.DataFrame(class_report).transpose().to_csv(base_log_name + ".csv")
    log.close()
| [
"tensorflow.python.ops.math_ops.argmax",
"sklearn.metrics.classification_report",
"numpy.array2string",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.GradientTape",
"tensorflow.keras.losses.CategoricalCrossentropy",
"os.listdir",
"ar... | [((31, 38), 'numpy.random.seed', 'seed', (['(8)'], {}), '(8)\n', (35, 38), False, 'from numpy.random import seed\n'), ((58, 90), 'logging.disable', 'logging.disable', (['logging.WARNING'], {}), '(logging.WARNING)\n', (73, 90), False, 'import os, logging\n'), ((152, 181), 'tensorflow.random.set_seed', 'tensorflow.random.set_seed', (['(7)'], {}), '(7)\n', (178, 181), False, 'import tensorflow\n'), ((620, 645), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (643, 645), False, 'import argparse\n'), ((1523, 1869), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'rotation_range': '(50)', 'featurewise_center': '(True)', 'featurewise_std_normalization': '(True)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.25)', 'zoom_range': '(0.1)', 'zca_whitening': '(True)', 'channel_shift_range': '(20)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'validation_split': '(0.2)', 'fill_mode': '"""constant"""'}), "(rescale=1.0 / 255, rotation_range=50, featurewise_center\n =True, featurewise_std_normalization=True, width_shift_range=0.2,\n height_shift_range=0.2, shear_range=0.25, zoom_range=0.1, zca_whitening\n =True, channel_shift_range=20, horizontal_flip=True, vertical_flip=True,\n validation_split=0.2, fill_mode='constant')\n", (1541, 1869), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3248, 3348), 'tensorflow.keras.applications.Xception', 'Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(args.image_dim, args.image_dim, 3)'}), "(weights='imagenet', include_top=False, input_shape=(args.image_dim,\n args.image_dim, 3))\n", (3256, 3348), False, 'from tensorflow.keras.applications import Xception\n'), ((3438, 3457), 'tensorflow.python.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3455, 3457), False, 'from tensorflow.python.keras import models\n'), 
((3872, 3910), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (3908, 3910), True, 'import tensorflow as tf\n'), ((3930, 3968), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (3966, 3968), True, 'import tensorflow as tf\n'), ((3983, 4024), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (4022, 4024), True, 'import tensorflow as tf\n'), ((4041, 4094), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'LEARNING_RATE'}), '(learning_rate=LEARNING_RATE)\n', (4065, 4094), True, 'import tensorflow as tf\n'), ((4114, 4134), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (4124, 4134), False, 'import os, logging\n'), ((6125, 6162), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (6143, 6162), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((6448, 6486), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (6484, 6486), True, 'import tensorflow as tf\n'), ((7319, 7368), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['original_label', 'predicted_label'], {}), '(original_label, predicted_label)\n', (7335, 7368), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((7388, 7492), 'sklearn.metrics.classification_report', 'classification_report', (['original_label', 'predicted_label'], {'target_names': 'target_names', 'output_dict': '(True)'}), '(original_label, predicted_label, target_names=\n target_names, output_dict=True)\n', (7409, 7492), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((3497, 3513), 'tensorflow.python.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3511, 3513), False, 
'from tensorflow.python.keras import layers\n'), ((3529, 3548), 'tensorflow.python.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3543, 3548), False, 'from tensorflow.python.keras import layers\n'), ((3564, 3600), 'tensorflow.python.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3576, 3600), False, 'from tensorflow.python.keras import layers\n'), ((3616, 3653), 'tensorflow.python.keras.layers.Dense', 'layers.Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (3628, 3653), False, 'from tensorflow.python.keras import layers\n'), ((7772, 7816), 'numpy.array2string', 'np.array2string', (['conf_matrix'], {'separator': '""", """'}), "(conf_matrix, separator=', ')\n", (7787, 7816), True, 'import numpy as np\n'), ((6622, 6639), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6637, 6639), True, 'import tensorflow as tf\n'), ((4296, 4313), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4311, 4313), True, 'import tensorflow as tf\n'), ((5415, 5432), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5430, 5432), True, 'import tensorflow as tf\n'), ((7842, 7868), 'pandas.DataFrame', 'pd.DataFrame', (['class_report'], {}), '(class_report)\n', (7854, 7868), True, 'import pandas as pd\n'), ((6753, 6780), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (6768, 6780), False, 'from tensorflow.python.ops import math_ops\n'), ((6824, 6856), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (6839, 6856), False, 'from tensorflow.python.ops import math_ops\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 1/10/21
# @Author : <NAME>
# @email : <EMAIL>
# Demo: plan controlled SLIP (Spring-Loaded Inverted Pendulum) running
# trajectories with a differentially-flat controller, once without and once
# with look-ahead planning of future gait cycles, and plot/compare the results.
from math import pi as PI
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from slip_control.slip.slip_trajectory import SlipTrajectory
from slip_control.slip.slip_model import SlipModel, X, X_DOT, X_DDOT, Z, Z_DOT, Z_DDOT
from slip_control.controllers.target_to_states_generator import CycleStateGenerator, ForwardSpeedStateGenerator
from slip_control.controllers.diff_flat_slip_controller import SlipDiffFlatController
from slip_control.utils import plot_utils
cmap = plt.cm.get_cmap('gist_heat')
if __name__ == "__main__":
    # Instantiate SLIP model
    m = 80  # [kg]
    r0 = 1.0  # [m]
    n_legs = 1
    k_rel = 10.7
    slip = SlipModel(mass=m, leg_length=r0, k_rel=k_rel * n_legs)
    g = SlipModel.g
    # Error deviation weights during the stance trajectory
    # State order is [x, x', x'', z, z', z''].
    traj_weights = np.array([1., 1., 1., 1., 1., 1.])
    traj_weights /= np.linalg.norm(traj_weights)
    # Error deviation weights of target take-off states
    take_off_state_error_weights = np.array([0.0, 1.0, 0., 1.0, 1.0, 0.])
    take_off_state_error_weights /= np.linalg.norm(take_off_state_error_weights)
    n_cycles = 5  # Generate a trajectory of 5 cycles
    max_theta_dot = 4*PI  # [rad/s] max angular leg velocity during flight
    # Define a forward velocity
    forward_speed = 4 * slip.r0  # [m/s]
    # Define a desired gait duty cycle (time of stance / time of cycle) in [0.2, 1.0]
    duty_cycle = 0.8
    z_init = slip.r0
    # Set an initial state (assumed to be a flight phase state) [x, x', x'', z, z', z'']
    init_to_state = np.array([0.0, forward_speed, 0.0, z_init, 0.0, -g])
    # Set a desired take off state defining the forward and vertical velocity desired
    to_des_state = init_to_state
    # Configure Differentially flat controller
    slip_controller = SlipDiffFlatController(slip_model=slip,
                                            traj_weights=traj_weights,
                                            max_flight_theta_dot=max_theta_dot,
                                            debug=False)
    # Target generator steers each cycle toward the desired speed/duty cycle.
    to_state_generator = ForwardSpeedStateGenerator(slip_model=slip, target_state_weights=take_off_state_error_weights,
                                                    desired_forward_speed=forward_speed,
                                                    desired_duty_cycle=duty_cycle)
    slip_controller.target_to_state_generator = to_state_generator
    # Generate SLIP trajectory tree without future cycle planning
    # (look_ahead_cycles=0: each cycle is optimized greedily).
    tree = slip_controller.generate_slip_trajectory_tree(desired_gait_cycles=n_cycles,
                                                         initial_state=init_to_state,
                                                         max_samples_per_cycle=30,
                                                         angle_epsilon=np.deg2rad(.02),
                                                         look_ahead_cycles=0)
    slip_traj_no_future = tree.get_optimal_trajectory()
    plot_utils.plot_slip_trajectory(slip_traj_no_future, plot_passive=True, plot_td_angles=True,
                                    title="Without future cycle planning",
                                    color=(23/255., 0/255., 194/255.))
    plt.show()
    # Generate SLIP trajectory tree with future cycle planning
    # (look_ahead_cycles=1: each cycle also considers its successor).
    tree = slip_controller.generate_slip_trajectory_tree(desired_gait_cycles=n_cycles,
                                                         initial_state=init_to_state,
                                                         max_samples_per_cycle=30,
                                                         angle_epsilon=np.deg2rad(.02),
                                                         look_ahead_cycles=1)
    slip_traj = tree.get_optimal_trajectory()
    plot_utils.plot_slip_trajectory(slip_traj, plot_passive=True, plot_td_angles=True,
                                    title="With future cycle planing",
                                    color=(23 / 255., 154 / 255., 194 / 255.))
    plt.show()
    # Plot controlled trajectory tree
    print("This takes a while... should optimize soon")
    tree.plot()
    plt.show()
    # Compare first two cycles.
    # Overlay the first two gait cycles of both plans on shared axes.
    short_traj = SlipTrajectory(slip, slip_gait_cycles=slip_traj.gait_cycles[:2])
    short_traj_no_future = SlipTrajectory(slip, slip_gait_cycles=slip_traj_no_future.gait_cycles[:2])
    axs = plot_utils.plot_slip_trajectory(short_traj, plot_passive=True, plot_td_angles=True,
                                          color=(23 / 255., 154 / 255., 194 / 255.))
    axs = plot_utils.plot_slip_trajectory(short_traj_no_future, plot_passive=True, plot_td_angles=True, plt_axs=axs,
                                          color=(23/255., 0/255., 194/255.))
    plt.show()
    # Plot limit cycles of controlled trajectory
    phase_axs = plot_utils.plot_limit_cycles(slip_traj)
plt.show() | [
"slip_control.utils.plot_utils.plot_limit_cycles",
"slip_control.utils.plot_utils.plot_slip_trajectory",
"slip_control.slip.slip_trajectory.SlipTrajectory",
"slip_control.controllers.diff_flat_slip_controller.SlipDiffFlatController",
"numpy.array",
"numpy.deg2rad",
"slip_control.controllers.target_to_st... | [((619, 647), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""gist_heat"""'], {}), "('gist_heat')\n", (634, 647), True, 'import matplotlib.pyplot as plt\n'), ((788, 842), 'slip_control.slip.slip_model.SlipModel', 'SlipModel', ([], {'mass': 'm', 'leg_length': 'r0', 'k_rel': '(k_rel * n_legs)'}), '(mass=m, leg_length=r0, k_rel=k_rel * n_legs)\n', (797, 842), False, 'from slip_control.slip.slip_model import SlipModel, X, X_DOT, X_DDOT, Z, Z_DOT, Z_DDOT\n'), ((943, 983), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (951, 983), True, 'import numpy as np\n'), ((998, 1026), 'numpy.linalg.norm', 'np.linalg.norm', (['traj_weights'], {}), '(traj_weights)\n', (1012, 1026), True, 'import numpy as np\n'), ((1118, 1158), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 1.0, 0.0])\n', (1126, 1158), True, 'import numpy as np\n'), ((1193, 1237), 'numpy.linalg.norm', 'np.linalg.norm', (['take_off_state_error_weights'], {}), '(take_off_state_error_weights)\n', (1207, 1237), True, 'import numpy as np\n'), ((1679, 1731), 'numpy.array', 'np.array', (['[0.0, forward_speed, 0.0, z_init, 0.0, -g]'], {}), '([0.0, forward_speed, 0.0, z_init, 0.0, -g])\n', (1687, 1731), True, 'import numpy as np\n'), ((1921, 2040), 'slip_control.controllers.diff_flat_slip_controller.SlipDiffFlatController', 'SlipDiffFlatController', ([], {'slip_model': 'slip', 'traj_weights': 'traj_weights', 'max_flight_theta_dot': 'max_theta_dot', 'debug': '(False)'}), '(slip_model=slip, traj_weights=traj_weights,\n max_flight_theta_dot=max_theta_dot, debug=False)\n', (1943, 2040), False, 'from slip_control.controllers.diff_flat_slip_controller import SlipDiffFlatController\n'), ((2197, 2368), 'slip_control.controllers.target_to_states_generator.ForwardSpeedStateGenerator', 'ForwardSpeedStateGenerator', ([], {'slip_model': 'slip', 'target_state_weights': 
'take_off_state_error_weights', 'desired_forward_speed': 'forward_speed', 'desired_duty_cycle': 'duty_cycle'}), '(slip_model=slip, target_state_weights=\n take_off_state_error_weights, desired_forward_speed=forward_speed,\n desired_duty_cycle=duty_cycle)\n', (2223, 2368), False, 'from slip_control.controllers.target_to_states_generator import CycleStateGenerator, ForwardSpeedStateGenerator\n'), ((3080, 3263), 'slip_control.utils.plot_utils.plot_slip_trajectory', 'plot_utils.plot_slip_trajectory', (['slip_traj_no_future'], {'plot_passive': '(True)', 'plot_td_angles': '(True)', 'title': '"""Without future cycle planning"""', 'color': '(23 / 255.0, 0 / 255.0, 194 / 255.0)'}), "(slip_traj_no_future, plot_passive=True,\n plot_td_angles=True, title='Without future cycle planning', color=(23 /\n 255.0, 0 / 255.0, 194 / 255.0))\n", (3111, 3263), False, 'from slip_control.utils import plot_utils\n'), ((3335, 3345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3343, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3882, 4054), 'slip_control.utils.plot_utils.plot_slip_trajectory', 'plot_utils.plot_slip_trajectory', (['slip_traj'], {'plot_passive': '(True)', 'plot_td_angles': '(True)', 'title': '"""With future cycle planing"""', 'color': '(23 / 255.0, 154 / 255.0, 194 / 255.0)'}), "(slip_traj, plot_passive=True,\n plot_td_angles=True, title='With future cycle planing', color=(23 / \n 255.0, 154 / 255.0, 194 / 255.0))\n", (3913, 4054), False, 'from slip_control.utils import plot_utils\n'), ((4131, 4141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4139, 4141), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4318, 4382), 'slip_control.slip.slip_trajectory.SlipTrajectory', 'SlipTrajectory', (['slip'], {'slip_gait_cycles': 'slip_traj.gait_cycles[:2]'}), '(slip, slip_gait_cycles=slip_traj.gait_cycles[:2])\n', (4332, 4382), 
False, 'from slip_control.slip.slip_trajectory import SlipTrajectory\n'), ((4410, 4484), 'slip_control.slip.slip_trajectory.SlipTrajectory', 'SlipTrajectory', (['slip'], {'slip_gait_cycles': 'slip_traj_no_future.gait_cycles[:2]'}), '(slip, slip_gait_cycles=slip_traj_no_future.gait_cycles[:2])\n', (4424, 4484), False, 'from slip_control.slip.slip_trajectory import SlipTrajectory\n'), ((4495, 4628), 'slip_control.utils.plot_utils.plot_slip_trajectory', 'plot_utils.plot_slip_trajectory', (['short_traj'], {'plot_passive': '(True)', 'plot_td_angles': '(True)', 'color': '(23 / 255.0, 154 / 255.0, 194 / 255.0)'}), '(short_traj, plot_passive=True,\n plot_td_angles=True, color=(23 / 255.0, 154 / 255.0, 194 / 255.0))\n', (4526, 4628), False, 'from slip_control.utils import plot_utils\n'), ((4674, 4833), 'slip_control.utils.plot_utils.plot_slip_trajectory', 'plot_utils.plot_slip_trajectory', (['short_traj_no_future'], {'plot_passive': '(True)', 'plot_td_angles': '(True)', 'plt_axs': 'axs', 'color': '(23 / 255.0, 0 / 255.0, 194 / 255.0)'}), '(short_traj_no_future, plot_passive=True,\n plot_td_angles=True, plt_axs=axs, color=(23 / 255.0, 0 / 255.0, 194 / \n 255.0))\n', (4705, 4833), False, 'from slip_control.utils import plot_utils\n'), ((4862, 4872), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4870, 4872), True, 'import matplotlib.pyplot as plt\n'), ((4939, 4978), 'slip_control.utils.plot_utils.plot_limit_cycles', 'plot_utils.plot_limit_cycles', (['slip_traj'], {}), '(slip_traj)\n', (4967, 4978), False, 'from slip_control.utils import plot_utils\n'), ((4983, 4993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4991, 4993), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2941), 'numpy.deg2rad', 'np.deg2rad', (['(0.02)'], {}), '(0.02)\n', (2935, 2941), True, 'import numpy as np\n'), ((3737, 3753), 'numpy.deg2rad', 'np.deg2rad', (['(0.02)'], {}), '(0.02)\n', (3747, 3753), True, 'import numpy as np\n')] |
import datetime
from couchdbkit import ResourceNotFound
from django.utils.safestring import mark_safe
import logging
import numpy
import pytz
from corehq.apps.indicators.models import DynamicIndicatorDefinition, CombinedCouchViewIndicatorDefinition
from dimagi.utils.decorators.memoized import memoized
from mvp.models import MVP
from mvp.reports import MVPIndicatorReport
class HealthCoordinatorReport(MVPIndicatorReport):
    """
    MVP Custom Report: MVIS Health Coordinator

    Renders a matrix of monthly retrospective indicator values, grouped by
    category (vital events, visits, maternal health, ...), one column per
    month plus summary statistics (average / median / std dev).
    """
    slug = "health_coordinator"
    name = "MVIS Health Coordinator Report"
    report_template_path = "mvp/reports/health_coordinator.html"
    flush_layout = True
    hide_filters = True
    fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
              'corehq.apps.reports.filters.select.GroupFilter']
    emailable = True
    @property
    def timezone(self):
        # This report always renders in UTC regardless of domain settings.
        return pytz.utc
    @property
    @memoized
    def template_report(self):
        """Swap in the simplified template when rendering for email."""
        if self.is_rendered_as_email:
            self.report_template_path = "mvp/reports/health_coordinator_email.html"
        return super(HealthCoordinatorReport, self).template_report
    @property
    def report_context(self):
        """Build the template context: month headers plus a category matrix.

        For email rendering the indicator values are computed inline; for the
        web view only the month dates are fetched and each indicator's table
        is lazy-loaded via ``load_url``.  Indicators that cannot be found or
        wrapped are logged and skipped.
        """
        report_matrix = []
        month_headers = None
        for category_group in self.indicator_slugs:
            category_indicators = []
            total_rowspan = 0
            for slug in category_group['indicator_slugs']:
                try:
                    indicator = DynamicIndicatorDefinition.get_current(MVP.NAMESPACE, self.domain, slug,
                        wrap_correctly=True)
                    if self.is_rendered_as_email:
                        retrospective = indicator.get_monthly_retrospective(user_ids=self.user_ids)
                    else:
                        # Web view: fetch dates only; values load asynchronously.
                        retrospective = indicator.get_monthly_retrospective(return_only_dates=True)
                    # Month headers are identical for all indicators; build once.
                    if not month_headers:
                        month_headers = self.get_month_headers(retrospective)
                    if isinstance(indicator, CombinedCouchViewIndicatorDefinition):
                        # Ratio indicators render numerator/denominator/percent rows.
                        table = self.get_indicator_table(retrospective)
                        indicator_rowspan = 3
                    else:
                        table = self.get_indicator_row(retrospective)
                        indicator_rowspan = 1
                    # +1 accounts for the indicator's title row.
                    total_rowspan += indicator_rowspan + 1
                    category_indicators.append(dict(
                        title=indicator.description,
                        table=table,
                        load_url="%s?indicator=%s" % (self.get_url(self.domain, render_as='partial'), indicator.slug),
                        rowspan=indicator_rowspan
                    ))
                except (AttributeError, ResourceNotFound):
                    logging.info("Could not grab indicator %s in domain %s" % (slug, self.domain))
            report_matrix.append(dict(
                category_title=category_group['category_title'],
                category_slug=category_group['category_slug'],
                rowspan=total_rowspan,
                indicators=category_indicators,
            ))
        return dict(
            months=month_headers,
            report=report_matrix,
        )
    @property
    def indicator_slugs(self):
        """Static layout of the report: indicator slugs grouped by category."""
        return [
            {
                'category_title': "Vital Events",
                'category_slug': 'vital_events',
                'indicator_slugs': [
                    "num_births_occured",
                    "num_births_recorded",
                    "maternal_deaths",
                    "neonatal_deaths",
                    "infant_deaths",
                    "under5_deaths",
                    "over5_deaths",
                ]
            },
            {
                'category_title': "Visits",
                'category_slug': 'chw_visits',
                'indicator_slugs': [
                    "households_routine_visit_past90days", # A1 - 23, all set
                    "households_routine_visit_past30days", # A1 - 44, all set
                    "pregnant_routine_visit_past30days", # A1 - 46
                    "pregnant_routine_checkup_proportion_6weeks",
                    "neonate_routine_visit_past7days", # A1 - 47
                    "newborn_7day_visit_proportion", # A2 - 6, denom slightly off
                    "under1_check_ups_proportion",
                    "under5_routine_visit_past30days", # A1 - 45
                    "urgent_referrals_proportion", # A2 - 13, updated to spec
                ]
            },
            {
                'category_title': "Maternal Health",
                'category_slug': 'maternal_health',
                'indicator_slugs': [
                    "no_anc_proportion", # A3 - 2
                    "anc4_proportion", # A2 - 3
                    "facility_births_proportion", # A2 - 4
                    "low_birth_weight_proportion",
                    "family_planning_proportion", # A2 - 1
                ]
            },
            {
                'category_title': "Child Health",
                'category_slug': 'child_health',
                'indicator_slugs': [
                    "muac_routine_proportion",
                    "muac_wasting_proportion",
                    "moderate_muac_wasting_proportion",
                    "severe_muac_wasting_proportion",
                    "under5_diarrhea_ors_proportion", # A2 - 37
                    "under5_diarrhea_zinc_proportion", # B - 38
                    "under5_complicated_fever_referred_proportion",
                    "under5_complicated_fever_facility_followup_proportion",
                    "under1_immunized_proportion", # A2 - 8
                    "under6month_exclusive_breastfeeding_proportion",
                ]
            },
            {
                'category_title': "Malaria",
                'category_slug': 'malaria',
                'indicator_slugs': [
                    "under5_fever_rdt_proportion",
                    "under5_fever_rdt_positive_proportion",
                    "under5_fever_rdt_not_received_proportion",
                    "under5_fever_rdt_positive_medicated_proportion",
                    "under5_fever_rdt_negative_medicated_proportion",
                    "over5_positive_rdt_medicated_proportion",
                ]
            },
            {
                'category_title': "Household health",
                'category_slug': 'household_health',
                'indicator_slugs': [
                    "functioning_bednet_proportion",
                    "handwashing_near_latrine_proportion",
                ]
            }
        ]
    def get_month_headers(self, retrospective):
        """Return HTML-safe column headers like "Mar 2013<br />(-2)".

        The most recent month is labelled "(Current)"; earlier months get a
        negative offset.  Entries without a datetime 'date' show "Unknown".
        """
        headers = list()
        month_fmt = "%b %Y"
        num_months = len(retrospective)
        for i, result in enumerate(retrospective):
            month = result.get('date')
            month_text = month.strftime(month_fmt) if isinstance(month, datetime.datetime) else "Unknown"
            month_desc = "(-%d)" % (num_months-(i+1)) if (num_months-i) > 1 else "(Current)"
            headers.append(mark_safe("%s<br />%s" % (month_text, month_desc)))
        return headers
    def get_indicator_table(self, retrospective):
        """Build the three formatted rows (numerator/denominator/ratio) for a
        combined (ratio-style) indicator, each extended with summary stats.

        Months whose ratio is None are excluded from the statistics.
        """
        n_row = [i.get('numerator', 0) for i in retrospective]
        d_row = [i.get('denominator', 0) for i in retrospective]
        r_row = [i.get('ratio') for i in retrospective]
        n_stats = []
        d_stats = []
        r_stats = []
        for i in range(len(retrospective)):
            if r_row[i] is not None:
                n_stats.append(n_row[i])
                d_stats.append(d_row[i])
                r_stats.append(r_row[i])
        n_row.extend(self._get_statistics(n_stats))
        d_row.extend(self._get_statistics(d_stats))
        r_row.extend(self._get_statistics(r_stats))
        return dict(
            numerators=self._format_row(n_row),
            denominators=self._format_row(d_row),
            percentages=self._format_row(r_row, True)
        )
    def _format_row(self, row, as_percent=False):
        """Format a row of values for the template.

        None/NaN render as "--".  The 4th cell from the end (the current
        month) gets the "current_month" CSS class; the last three cells
        (avg/median/std) get "summary".
        """
        formatted = list()
        num_cols = len(row)
        for i, val in enumerate(row):
            if val is not None and not numpy.isnan(val):
                text = "%.f%%" % (val*100) if as_percent else "%d" % int(val)
            else:
                text = "--"
            if i == num_cols-4:
                css = "current_month"
            elif i > num_cols-4:
                css = "summary"
            else:
                css = ""
            formatted.append(dict(
                raw_value=val,
                text=text,
                css=css
            ))
        return formatted
    def _get_statistics(self, nonzero_row):
        """Return [average, median, std dev] of the values, or [None]*3 when
        there is nothing to summarize."""
        if nonzero_row:
            return [numpy.average(nonzero_row), numpy.median(nonzero_row), numpy.std(nonzero_row)]
        return [None]*3
    def get_indicator_row(self, retrospective):
        """Build the single formatted row for a plain (count-style) indicator,
        extended with summary statistics over the nonzero months."""
        row = [i.get('value', 0) for i in retrospective]
        nonzero_row = [r for r in row if r]
        row.extend(self._get_statistics(nonzero_row))
        return dict(
            numerators=self._format_row(row)
        )
    def get_response_for_indicator(self, indicator):
        """Compute one indicator's table for the async (partial) view.

        Returns ``{'table': ...}`` or None when the indicator's retrospective
        cannot be computed (AttributeError is swallowed deliberately).
        """
        try:
            retrospective = indicator.get_monthly_retrospective(user_ids=self.user_ids)
            if isinstance(indicator, CombinedCouchViewIndicatorDefinition):
                table = self.get_indicator_table(retrospective)
            else:
                table = self.get_indicator_row(retrospective)
            return {
                'table': table,
            }
        except AttributeError:
            pass
        return None
| [
"numpy.median",
"numpy.average",
"corehq.apps.indicators.models.DynamicIndicatorDefinition.get_current",
"numpy.isnan",
"django.utils.safestring.mark_safe",
"numpy.std",
"logging.info"
] | [((7196, 7246), 'django.utils.safestring.mark_safe', 'mark_safe', (["('%s<br />%s' % (month_text, month_desc))"], {}), "('%s<br />%s' % (month_text, month_desc))\n", (7205, 7246), False, 'from django.utils.safestring import mark_safe\n'), ((8866, 8892), 'numpy.average', 'numpy.average', (['nonzero_row'], {}), '(nonzero_row)\n', (8879, 8892), False, 'import numpy\n'), ((8894, 8919), 'numpy.median', 'numpy.median', (['nonzero_row'], {}), '(nonzero_row)\n', (8906, 8919), False, 'import numpy\n'), ((8921, 8943), 'numpy.std', 'numpy.std', (['nonzero_row'], {}), '(nonzero_row)\n', (8930, 8943), False, 'import numpy\n'), ((1477, 1574), 'corehq.apps.indicators.models.DynamicIndicatorDefinition.get_current', 'DynamicIndicatorDefinition.get_current', (['MVP.NAMESPACE', 'self.domain', 'slug'], {'wrap_correctly': '(True)'}), '(MVP.NAMESPACE, self.domain, slug,\n wrap_correctly=True)\n', (1515, 1574), False, 'from corehq.apps.indicators.models import DynamicIndicatorDefinition, CombinedCouchViewIndicatorDefinition\n'), ((8298, 8314), 'numpy.isnan', 'numpy.isnan', (['val'], {}), '(val)\n', (8309, 8314), False, 'import numpy\n'), ((2857, 2935), 'logging.info', 'logging.info', (["('Could not grab indicator %s in domain %s' % (slug, self.domain))"], {}), "('Could not grab indicator %s in domain %s' % (slug, self.domain))\n", (2869, 2935), False, 'import logging\n')] |
import logging
from typing import Dict, List, Sequence, Union, Tuple
# TODO: align with main pypesto multiprocessing format
from multiprocess import Pool, Manager, Queue, Pipe
from tqdm import tqdm
import numpy as np
import copy
import queue
import time
from ..problem import Problem
from .sampler import Sampler, InternalSampler, InternalSample
from .result import McmcPtResult
logger = logging.getLogger(__name__)
# _q: Union[None, Queue] = None
# _r: Union[None, Queue] = None
# _idx: Union[None, int] = None
# _sampler: Union[None, InternalSampler] = None
# def worker_init(work_queue: Queue, return_queue: Queue,
# idx: int, sampler_obj: InternalSampler) -> bool:
# global _q, _r, _idx, _sampler
# _q = work_queue
# _r = return_queue
# _idx = idx
# _sampler = sampler_obj
# return True
# def worker_run() -> Tuple[int, InternalSampler]:
# global _q, _r, _idx, _sampler
# while True:
# try:
# logger.debug(f'sampler {_idx}: WAITING')
# idx, new_last_sample, beta, stop = _q.get(timeout=5)
# if _idx == idx:
# logger.debug(f'sampler {_idx}: new_last_sample={new_last_sample}, beta={beta}, stop={stop}')
# else:
# logger.debug(f'sampler {_idx}: encountered incorrect instruction')
# raise ProcessLookupError('received wrong instructions.')
# if stop is True:
# # logger.debug(f'sampler {_idx}: STOPPING trace_x: {len(_sampler.trace_x)}')
# _q.task_done()
# # logger.debug(f'sampler {_idx}: RETURNING')
# return _idx, _sampler
# if new_last_sample is not None:
# _sampler.set_last_sample(copy.deepcopy(new_last_sample))
# # logger.debug(f'sampler {_idx}: SAMPLING')
# _sampler.sample(n_samples=1, beta=beta)
# # logger.debug(f'sampler {idx} trace_x: {len(_sampler.trace_x)}')
# logger.debug(f'sampler {_idx}: RETURNING')
# _r.put((idx, copy.deepcopy(_sampler.get_last_sample()), beta))
# # logger.debug(f'sampler {_idx}: MARKING COMPLETE')
# _q.task_done()
# except (EOFError, queue.Empty):
# time.sleep(1)
# continue
def worker_run_combined(
    work_queue: Queue, return_queue: Queue, idx: int, sampler_obj: InternalSampler
) -> Tuple[int, McmcPtResult]:
    """Worker loop run inside a pool process for one tempered chain.

    Repeatedly reads instructions ``(idx, new_last_sample, beta, stop)``
    from ``work_queue``. For each instruction it optionally replaces the
    chain's last sample (after a swap decided by the main process), draws
    one sample at inverse temperature ``beta``, and puts
    ``(idx, last_sample, beta)`` on ``return_queue``. When ``stop`` is
    True it returns ``(idx, samples)`` with the chain's accumulated
    samples (``_sampler.get_samples()``).

    :param work_queue: queue of instructions for this chain
    :param return_queue: queue on which per-iteration last samples are sent back
    :param idx: index of the chain this worker is responsible for
    :param sampler_obj: the internal sampler driving this chain
    :raises ProcessLookupError: if an instruction carries a different chain index
    """
    _q = work_queue
    _r = return_queue
    _idx = idx
    _sampler = sampler_obj
    while True:
        try:
            # logger.debug(f'sampler {_idx}: WAITING')
            idx, new_last_sample, beta, stop = _q.get()
            # if _idx == idx:
            #     logger.debug(f'sampler {_idx}: new_last_sample={new_last_sample}, beta={beta}, stop={stop}')
            if _idx != idx:
                # each worker owns exactly one chain; a mismatched index
                # means the queues got crossed somewhere
                # logger.debug(f'sampler {_idx}: encountered incorrect instruction')
                raise ProcessLookupError('received wrong instructions.')
            if stop is True:
                # logger.debug(f'sampler {_idx}: STOPPING trace_x: {len(_sampler.trace_x)}')
                _q.task_done()
                # logger.debug(f'sampler {_idx}: RETURNING')
                return _idx, _sampler.get_samples()
            if new_last_sample is not None:
                # adopt the (possibly swapped) last sample chosen by the
                # main process before continuing the chain
                _sampler.set_last_sample(copy.deepcopy(new_last_sample))
            logger.debug(f'sampler {_idx}: SAMPLING')
            _sampler.sample(n_samples=1, beta=beta)
            # logger.debug(f'sampler {idx} trace_x: {len(_sampler.trace_x)}')
            # logger.debug(f'sampler {_idx}: RETURNING')
            _r.put((idx, copy.deepcopy(_sampler.get_last_sample()), beta))
            logger.debug(f'sampler {_idx}: MARKING COMPLETE')
            _q.task_done()
        except (EOFError, queue.Empty):
            # queue temporarily unavailable: back off briefly and retry
            time.sleep(1)
            continue
class ParallelTemperingSampler(Sampler):
    """Simple parallel tempering sampler.

    Keeps one internal sampler per chain, each sampling at its own inverse
    temperature (beta). After every iteration, neighbouring chains may swap
    their last samples, which lets hot (low-beta) chains help the cold
    (beta=1) chain escape local modes.
    """
    # TODO: use this as base class, roll parallelized into another class.
    def __init__(
            self,
            internal_sampler: InternalSampler,
            betas: Sequence[float] = None,
            n_chains: int = None,
            options: Dict = None):
        """Create one deep-copied internal sampler per chain.

        Exactly one of ``betas`` (explicit inverse temperatures, first
        entry must be 1.0) or ``n_chains`` (betas then derived via
        ``near_exponential_decay_betas``) must be given.
        """
        super().__init__(options)
        # set betas
        if (betas is None) == (n_chains is None):
            raise ValueError("Set either betas or n_chains.")
        if betas is None:
            betas = near_exponential_decay_betas(
                n_chains=n_chains, exponent=self.options['exponent'],
                max_temp=self.options['max_temp'])
        if betas[0] != 1.:
            # the first chain must target the untempered posterior
            raise ValueError("The first chain must have beta=1.0")
        self.betas0 = np.array(betas)  # initial schedule; self.betas set in initialize()
        self.betas = None
        self.temper_lpost = self.options['temper_log_posterior']
        # one independent sampler per temperature (deep copies so chains
        # cannot share mutable state)
        self.samplers = [copy.deepcopy(internal_sampler)
                         for _ in range(len(self.betas0))]
        # configure internal samplers
        for sampler in self.samplers:
            sampler.make_internal(temper_lpost=self.temper_lpost)
    @classmethod
    def default_options(cls) -> Dict:
        """Default options controlling the temperature schedule."""
        return {
            'max_temp': 5e4,
            'exponent': 4,
            'temper_log_posterior': False,
        }
    def initialize(self,
                   problem: Problem,
                   x0: Union[np.ndarray, List[np.ndarray]]):
        """Initialize all chains at ``x0`` (one point, or one per chain)."""
        # initialize all samplers
        n_chains = len(self.samplers)
        if isinstance(x0, list):
            x0s = x0
        else:
            x0s = [x0 for _ in range(n_chains)]
        for sampler, x0 in zip(self.samplers, x0s):
            # deep-copy the problem so chains cannot interfere
            _problem = copy.deepcopy(problem)
            sampler.initialize(_problem, x0)
        self.betas = self.betas0
    def sample(
            self, n_samples: int, beta: float = 1.):
        """Run ``n_samples`` iterations: per-chain step, swap, beta adjustment."""
        # loop over iterations
        for i_sample in tqdm(range(int(n_samples))):  # TODO test
            # sample
            for sampler, beta in zip(self.samplers, self.betas):
                sampler.sample(n_samples=1, beta=beta)
            # swap samples
            swapped = self.swap_samples()
            # adjust temperatures
            self.adjust_betas(i_sample, swapped)
    def get_samples(self) -> McmcPtResult:
        """Concatenate all chains."""
        results = [sampler.get_samples() for sampler in self.samplers]
        trace_x = np.array([result.trace_x[0] for result in results])
        trace_neglogpost = np.array([result.trace_neglogpost[0]
                                     for result in results])
        trace_neglogprior = np.array([result.trace_neglogprior[0]
                                      for result in results])
        return McmcPtResult(
            trace_x=trace_x,
            trace_neglogpost=trace_neglogpost,
            trace_neglogprior=trace_neglogprior,
            betas=self.betas
        )
    def swap_samples(self) -> Sequence[bool]:
        """Swap samples as in Vousden2016.

        Proposes swaps between adjacent chains, from the hottest pair
        downward, accepting with probability based on the likelihood
        difference scaled by the beta difference. Returns one bool per
        adjacent pair (True where a swap happened).
        """
        # for recording swaps
        swapped = []
        if len(self.betas) == 1:
            # nothing to be done
            return swapped
        # beta differences
        dbetas = self.betas[:-1] - self.betas[1:]
        # loop over chains from highest temperature down
        for dbeta, sampler1, sampler2 in reversed(
                list(zip(dbetas, self.samplers[:-1], self.samplers[1:]))):
            # extract samples
            sample1 = sampler1.get_last_sample()
            sample2 = sampler2.get_last_sample()
            # extract log likelihood values
            sample1_llh = sample1.lpost - sample1.lprior
            sample2_llh = sample2.lpost - sample2.lprior
            # swapping probability
            p_acc_swap = dbeta * (sample2_llh - sample1_llh)
            # flip a coin (Metropolis acceptance in log space)
            u = np.random.uniform(0, 1)
            # check acceptance
            swap = np.log(u) < p_acc_swap
            if swap:
                # swap
                sampler2.set_last_sample(sample1)
                sampler1.set_last_sample(sample2)
            # record
            swapped.insert(0, swap)
        return swapped
    def adjust_betas(self, i_sample: int, swapped: Sequence[bool]):
        """Adjust temperature values. Default: Do nothing."""
class PoolParallelTemperingSampler(ParallelTemperingSampler):
    """Parallel tempering sampler running each chain in a pool worker.

    A multiprocess ``Pool`` executes ``worker_run_combined`` once per
    chain; the main process drives the iterations by sending
    ``(idx, last_sample, beta, stop)`` instructions through per-chain
    work queues and collecting each chain's last sample from per-chain
    return queues.
    """
    def __init__(self,
                 internal_sampler: InternalSampler,
                 betas: Sequence[float] = None,
                 n_chains: int = None,
                 options: Dict = None,
                 parallel_pool: Pool = None
                 ):
        """As the base class, plus an optional pre-built ``parallel_pool``
        (a pool with one process per chain is created when not supplied)."""
        super().__init__(internal_sampler, betas, n_chains, options)
        self.num_chains = n_chains
        # set betas
        if (betas is None) == (n_chains is None):
            raise ValueError("Set either betas or n_chains.")
        if betas is None:
            betas = near_exponential_decay_betas(
                n_chains=n_chains, exponent=self.options['exponent'],
                max_temp=self.options['max_temp'])
        if betas[0] != 1.:
            raise ValueError("The first chain must have beta=1.0")
        self.betas0 = np.array(betas)
        self.betas = None
        self.temper_lpost = self.options['temper_log_posterior']
        self.parallel_pool = parallel_pool if parallel_pool else Pool(processes=n_chains)
        self.samplers = [copy.deepcopy(internal_sampler)
                         for _ in range(n_chains)]
        # configure internal samplers
        for sampler in self.samplers:
            sampler.make_internal(temper_lpost=self.temper_lpost)
    def initialize(self,
                   problem: Problem,
                   x0: Union[np.ndarray, List[np.ndarray]]):
        """Initialize all chains at ``x0`` (one point, or one per chain)."""
        # initialize all samplers
        n_chains = len(self.samplers)
        if isinstance(x0, list):
            x0s = x0
        else:
            x0s = [x0 for _ in range(n_chains)]
        for sampler, x0 in zip(self.samplers, x0s):
            _problem = copy.deepcopy(problem)
            sampler.initialize(_problem, x0)
        self.betas = self.betas0
    def sample(self, n_samples: int, beta: float = 1.):
        """Run ``n_samples`` parallel-tempering iterations on the pool."""
        with Manager() as mgr:
            # one work queue and one return queue per chain
            queues_work = [mgr.Queue(maxsize=2) for _ in range(self.num_chains)]
            queues_return = [mgr.Queue(maxsize=2) for _ in range(self.num_chains)]
            worker_results = self.parallel_pool.starmap_async(
                func=worker_run_combined,  # func=worker_init
                iterable=[(queues_work[idx], queues_return[idx], idx, self.samplers[idx])
                          for idx in range(self.num_chains)])
            # give the workers time to start up before deploying work
            time.sleep(3.0)
            swapped = [None for _ in self.samplers]
            last_samples = [None for _ in self.samplers]
            for i_sample in range(int(n_samples)):  # tqdm(range(int(n_samples))):
                print(f"!! Iteration {i_sample:5} / {int(n_samples):5} !! start time: {time.time()}")
                logger.debug('MAIN PROCESS: deploying work...')
                for idx, beta in enumerate(self.betas):
                    queues_work[idx].put((idx, copy.deepcopy(swapped[idx]), beta, False))  # sample
                logger.debug('MAIN PROCESS: waiting for return...')
                for idx in range(len(self.samplers)):
                    idx, last_sample, beta = queues_return[idx].get()  # get sample
                    last_samples[idx] = last_sample
                logger.debug('MAIN PROCESS: swapping samples...')
                swapped = self.swap_samples(last_samples)  # swap samples
                self.adjust_betas(i_sample, swapped, last_samples)  # adjust temps
            logger.debug('MAIN PROCESS: stopping workers...')
            _ = [queues_work[idx].put((idx, None, 0.00, True)) for idx in range(self.num_chains)]
            logger.debug('MAIN PROCESS: waiting for workers to stop...')
            _ = [queues_work[idx].join() for idx in range(self.num_chains)]
            # gather the per-chain results returned by the workers
            idxs_and_sampler_objs = {idx: sampler for idx, sampler in worker_results.get()}
            for idx, sampler_result in idxs_and_sampler_objs.items():
                self.samplers[idx] = sampler_result
            self.parallel_pool.close()
            self.parallel_pool.join()
    def get_samples(self) -> McmcPtResult:
        """Concatenate all chains."""
        # after sample(), self.samplers holds the worker-returned results
        results = self.samplers
        trace_x = np.array([result.trace_x[0] for result in results])
        trace_neglogpost = np.array([result.trace_neglogpost[0]
                                     for result in results])
        trace_neglogprior = np.array([result.trace_neglogprior[0]
                                      for result in results])
        return McmcPtResult(
            trace_x=trace_x,
            trace_neglogpost=trace_neglogpost,
            trace_neglogprior=trace_neglogprior,
            betas=self.betas
        )
    def swap_samples(self, last_samples: List[Union[InternalSample, None]]) -> List[Union[InternalSample, None]]:
        """Swap samples as in Vousden2016.

        Works on the collected ``last_samples`` instead of the sampler
        objects (which live in the worker processes); returns the new
        per-chain last samples (swapped where a swap was accepted).
        """
        # for recording swaps
        swapped = copy.deepcopy(last_samples)
        if len(self.betas) == 1:
            # nothing to be done
            return swapped
        # beta differences
        dbetas = self.betas[:-1] - self.betas[1:]
        # loop over chains from highest temperature down.
        # BUGFIX: pair ADJACENT chains (idx, idx + 1) as the serial
        # ParallelTemperingSampler does with samplers[:-1] / samplers[1:].
        # The previous index construction used range(len(samplers) - 1)
        # for BOTH sides, comparing every chain with itself.
        for dbeta, sampler1_idx, sampler2_idx in reversed(list(zip(
                dbetas, range(len(self.samplers) - 1), range(1, len(self.samplers))))):
            # extract samples
            sample1 = last_samples[sampler1_idx]
            sample2 = last_samples[sampler2_idx]
            # extract log likelihood values
            sample1_llh = sample1.lpost - sample1.lprior
            sample2_llh = sample2.lpost - sample2.lprior
            # swapping probability
            p_acc_swap = dbeta * (sample2_llh - sample1_llh)
            # flip a coin
            u = np.random.uniform(0, 1)
            # check acceptance
            swap = np.log(u) < p_acc_swap
            if swap:
                # swap
                swapped[sampler2_idx] = sample1
                swapped[sampler1_idx] = sample2
            else:
                swapped[sampler2_idx] = sample2
                swapped[sampler1_idx] = sample1
        return swapped
    def adjust_betas(self, i_sample: int,
                     swapped: Sequence[Union[None, InternalSample]],
                     last_samples: Sequence[Union[None, InternalSample]]):
        """Adjust temperature values. Default: Do nothing."""
def near_exponential_decay_betas(
        n_chains: int, exponent: float, max_temp: float) -> np.ndarray:
    """Initialize inverse temperatures (betas) in a near-exponential decay scheme.

    Parameters
    ----------
    n_chains:
        Number of chains to use.
    exponent:
        Decay exponent. The higher, the more small temperatures are used.
    max_temp:
        Maximum chain temperature.

    Returns
    -------
    Array of ``n_chains`` inverse temperatures, starting at 1.0 and
    decreasing towards ``1 / max_temp``.
    """
    # a single chain always samples the untempered posterior (beta = 1)
    if n_chains == 1:
        return np.array([1.])
    # temperatures grow near-exponentially from 1 to max_temp:
    # linearly spaced roots raised back to the exponent
    base = np.linspace(1, max_temp ** (1 / exponent), n_chains)
    return 1 / base ** exponent
| [
"logging.getLogger",
"copy.deepcopy",
"numpy.log",
"time.sleep",
"numpy.array",
"numpy.linspace",
"multiprocess.Pool",
"numpy.random.uniform",
"multiprocess.Manager",
"time.time"
] | [((393, 420), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (410, 420), False, 'import logging\n'), ((4696, 4711), 'numpy.array', 'np.array', (['betas'], {}), '(betas)\n', (4704, 4711), True, 'import numpy as np\n'), ((6366, 6417), 'numpy.array', 'np.array', (['[result.trace_x[0] for result in results]'], {}), '([result.trace_x[0] for result in results])\n', (6374, 6417), True, 'import numpy as np\n'), ((6445, 6505), 'numpy.array', 'np.array', (['[result.trace_neglogpost[0] for result in results]'], {}), '([result.trace_neglogpost[0] for result in results])\n', (6453, 6505), True, 'import numpy as np\n'), ((6571, 6632), 'numpy.array', 'np.array', (['[result.trace_neglogprior[0] for result in results]'], {}), '([result.trace_neglogprior[0] for result in results])\n', (6579, 6632), True, 'import numpy as np\n'), ((9123, 9138), 'numpy.array', 'np.array', (['betas'], {}), '(betas)\n', (9131, 9138), True, 'import numpy as np\n'), ((13543, 13594), 'numpy.array', 'np.array', (['[result.trace_x[0] for result in results]'], {}), '([result.trace_x[0] for result in results])\n', (13551, 13594), True, 'import numpy as np\n'), ((13622, 13682), 'numpy.array', 'np.array', (['[result.trace_neglogpost[0] for result in results]'], {}), '([result.trace_neglogpost[0] for result in results])\n', (13630, 13682), True, 'import numpy as np\n'), ((13748, 13809), 'numpy.array', 'np.array', (['[result.trace_neglogprior[0] for result in results]'], {}), '([result.trace_neglogprior[0] for result in results])\n', (13756, 13809), True, 'import numpy as np\n'), ((14250, 14277), 'copy.deepcopy', 'copy.deepcopy', (['last_samples'], {}), '(last_samples)\n', (14263, 14277), False, 'import copy\n'), ((16352, 16367), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (16360, 16367), True, 'import numpy as np\n'), ((16387, 16439), 'numpy.linspace', 'np.linspace', (['(1)', '(max_temp ** (1 / exponent))', 'n_chains'], {}), '(1, max_temp ** (1 / exponent), 
n_chains)\n', (16398, 16439), True, 'import numpy as np\n'), ((4830, 4861), 'copy.deepcopy', 'copy.deepcopy', (['internal_sampler'], {}), '(internal_sampler)\n', (4843, 4861), False, 'import copy\n'), ((5632, 5654), 'copy.deepcopy', 'copy.deepcopy', (['problem'], {}), '(problem)\n', (5645, 5654), False, 'import copy\n'), ((7791, 7814), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (7808, 7814), True, 'import numpy as np\n'), ((9296, 9320), 'multiprocess.Pool', 'Pool', ([], {'processes': 'n_chains'}), '(processes=n_chains)\n', (9300, 9320), False, 'from multiprocess import Pool, Manager, Queue, Pipe\n'), ((9347, 9378), 'copy.deepcopy', 'copy.deepcopy', (['internal_sampler'], {}), '(internal_sampler)\n', (9360, 9378), False, 'import copy\n'), ((9960, 9982), 'copy.deepcopy', 'copy.deepcopy', (['problem'], {}), '(problem)\n', (9973, 9982), False, 'import copy\n'), ((10131, 10140), 'multiprocess.Manager', 'Manager', ([], {}), '()\n', (10138, 10140), False, 'from multiprocess import Pool, Manager, Queue, Pipe\n'), ((10604, 10619), 'time.sleep', 'time.sleep', (['(3.0)'], {}), '(3.0)\n', (10614, 10619), False, 'import time\n'), ((15105, 15128), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (15122, 15128), True, 'import numpy as np\n'), ((3822, 3835), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3832, 3835), False, 'import time\n'), ((7866, 7875), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (7872, 7875), True, 'import numpy as np\n'), ((15180, 15189), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (15186, 15189), True, 'import numpy as np\n'), ((3333, 3363), 'copy.deepcopy', 'copy.deepcopy', (['new_last_sample'], {}), '(new_last_sample)\n', (3346, 3363), False, 'import copy\n'), ((11042, 11053), 'time.time', 'time.time', ([], {}), '()\n', (11051, 11053), False, 'import time\n'), ((11224, 11251), 'copy.deepcopy', 'copy.deepcopy', (['swapped[idx]'], {}), '(swapped[idx])\n', (11237, 11251), False, 'import 
copy\n')] |
###############################################################################
#MIT License
#
#Copyright (c) 2019 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
###############################################################################
import os
import cv2
import numpy as np
from PIL import Image
import time
import random
#for fancy parameterization
from argparse import ArgumentParser
def parse_args():
    """Build and evaluate the command-line interface for the ANDA run.

    Returns the parsed argparse namespace (paths, seed, dataset size,
    number of backgrounds and operations).
    """
    arg_parser = ArgumentParser(
        description='Compute resulting image using ANDA techinique for the MSRA10K dataset')
    # (name, default, help) triples for all string-valued options
    string_options = (
        ('obj_path', "/home/bakrinski/datasets/MSRA10K/images/",
         'OBJ_FOLDER_IMG input images path'),
        ('obj_mask_path', "/home/bakrinski/datasets/MSRA10K/masks/",
         'OBJ_FOLDER_MASK input masks path'),
        ('bg_path', "/home/dvruiz/PConv-Keras/output/",
         'BG_FOLDER_IMG background images path'),
        ('index_obj_path', "dataset.txt",
         'LIST_OF_N_OBJECTS filepath for the file containing per line a indice, e.g. "dataset.txt" resulting from genObjIndicees.py'),
        ('index_bg_path', "indices_cosine.txt",
         'LIST_OF_INDICES filepath for the file containing per line a indice, e.g. "indices_cosine.txt" resulting from computeKnn.py'),
        ('out_path', "output/",
         'output path containing a folder named images and masks, e.g."output/" '),
    )
    # (name, default, help) triples for all integer-valued options
    int_options = (
        ('seed', 22, 'seed number for the pseudo-random computation'),
        ('size', 10000, 'number of images in the dataset'),
        ('n_bgs', 1, 'N_OF_BACKGROUNDS'),
        ('n_ops', 1, 'N_OF_OPS'),
    )
    for name, default, help_text in string_options:
        arg_parser.add_argument(
            '-' + name, '--' + name, type=str, default=default, help=help_text)
    for name, default, help_text in int_options:
        arg_parser.add_argument(
            '-' + name, '--' + name, type=int, default=default, help=help_text)
    return arg_parser.parse_args()
# SETTINGS
#CALL PARSER
args = parse_args()
#
# Input/output locations and run parameters, all taken from the CLI
# (see parse_args for defaults and meanings).
OBJ_FOLDER_IMG = args.obj_path
OBJ_FOLDER_MASK = args.obj_mask_path
BG_FOLDER_IMG = args.bg_path
OUTPUT_FOLDER_IMG = "images/"
OUTPUT_FOLDER_MASK = "masks/"
LIST_OF_N_OBJECTS = args.index_obj_path
N_OBJECT = args.size
N_OF_BACKGROUNDS = args.n_bgs
N_OF_OPS = args.n_ops
LIST_OF_INDICES = args.index_bg_path
# 3x3 structuring element used to erode masks before border smoothing
kernelErode = np.ones((3, 3), np.uint8)
# working canvas size: objects and backgrounds are padded to maxH x maxW
maxH = 512
maxW = 512
# seed both RNGs so the augmentation is reproducible for a given seed
random.seed(args.seed)
np.random.seed(args.seed)
# per-image multiplicative jitter applied to the target area ratio (beta)
noise_scale = np.random.uniform(low=0.975, high=1.025, size=N_OBJECT)
#
# # SETTINGS
# OBJ_FOLDER_IMG = "/home/bakrinski/datasets/MSRA10K/images/"
# OBJ_FOLDER_MASK = "/home/bakrinski/datasets/MSRA10K/masks/"
# BG_FOLDER_IMG = "/home/dvruiz/PConv-Keras/output/"
# OUTPUT_FOLDER_IMG = "images/"
# OUTPUT_FOLDER_MASK = "masks/"
# LIST_OF_N_OBJECTS = "dataset.txt"
# N_WORST = 10000
# N_OF_BACKGROUNDS = 1
# N_OF_OPS = 1
# LIST_OF_INDICES = "indices_cosine.txt"
#
# kernelErode = np.ones((3, 3), np.uint8)
#
# maxH = 512
# maxW = 512
#
# random.seed(22)
# np.random.seed(22)
# # noise_scale = np.random.uniform(low=0.975, high=1.025, size=13980)
# noise_scale = np.random.uniform(low=0.975, high=1.025, size=N_WORST)
# #
def randomTranslateInside(newYmax, newYmin, newXmax, newXmin, newOrigin, border, M):
    """Add a random translation to affine matrix M, keeping the bbox inside.

    Given the transformed bounding-box corners of the object, the
    background's top-left offset `newOrigin` (y, x) inside the padded
    canvas and its size `border` (height, width), this shifts M's
    translation entries (M[0][2] = x, M[1][2] = y) by a random fraction
    of the available slack so the object stays fully inside the
    background region. Returns M (also modified in place).

    NOTE: the two uniform draws happen unconditionally and in a fixed
    order (x first, then y) so the seeded RNG stream stays reproducible.
    """
    noise_x = np.random.uniform(low=0.0, high=1.0)
    noise_y = np.random.uniform(low=0.0, high=1.0)
    # check if bbox can move in y
    if((newYmax - newYmin) < border[0]):
        # check the direction of free space
        if((newYmax) < newOrigin[0] + border[0]):
            if((newYmin) > newOrigin[0]):
                # slack on both sides: pick a direction at random
                freeSpacePos = (newOrigin[0] + border[0]) - newYmax
                freeSpaceNeg = newYmin - newOrigin[0]
                luck = np.random.randint(low=0, high=2)
                if(luck == 0):
                    M[1][2] += np.floor(noise_y * freeSpacePos)
                else:
                    M[1][2] -= np.floor(noise_y * freeSpaceNeg)
            else:
                # slack only towards the bottom: move down
                freeSpace = (newOrigin[0] + border[0]) - newYmax
                M[1][2] += np.floor(noise_y * freeSpace)
        else:
            if((newYmin) > newOrigin[0]):
                # slack only towards the top: move up
                freeSpace = newYmin - newOrigin[0]
                M[1][2] -= np.floor(noise_y * freeSpace)
    # check if bbox can move in x (mirror of the y logic above)
    if((newXmax - newXmin) < border[1]):
        # check the direction of free space
        if((newXmax) < newOrigin[1] + border[1]):
            if((newXmin) > newOrigin[1]):
                # slack on both sides: pick a direction at random
                freeSpacePos = (newOrigin[1] + border[1]) - newXmax
                freeSpaceNeg = newXmin - newOrigin[1]
                luck = np.random.randint(low=0, high=2)
                if(luck == 0):
                    M[0][2] += np.floor(noise_x * freeSpacePos)
                else:
                    M[0][2] -= np.floor(noise_x * freeSpaceNeg)
            else:
                # slack only towards the right: move right
                freeSpace = (newOrigin[1] + border[1]) - newXmax
                M[0][2] += np.floor(noise_x * freeSpace)
        else:
            if((newXmin) > newOrigin[1]):
                # slack only towards the left: move left
                freeSpace = newXmin - newOrigin[1]
                M[0][2] -= np.floor(noise_x * freeSpace)
    return M
def geometricOp2(resizedImg, resizedMask, bgOriginalshape, op, globalIndex):
    """Scale/rotate/translate the object so it covers a target fraction of the bg.

    Chooses a target object/background area ratio (beta in
    {0.05, 0.15, 0.65, 0.75, 0.85}, jittered by noise_scale) based on
    `globalIndex`, then builds an affine transform that scales the object
    to that ratio, clamps it inside the background region of the padded
    canvas, and applies a random in-bounds translation.

    :param resizedImg: object image padded to the maxH x maxW canvas
    :param resizedMask: matching object mask (same canvas)
    :param bgOriginalshape: shape of the original (unpadded) background
    :param op: ignored - overwritten below by globalIndex % 5
    :param globalIndex: global sample counter selecting beta and noise
    :return: (transformed image, transformed mask), both maxH x maxW
    """
    #######################################################
    # offset of the background content inside the padded canvas
    diffH = int((resizedImg.shape[0] - bgOriginalshape[0]) / 2)
    diffW = int((resizedImg.shape[1] - bgOriginalshape[1]) / 2)
    ####
    ymin, ymax, xmin, xmax = bbox(resizedMask)
    # xmin -= np.abs(noise_translate_x[globalIndex])
    # xmax += np.abs(noise_translate_x[globalIndex])
    # ymin -= np.abs(noise_translate_y[globalIndex])
    # ymax += np.abs(noise_translate_y[globalIndex])
    propX = (xmax - xmin)
    propY = (ymax - ymin)
    areaOBJ = propX * propY
    areaIMG = bgOriginalshape[0] * bgOriginalshape[1]
    prop = areaOBJ / areaIMG
    ###
    # note: the `op` parameter is ignored; the op is derived from globalIndex
    op = globalIndex % 5
    if(op == 0):
        beta = 0.05 * noise_scale[globalIndex]
    if(op == 1):
        beta = 0.15 * noise_scale[globalIndex]
    if(op == 2):
        beta = 0.65 * noise_scale[globalIndex]
    if(op == 3):
        beta = 0.75 * noise_scale[globalIndex]
    if(op == 4):
        beta = 0.85 * noise_scale[globalIndex]
    # uniform scale that makes the object's bbox area beta * areaIMG
    scale = np.sqrt((beta * areaIMG) / areaOBJ)
    diffx = ((xmax - xmin) / 2)
    diffy = ((ymax - ymin) / 2)
    centerx = xmin + diffx
    centery = ymin + diffy
    # three bbox corners define the affine source triangle
    pts1 = np.float32([[xmin, ymin], [xmax, ymin], [xmin, ymax]])
    newXmin = centerx - diffx * scale
    newXmax = centerx + diffx * scale
    newYmin = centery - diffy * scale
    newYmax = centery + diffy * scale
    # LOGIC HERE
    newOrigin = [diffH, diffW]
    border = [bgOriginalshape[0], bgOriginalshape[1]]
    # check if the aspect of the object is the same as the bg
    obj_orientation = -1
    bg_orientation = -1
    if(diffx >= diffy):
        obj_orientation = 0
    else:
        obj_orientation = 1
    if(bgOriginalshape[1] >= bgOriginalshape[0]):
        bg_orientation = 0
    else:
        bg_orientation = 1
    # check if can fit
    if((newYmax - newYmin <= border[0])and(newXmax - newXmin <= border[1])):
        # ok then it can fit
        # but does it need translation?
        pts2 = np.float32(
            [[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])
        M = cv2.getAffineTransform(pts1, pts2)
        # origin of object must be >= newOrigin
        if(newYmin <= newOrigin[0]):
            local_diff_y = newOrigin[0] - newYmin
            M[1][2] += (local_diff_y)
        if(newXmin <= newOrigin[1]):
            local_diff_x = newOrigin[1] - newXmin
            M[0][2] += (local_diff_x)
        # maxdim must be <= border with the correct origin
        if(newYmax >= (border[0] + newOrigin[0])):
            local_diff_y = newYmax - (border[0] + newOrigin[0])
            M[1][2] -= (local_diff_y)
        if(newXmax >= (border[1] + newOrigin[1])):
            local_diff_x = newXmax - (border[1] + newOrigin[1])
            M[0][2] -= (local_diff_x)
        # recompute the bbox corners under the adjusted transform
        newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
        newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
        newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
        newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
        # normalize min/max ordering
        newXminTmp = min(newXmin, newXmax)
        newXmaxTmp = max(newXmin, newXmax)
        newYminTmp = min(newYmin, newYmax)
        newYmaxTmp = max(newYmin, newYmax)
        newXmin = newXminTmp
        newXmax = newXmaxTmp
        newYmin = newYminTmp
        newYmax = newYmaxTmp
        M = randomTranslateInside(
            newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
        resizedImg = cv2.warpAffine(
            resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
        resizedMask = cv2.warpAffine(
            resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
    else:
        # it cannot fit
        # resize
        if(obj_orientation == bg_orientation):
            # print("same")
            # limit resize to max that fits
            # scale must consider translation
            scale = min((border[0]) / (ymax - ymin),
                        (border[1]) / (xmax - xmin))
            #
            newXmin = centerx - diffx * scale
            newXmax = centerx + diffx * scale
            newYmin = centery - diffy * scale
            newYmax = centery + diffy * scale
            pts2 = np.float32(
                [[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])
            M = cv2.getAffineTransform(pts1, pts2)
            # origin of object must be >= newOrigin
            if(newYmin <= newOrigin[0]):
                local_diff_y = newOrigin[0] - newYmin
                M[1][2] += (local_diff_y)
            if(newXmin <= newOrigin[1]):
                local_diff_x = newOrigin[1] - newXmin
                M[0][2] += (local_diff_x)
            # maxdim must be <= border with the correct origin
            if(newYmax >= (border[0] + newOrigin[0])):
                local_diff_y = newYmax - (border[0] + newOrigin[0])
                M[1][2] -= (local_diff_y)
            if(newXmax >= (border[1] + newOrigin[1])):
                local_diff_x = newXmax - (border[1] + newOrigin[1])
                M[0][2] -= (local_diff_x)
            # recompute the bbox corners under the adjusted transform
            newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
            newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
            newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
            newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
            newXminTmp = min(newXmin, newXmax)
            newXmaxTmp = max(newXmin, newXmax)
            newYminTmp = min(newYmin, newYmax)
            newYmaxTmp = max(newYmin, newYmax)
            newXmin = newXminTmp
            newXmax = newXmaxTmp
            newYmin = newYminTmp
            newYmax = newYmaxTmp
            #
            M = randomTranslateInside(
                newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
            resizedImg = cv2.warpAffine(
                resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
            resizedMask = cv2.warpAffine(
                resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
        else:
            # print("different")
            # check if a rotated obj fits: rotate +/-90 degrees at random
            idxmod = np.random.randint(low=0, high=2)
            if(idxmod == 0):
                degrees = -90
            if(idxmod == 1):
                degrees = 90
            M = cv2.getRotationMatrix2D(((maxW / 2), (maxH / 2)), degrees, 1)
            # bbox corners after rotation
            newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
            newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
            newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
            newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
            newXminTmp = min(newXmin, newXmax)
            newXmaxTmp = max(newXmin, newXmax)
            newYminTmp = min(newYmin, newYmax)
            newYmaxTmp = max(newYmin, newYmax)
            newXmin = newXminTmp
            newXmax = newXmaxTmp
            newYmin = newYminTmp
            newYmax = newYmaxTmp
            # scale must consider translation
            scale = min((border[0]) / (newYmax - newYmin),
                        (border[1]) / (newXmax - newXmin))
            #
            M[0][0] *= scale
            M[0][1] *= scale
            M[1][0] *= scale
            M[1][1] *= scale
            # recompute corners after scaling the rotation part
            newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
            newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
            newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
            newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
            newXminTmp = min(newXmin, newXmax)
            newXmaxTmp = max(newXmin, newXmax)
            newYminTmp = min(newYmin, newYmax)
            newYmaxTmp = max(newYmin, newYmax)
            newXmin = newXminTmp
            newXmax = newXmaxTmp
            newYmin = newYminTmp
            newYmax = newYmaxTmp
            # origin of object must be >= newOrigin
            if(newYmin <= newOrigin[0]):
                local_diff_y = newOrigin[0] - newYmin
                M[1][2] += (local_diff_y)
            if(newXmin <= newOrigin[1]):
                local_diff_x = newOrigin[1] - newXmin
                M[0][2] += (local_diff_x)
            # maxdim must be <= border with the correct origin
            if(newYmax >= (border[0] + newOrigin[0])):
                local_diff_y = newYmax - (border[0] + newOrigin[0])
                M[1][2] -= (local_diff_y)
            if(newXmax >= (border[1] + newOrigin[1])):
                local_diff_x = newXmax - (border[1] + newOrigin[1])
                M[0][2] -= (local_diff_x)
            # final corner recomputation before the random translation
            newXmin = xmin * M[0][0] + ymin * M[0][1] + M[0][2]
            newXmax = xmax * M[0][0] + ymax * M[0][1] + M[0][2]
            newYmin = xmin * M[1][0] + ymin * M[1][1] + M[1][2]
            newYmax = xmax * M[1][0] + ymax * M[1][1] + M[1][2]
            newXminTmp = min(newXmin, newXmax)
            newXmaxTmp = max(newXmin, newXmax)
            newYminTmp = min(newYmin, newYmax)
            newYmaxTmp = max(newYmin, newYmax)
            newXmin = newXminTmp
            newXmax = newXmaxTmp
            newYmin = newYminTmp
            newYmax = newYmaxTmp
            #
            M = randomTranslateInside(
                newYmax, newYmin, newXmax, newXmin, newOrigin, border, M)
            resizedImg = cv2.warpAffine(
                resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)
            resizedMask = cv2.warpAffine(
                resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)
    ####
    # cv2.rectangle(resizedMask, (int(newXmin), int(newYmin)),
    #              (int(newXmax), int(newYmax)), (255, 255, 255), 1)
    #######################################################
    return resizedImg, resizedMask
def bbox(img):
    """Return (rmin, rmax, cmin, cmax): first/last nonzero row and column.

    Raises IndexError if `img` has no nonzero entries, matching the
    previous behavior.
    """
    occupied_rows = img.any(axis=1)
    occupied_cols = img.any(axis=0)
    row_idx = np.where(occupied_rows)[0]
    col_idx = np.where(occupied_cols)[0]
    return row_idx[0], row_idx[-1], col_idx[0], col_idx[-1]
def resize_with_pad(image, height, width):
    """Pad `image` with black pixels so it measures at least height x width.

    The original content is centered (extra pixel goes to bottom/right);
    images already at least that large are returned with zero padding
    added on every side. No cropping or resampling happens here.
    """
    pad_v = max(height - image.shape[0], 0)
    pad_h = max(width - image.shape[1], 0)
    top = pad_v // 2
    left = pad_h // 2
    return cv2.copyMakeBorder(
        image, top, pad_v - top, left, pad_h - left,
        cv2.BORDER_CONSTANT, value=[0, 0, 0])
def resizeToOrg(bgOriginalshape, new, newMask):
    """Center-crop `new` and `newMask` back to the background's original size.

    Inverse of the centered padding done by resize_with_pad: whenever the
    canvas is taller/wider than `bgOriginalshape`, the symmetric margins
    are stripped. Returns the (possibly cropped) pair.
    """
    target_h, target_w = bgOriginalshape[0], bgOriginalshape[1]
    if target_h < new.shape[0]:
        off_h = (new.shape[0] - target_h) // 2
        new = new[off_h:target_h + off_h, :, :]
        newMask = newMask[off_h:target_h + off_h, :, :]
    if target_w < new.shape[1]:
        off_w = (new.shape[1] - target_w) // 2
        new = new[:, off_w:target_w + off_w, :]
        newMask = newMask[:, off_w:target_w + off_w, :]
    return new, newMask
def loadResizedBG(index):
    """Load background image `index` and pad it to the working canvas.

    Returns (padded background array, original background shape); the
    original shape is needed later to crop results back via resizeToOrg.
    """
    file_path = BG_FOLDER_IMG + "MSRA10K_image_{:06d}.png".format(index)
    handle = Image.open(file_path)
    raw = np.array(handle)
    original_shape = raw.shape
    padded = resize_with_pad(raw, height=maxH, width=maxW)
    handle.close()
    return padded, original_shape
def main(op, multipleBgs, outPath):
    """Compose each selected object onto its nearest backgrounds and save results.

    Reads the object list (LIST_OF_N_OBJECTS) and the per-object
    background indices (LIST_OF_INDICES), then for every object/background
    pair applies one geometric op per N_OF_OPS, blends the object into the
    background with a smoothed border, and writes the image/mask pair to
    `outPath`/images and `outPath`/masks.

    :param op: unused legacy parameter (the op index is derived per image)
    :param multipleBgs: unused legacy flag; N_OF_BACKGROUNDS governs bg count
    :param outPath: output directory containing `images/` and `masks/`
    """
    # read LIST_OF_N_OBJECTS: one object index per line.
    # NOTE: `np.int` was removed in numpy >= 1.24; plain `int` is the
    # documented equivalent dtype.
    arrOBJ = np.zeros(N_OBJECT, int)
    with open(LIST_OF_N_OBJECTS, "r") as f:
        for i in range(0, N_OBJECT):
            fields = f.readline().split(" ")
            arrOBJ[i] = int(fields[0])
    ###
    # read LIST_OF_INDICES: per object, the indices of its nearest backgrounds;
    # an empty line marks an object to skip
    arrBG = np.zeros((N_OBJECT, N_OF_BACKGROUNDS), int)
    with open(LIST_OF_INDICES, "r") as f:
        for i in range(0, N_OBJECT):
            line = f.readline()
            if line == '\n':
                arrOBJ[i] = -1
            else:
                fields = line.split(" ")
                for bgindex in range(0, N_OF_BACKGROUNDS):
                    arrBG[i][bgindex] = int(fields[bgindex])
    ###
    realI = 0
    for i in range(0, N_OBJECT, 1):
        if(arrOBJ[i] != -1):
            imgName = "MSRA10K_image_{:06d}.jpg".format(arrOBJ[i])
            imFile = Image.open(OBJ_FOLDER_IMG + imgName)
            img = np.array(imFile)
            maskName = imgName.replace(".jpg", ".png")
            maskName = maskName.replace("image", "mask")
            maskFile = Image.open(OBJ_FOLDER_MASK + maskName)
            mask = np.array(maskFile)
            # replicate the single-channel mask to 3 channels
            mask = np.tile(mask[:, :, None], [1, 1, 3])
            resizedImg = resize_with_pad(img, height=maxH, width=maxW)
            resizedMask = resize_with_pad(mask, height=maxH, width=maxW)
            imFile.close()
            maskFile.close()
            # per-(background, op) work buffers.
            # BUGFIX: the original used `[[None] * N_OF_OPS] * N_OF_BACKGROUNDS`,
            # which aliases ONE inner list across all backgrounds; the
            # comprehension gives each background its own row.
            resizedImgArr = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            resizedMaskArr = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            resizedBg = [None] * N_OF_BACKGROUNDS
            bgOriginalshape = [None] * N_OF_BACKGROUNDS
            blur = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            inv_blur = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            new = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            result = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            resizedMaskFinal = [[None] * N_OF_OPS for _ in range(N_OF_BACKGROUNDS)]
            for bgindex in range(0, N_OF_BACKGROUNDS):
                resizedBg[bgindex], bgOriginalshape[bgindex] = loadResizedBG(
                    arrBG[i][bgindex])
                # calcule ops per bgs
                for opindex in range(0, N_OF_OPS):
                    globalIndex = (
                        ((realI * N_OF_BACKGROUNDS) + bgindex) * N_OF_OPS) + opindex
                    resizedImgArr[bgindex][opindex], resizedMaskArr[bgindex][opindex] = geometricOp2(
                        resizedImg, resizedMask, bgOriginalshape[bgindex], opindex, globalIndex)
                    # BEGIN Smooth border copy: erode + blur the mask so the
                    # object's border blends into the background
                    resizedMaskTmp = cv2.erode(
                        resizedMaskArr[bgindex][opindex], kernelErode, iterations=1)
                    blur[bgindex][opindex] = cv2.blur(resizedMaskTmp, (3, 3))
                    blur[bgindex][opindex] = (
                        blur[bgindex][opindex] / 255) * 0.95
                    inv_blur[bgindex][opindex] = 1 - blur[bgindex][opindex]
                    new[bgindex][opindex] = blur[bgindex][opindex] * resizedImgArr[bgindex][opindex] + \
                        inv_blur[bgindex][opindex] * resizedBg[bgindex]
                    # END Smooth border copy
                    new[bgindex][opindex], resizedMaskArr[bgindex][opindex] = resizeToOrg(
                        bgOriginalshape[bgindex], new[bgindex][opindex], resizedMaskArr[bgindex][opindex])
                    #########################################################
                    result[bgindex][opindex] = Image.fromarray(
                        (new[bgindex][opindex]).astype(np.uint8))
                    resizedMaskFinal[bgindex][opindex] = Image.fromarray(
                        (resizedMaskArr[bgindex][opindex]).astype(np.uint8))
                    stamp = "{:06d}_{:06d}_{:03d}.png".format(
                        arrOBJ[i], arrBG[i][bgindex], opindex)
                    result[bgindex][opindex].save(outPath + OUTPUT_FOLDER_IMG +
                                                  "MSRA10K_image_" + stamp)
                    resizedMaskFinal[bgindex][opindex].save(outPath + OUTPUT_FOLDER_MASK
                                                            + "MSRA10K_mask_" + stamp)
                    print(stamp)
                    #########################################################
            realI += 1
if __name__ == '__main__':
    # Enable the multi-background path exactly when more than one
    # background per object was requested on the command line.
    main(0, args.n_bgs > 1, args.out_path)
| [
"numpy.sqrt",
"numpy.array",
"argparse.ArgumentParser",
"numpy.where",
"cv2.erode",
"numpy.random.seed",
"cv2.blur",
"numpy.tile",
"cv2.warpAffine",
"numpy.ones",
"numpy.floor",
"numpy.any",
"cv2.getAffineTransform",
"cv2.getRotationMatrix2D",
"PIL.Image.open",
"cv2.copyMakeBorder",
... | [((3729, 3754), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (3736, 3754), True, 'import numpy as np\n'), ((3779, 3801), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3790, 3801), False, 'import random\n'), ((3802, 3827), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3816, 3827), True, 'import numpy as np\n'), ((3842, 3897), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.975)', 'high': '(1.025)', 'size': 'N_OBJECT'}), '(low=0.975, high=1.025, size=N_OBJECT)\n', (3859, 3897), True, 'import numpy as np\n'), ((1430, 1534), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Compute resulting image using ANDA techinique for the MSRA10K dataset"""'}), "(description=\n 'Compute resulting image using ANDA techinique for the MSRA10K dataset')\n", (1444, 1534), False, 'from argparse import ArgumentParser\n'), ((4647, 4683), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)'}), '(low=0.0, high=1.0)\n', (4664, 4683), True, 'import numpy as np\n'), ((4698, 4734), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)'}), '(low=0.0, high=1.0)\n', (4715, 4734), True, 'import numpy as np\n'), ((7542, 7575), 'numpy.sqrt', 'np.sqrt', (['(beta * areaIMG / areaOBJ)'], {}), '(beta * areaIMG / areaOBJ)\n', (7549, 7575), True, 'import numpy as np\n'), ((7709, 7763), 'numpy.float32', 'np.float32', (['[[xmin, ymin], [xmax, ymin], [xmin, ymax]]'], {}), '([[xmin, ymin], [xmax, ymin], [xmin, ymax]])\n', (7719, 7763), True, 'import numpy as np\n'), ((16235, 16254), 'numpy.any', 'np.any', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (16241, 16254), True, 'import numpy as np\n'), ((16266, 16285), 'numpy.any', 'np.any', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (16272, 16285), True, 'import numpy as np\n'), ((17029, 17118), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'top', 'bottom', 'left', 
'right', 'cv2.BORDER_CONSTANT'], {'value': 'BLACK'}), '(image, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=BLACK)\n', (17047, 17118), False, 'import cv2\n'), ((17772, 17806), 'PIL.Image.open', 'Image.open', (['(BG_FOLDER_IMG + bgName)'], {}), '(BG_FOLDER_IMG + bgName)\n', (17782, 17806), False, 'from PIL import Image\n'), ((17816, 17832), 'numpy.array', 'np.array', (['bgFile'], {}), '(bgFile)\n', (17824, 17832), True, 'import numpy as np\n'), ((18062, 18088), 'numpy.zeros', 'np.zeros', (['N_OBJECT', 'np.int'], {}), '(N_OBJECT, np.int)\n', (18070, 18088), True, 'import numpy as np\n'), ((18315, 18361), 'numpy.zeros', 'np.zeros', (['(N_OBJECT, N_OF_BACKGROUNDS)', 'np.int'], {}), '((N_OBJECT, N_OF_BACKGROUNDS), np.int)\n', (18323, 18361), True, 'import numpy as np\n'), ((8525, 8597), 'numpy.float32', 'np.float32', (['[[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]]'], {}), '([[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])\n', (8535, 8597), True, 'import numpy as np\n'), ((8624, 8658), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (8646, 8658), False, 'import cv2\n'), ((9988, 10055), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedImg', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_LINEAR'}), '(resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)\n', (10002, 10055), False, 'import cv2\n'), ((10091, 10160), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedMask', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_NEAREST'}), '(resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)\n', (10105, 10160), False, 'import cv2\n'), ((10716, 10788), 'numpy.float32', 'np.float32', (['[[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]]'], {}), '([[newXmin, newYmin], [newXmax, newYmin], [newXmin, newYmax]])\n', (10726, 10788), True, 'import numpy as np\n'), ((10823, 10857), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (10845, 10857), False, 'import cv2\n'), 
((12316, 12383), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedImg', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_LINEAR'}), '(resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)\n', (12330, 12383), False, 'import cv2\n'), ((12427, 12496), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedMask', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_NEAREST'}), '(resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)\n', (12441, 12496), False, 'import cv2\n'), ((12625, 12657), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)'}), '(low=0, high=2)\n', (12642, 12657), True, 'import numpy as np\n'), ((12792, 12849), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(maxW / 2, maxH / 2)', 'degrees', '(1)'], {}), '((maxW / 2, maxH / 2), degrees, 1)\n', (12815, 12849), False, 'import cv2\n'), ((15772, 15839), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedImg', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_LINEAR'}), '(resizedImg, M, (maxW, maxH), flags=cv2.INTER_LINEAR)\n', (15786, 15839), False, 'import cv2\n'), ((15883, 15952), 'cv2.warpAffine', 'cv2.warpAffine', (['resizedMask', 'M', '(maxW, maxH)'], {'flags': 'cv2.INTER_NEAREST'}), '(resizedMask, M, (maxW, maxH), flags=cv2.INTER_NEAREST)\n', (15897, 15952), False, 'import cv2\n'), ((16303, 16317), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (16311, 16317), True, 'import numpy as np\n'), ((16347, 16361), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (16355, 16361), True, 'import numpy as np\n'), ((18860, 18896), 'PIL.Image.open', 'Image.open', (['(OBJ_FOLDER_IMG + imgName)'], {}), '(OBJ_FOLDER_IMG + imgName)\n', (18870, 18896), False, 'from PIL import Image\n'), ((18915, 18931), 'numpy.array', 'np.array', (['imFile'], {}), '(imFile)\n', (18923, 18931), True, 'import numpy as np\n'), ((19069, 19107), 'PIL.Image.open', 'Image.open', (['(OBJ_FOLDER_MASK + maskName)'], {}), '(OBJ_FOLDER_MASK + maskName)\n', (19079, 19107), False, 'from PIL import Image\n'), ((19128, 19146), 
'numpy.array', 'np.array', (['maskFile'], {}), '(maskFile)\n', (19136, 19146), True, 'import numpy as np\n'), ((19167, 19203), 'numpy.tile', 'np.tile', (['mask[:, :, None]', '[1, 1, 3]'], {}), '(mask[:, :, None], [1, 1, 3])\n', (19174, 19203), True, 'import numpy as np\n'), ((5092, 5124), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)'}), '(low=0, high=2)\n', (5109, 5124), True, 'import numpy as np\n'), ((5418, 5447), 'numpy.floor', 'np.floor', (['(noise_y * freeSpace)'], {}), '(noise_y * freeSpace)\n', (5426, 5447), True, 'import numpy as np\n'), ((5583, 5612), 'numpy.floor', 'np.floor', (['(noise_y * freeSpace)'], {}), '(noise_y * freeSpace)\n', (5591, 5612), True, 'import numpy as np\n'), ((5937, 5969), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)'}), '(low=0, high=2)\n', (5954, 5969), True, 'import numpy as np\n'), ((6263, 6292), 'numpy.floor', 'np.floor', (['(noise_x * freeSpace)'], {}), '(noise_x * freeSpace)\n', (6271, 6292), True, 'import numpy as np\n'), ((6428, 6457), 'numpy.floor', 'np.floor', (['(noise_x * freeSpace)'], {}), '(noise_x * freeSpace)\n', (6436, 6457), True, 'import numpy as np\n'), ((5187, 5219), 'numpy.floor', 'np.floor', (['(noise_y * freeSpacePos)'], {}), '(noise_y * freeSpacePos)\n', (5195, 5219), True, 'import numpy as np\n'), ((5273, 5305), 'numpy.floor', 'np.floor', (['(noise_y * freeSpaceNeg)'], {}), '(noise_y * freeSpaceNeg)\n', (5281, 5305), True, 'import numpy as np\n'), ((6032, 6064), 'numpy.floor', 'np.floor', (['(noise_x * freeSpacePos)'], {}), '(noise_x * freeSpacePos)\n', (6040, 6064), True, 'import numpy as np\n'), ((6118, 6150), 'numpy.floor', 'np.floor', (['(noise_x * freeSpaceNeg)'], {}), '(noise_x * freeSpaceNeg)\n', (6126, 6150), True, 'import numpy as np\n'), ((20781, 20851), 'cv2.erode', 'cv2.erode', (['resizedMaskArr[bgindex][opindex]', 'kernelErode'], {'iterations': '(1)'}), '(resizedMaskArr[bgindex][opindex], kernelErode, iterations=1)\n', (20790, 
20851), False, 'import cv2\n'), ((20922, 20954), 'cv2.blur', 'cv2.blur', (['resizedMaskTmp', '(3, 3)'], {}), '(resizedMaskTmp, (3, 3))\n', (20930, 20954), False, 'import cv2\n')] |
import numpy as np
from dyneusr import DyNeuGraph
from dyneusr.datasets import make_trefoil
from kmapper import KeplerMapper
from sklearn.decomposition import PCA
# Generate synthetic dataset
# (make_trefoil is imported but unused in this sphere example)
import tadasets
X = tadasets.sphere(n=500, r=1)
# Sort by first column
inds = np.argsort(X[:, 0])
X = X[inds].copy()
# y is the row index of each (sorted) sample; passed to DyNeuGraph below
y = np.arange(X.shape[0])
# Generate shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)
# Lens: 2-D PCA projection of the point cloud that the cover is built on
lens = mapper.fit_transform(X, projection=PCA(2))
graph = mapper.map(lens, X, nr_cubes=6, overlap_perc=0.5)
# Visualize the shape graph using DyNeuSR's DyNeuGraph
dG = DyNeuGraph(G=graph, y=y)
# Write a standalone HTML visualization and open it in the browser
dG.visualize('dyneusr4D_sphere.html', template='4D', static=True, show=True)
| [
"sklearn.decomposition.PCA",
"tadasets.sphere",
"dyneusr.DyNeuGraph",
"kmapper.KeplerMapper",
"numpy.argsort",
"numpy.arange"
] | [((213, 240), 'tadasets.sphere', 'tadasets.sphere', ([], {'n': '(500)', 'r': '(1)'}), '(n=500, r=1)\n', (228, 240), False, 'import tadasets\n'), ((272, 291), 'numpy.argsort', 'np.argsort', (['X[:, 0]'], {}), '(X[:, 0])\n', (282, 291), True, 'import numpy as np\n'), ((315, 336), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (324, 336), True, 'import numpy as np\n'), ((389, 412), 'kmapper.KeplerMapper', 'KeplerMapper', ([], {'verbose': '(1)'}), '(verbose=1)\n', (401, 412), False, 'from kmapper import KeplerMapper\n'), ((583, 607), 'dyneusr.DyNeuGraph', 'DyNeuGraph', ([], {'G': 'graph', 'y': 'y'}), '(G=graph, y=y)\n', (593, 607), False, 'from dyneusr import DyNeuGraph\n'), ((455, 461), 'sklearn.decomposition.PCA', 'PCA', (['(2)'], {}), '(2)\n', (458, 461), False, 'from sklearn.decomposition import PCA\n')] |
"""
KnowYourData
============
A rapid and lightweight module to describe the statistics and structure of
data arrays for interactive use.
The most simple use case to display data is if you have a numpy array 'x':
>>> from knowyourdata import kyd
>>> kyd(x)
"""
import sys
import numpy as np
from IPython.display import display
# Getting HTML Template
from . import kyd_html_display_template
kyd_htmltemplate = kyd_html_display_template.kyd_htmltemplate
class KYD_datasummary(object):
    """A class to store and display the summary information"""
    # Rendered representations, filled in by make_text_repr / make_html_repr
    text_repr = ""
    html_repr = ""
    # Display Settings
    col_width = 10
    precision = 4
    def __repr__(self):
        """
        The Plain String Representation of the Data Summary
        """
        return self.text_repr
    def _repr_html_(self):
        """
        The HTML Representation of the Data Summary
        """
        return self.html_repr
    def make_html_repr(self):
        """Make HTML Representation of Data Summary"""
        # Substitute the wrapped KYD instance's attributes into the HTML template
        self.html_repr = kyd_htmltemplate.format(kyd_class=self.kyd_class)
    def make_txt_basic_stats(self):
        """Make Text Representation of Basic Statistics"""
        pstr_list = []
        pstr_struct_header1 = "Basic Statistics "
        pstr_struct_header2 = ''
        pstr_list.append(pstr_struct_header1)
        pstr_list.append(pstr_struct_header2)
        # One output row: five fixed-width columns
        template_str = (
            " {0:^10} "
            " {1:>8} "
            " {2:<10} "
            " {3:>8} "
            " {4:<10} "
        )
        # Five display columns: mean/std block, quartile labels, quartile
        # values, confidence-interval labels, confidence-interval values.
        tmp_data = [
            [
                "Mean:", "{kyd_class.mean:.{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "",
                "Std Dev:", "{kyd_class.std:.{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class)
            ],
            ["Min:", "1Q:", "Median:", "3Q:", "Max:"],
            [
                "{kyd_class.min: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.firstquartile: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.median: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.thirdquartile: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.max: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
            ],
            ['-99 CI:', '-95 CI:', '-68 CI:', '+68 CI:', '+95 CI:', '+99 CI:'],
            [
                "{kyd_class.ci_99[0]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.ci_95[0]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.ci_68[0]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.ci_68[1]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.ci_95[1]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
                "{kyd_class.ci_99[1]: .{kyd_class.precision}}".format(
                    kyd_class=self.kyd_class),
            ],
        ]
        n_tmp_data = len(tmp_data)
        num_rows_in_cols = [len(i) for i in tmp_data]
        num_rows = np.max(num_rows_in_cols)
        # Pad the shorter columns with empty strings so every column
        # has num_rows entries before row-wise formatting.
        for i in range(n_tmp_data):
            tmp_col = tmp_data[i]
            for j in range(num_rows_in_cols[i], num_rows):
                tmp_col.append("")
        # Emit one formatted line per row
        for i in range(num_rows):
            pstr_list.append(
                template_str.format(
                    tmp_data[0][i],
                    tmp_data[1][i],
                    tmp_data[2][i],
                    tmp_data[3][i],
                    tmp_data[4][i],
                )
            )
        return pstr_list
    def make_txt_struct(self):
        """Make Text Representation of Array"""
        pstr_list = []
        # pstr_struct_header0 = "................."
        # Commenting out Ansi Coloured Version
        # pstr_struct_header1 = '\033[1m' + "Array Structure " + '\033[0m'
        pstr_struct_header1 = "Array Structure "
        pstr_struct_header2 = " "
        # pstr_list.append(pstr_struct_header0)
        pstr_list.append(pstr_struct_header1)
        pstr_list.append(pstr_struct_header2)
        pstr_n_dim = (
            "Number of Dimensions:\t"
            "{kyd_class.ndim}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_n_dim)
        pstr_shape = (
            "Shape of Dimensions:\t"
            "{kyd_class.shape}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_shape)
        pstr_dtype = (
            "Array Data Type:\t"
            "{kyd_class.dtype}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_dtype)
        pstr_memsize = (
            "Memory Size:\t\t"
            "{kyd_class.human_memsize}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_memsize)
        pstr_spacer = ("")
        pstr_list.append(pstr_spacer)
        pstr_numnan = (
            "Number of NaN:\t"
            "{kyd_class.num_nan}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_numnan)
        pstr_numinf = (
            "Number of Inf:\t"
            "{kyd_class.num_inf}").format(
                kyd_class=self.kyd_class)
        pstr_list.append(pstr_numinf)
        return pstr_list
    def make_text_repr(self):
        """Making final text string for plain text representation"""
        tmp_text_repr = ""
        tmp_text_repr += "\n"
        pstr_basic = self.make_txt_basic_stats()
        pstr_struct = self.make_txt_struct()
        n_basic = len(pstr_basic)
        n_struct = len(pstr_struct)
        # Column widths: widest entry in each half plus a little padding
        l_colwidth = max([len(x) for x in pstr_basic]) + 1
        r_colwidth = max([len(x) for x in pstr_struct]) + 2
        # new_colwidth = self.col_width + 20
        # Finding the longest string
        len_list = max([n_basic, n_struct])
        # Zip the two halves side by side; pad whichever is shorter.
        for i in range(len_list):
            tmp_str = '| '
            if i < n_basic:
                tmp_str += (pstr_basic[i].ljust(l_colwidth))
            else:
                tmp_str += ''.ljust(l_colwidth)
            tmp_str += ' | '
            if i < n_struct:
                tmp_str += (pstr_struct[i].expandtabs().ljust(r_colwidth))
            else:
                tmp_str += ''.ljust(r_colwidth)
            tmp_str += '\t|'
            tmp_text_repr += tmp_str + "\n"
        tmp_text_repr += "\n"
        self.text_repr = tmp_text_repr
    def __init__(self, kyd_class):
        super(KYD_datasummary, self).__init__()
        # kyd_class is the KYD instance whose statistics are rendered
        self.kyd_class = kyd_class
        self.make_text_repr()
        self.make_html_repr()
class KYD(object):
    """The Central Class for KYD.

    Wraps an array-like object, records its structure (shape, dtype,
    memory footprint), computes basic statistics over its finite
    elements, builds a data summary for display, and then drops the
    references to the underlying data.
    """
    # Variable for Data Vector
    data = None
    # Initial Flags
    f_allfinite = False     # every element is finite
    f_allnonfinite = False  # no element is finite
    f_hasnan = False        # at least one NaN present
    f_hasinf = False        # at least one +/-Inf present
    # Initialized Numbers
    num_nan = 0
    num_inf = 0
    # Display Settings
    col_width = 10
    precision = 4
    def check_finite(self):
        """Checking to see if all elements are finite and setting flags"""
        if np.all(np.isfinite(self.data)):
            self.filt_data = self.data
            self.f_allfinite = True
        else:
            # Statistics below are computed over the finite subset only.
            finite_inds = np.where(np.isfinite(self.data))
            self.filt_data = self.data[finite_inds]
            if self.filt_data.size == 0:
                self.f_allnonfinite = True
            if np.any(np.isnan(self.data)):
                self.f_hasnan = True
                self.num_nan = np.sum(np.isnan(self.data))
            if np.any(np.isinf(self.data)):
                self.f_hasinf = True
                self.num_inf = np.sum(np.isinf(self.data))
    def check_struct(self):
        """Determining the Structure of the Numpy Array"""
        self.dtype = self.data.dtype
        self.ndim = self.data.ndim
        self.shape = self.data.shape
        self.size = self.data.size
        self.memsize = sys.getsizeof(self.data)
        self.human_memsize = sizeof_fmt(self.memsize)
    def get_basic_stats(self):
        """Get basic statistics about array.

        FIX: ``np.float_`` (an alias of ``np.float64``) was removed in
        NumPy 2.0.  Use ``np.float64`` directly for the scalar results and
        an explicit float64 ``asarray`` for the two-element confidence
        intervals, which keeps the produced values and dtypes unchanged.
        """
        if self.f_allnonfinite:
            # No finite elements -> every statistic is undefined.
            self.min = self.max = self.range = np.nan
            self.mean = self.std = self.median = np.nan
            self.firstquartile = self.thirdquartile = np.nan
            self.ci_68 = self.ci_95 = self.ci_99 = np.array([np.nan, np.nan])
            return
        self.min = np.float64(np.min(self.filt_data))
        self.max = np.float64(np.max(self.filt_data))
        self.range = self.max - self.min
        self.mean = np.mean(self.filt_data)
        self.std = np.std(self.filt_data)
        self.median = np.float64(np.median(self.filt_data))
        self.firstquartile = np.float64(np.percentile(self.filt_data, 25))
        self.thirdquartile = np.float64(np.percentile(self.filt_data, 75))
        # Two-sided confidence intervals as [low, high] float64 arrays.
        self.ci_99 = np.asarray(
            np.percentile(self.filt_data, np.array([0.5, 99.5])),
            dtype=np.float64)
        self.ci_95 = np.asarray(
            np.percentile(self.filt_data, np.array([2.5, 97.5])),
            dtype=np.float64)
        self.ci_68 = np.asarray(
            np.percentile(self.filt_data, np.array([16.0, 84.0])),
            dtype=np.float64)
    def make_summary(self):
        """Making Data Summary"""
        self.data_summary = KYD_datasummary(self)
    def clear_memory(self):
        """Ensuring the Numpy Array does not exist in memory"""
        del self.data
        del self.filt_data
    def display(self, short=False):
        """Displaying all relevant statistics"""
        if short:
            pass  # short form not implemented yet; full summary is shown
        try:
            # ``get_ipython`` only exists inside an IPython session; use
            # rich display there and plain print() everywhere else.
            get_ipython
            display(self.data_summary)
        except NameError:
            print(self.data_summary)
    def __init__(self, data):
        super(KYD, self).__init__()
        # Ensuring that the array is a numpy array
        if not isinstance(data, np.ndarray):
            data = np.array(data)
        self.data = data
        self.check_finite()
        self.check_struct()
        self.get_basic_stats()
        self.clear_memory()
        self.make_summary()
def sizeof_fmt(num, suffix='B'):
    """Format *num* bytes as a human-readable size string.

    Divides by 1024 through the binary prefixes (KiB, MiB, ...) until the
    magnitude fits. Adapted from the well-known Stack Overflow recipe:
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    prefixes = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    idx = 0
    # Scale down until the value fits the current prefix (or we run out).
    while idx < len(prefixes) and abs(num) >= 1024.0:
        num /= 1024.0
        idx += 1
    if idx < len(prefixes):
        return "%3.1f%s%s" % (num, prefixes[idx], suffix)
    # Fell off the end of the table: report in yobi-units.
    return "%.1f%s%s" % (num, 'Yi', suffix)
def kyd(data, full_statistics=False):
    """Print statistics of any numpy array.

    data -- Numpy Array of Data

    Keyword arguments:
    full_statistics -- printing all detailed statistics of the sources
    (Currently Not Implemented)
    """
    summary = KYD(data)
    # Short display unless the caller asked for the full report;
    # display(short=False) is exactly what a bare display() call does.
    summary.display(short=not full_statistics)
    return summary
| [
"numpy.mean",
"IPython.display.display",
"numpy.median",
"sys.getsizeof",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.isfinite",
"numpy.isnan",
"numpy.std",
"numpy.percentile",
"numpy.isinf"
] | [((3473, 3497), 'numpy.max', 'np.max', (['num_rows_in_cols'], {}), '(num_rows_in_cols)\n', (3479, 3497), True, 'import numpy as np\n'), ((8323, 8347), 'sys.getsizeof', 'sys.getsizeof', (['self.data'], {}), '(self.data)\n', (8336, 8347), False, 'import sys\n'), ((8951, 8974), 'numpy.mean', 'np.mean', (['self.filt_data'], {}), '(self.filt_data)\n', (8958, 8974), True, 'import numpy as np\n'), ((8994, 9016), 'numpy.std', 'np.std', (['self.filt_data'], {}), '(self.filt_data)\n', (9000, 9016), True, 'import numpy as np\n'), ((7475, 7497), 'numpy.isfinite', 'np.isfinite', (['self.data'], {}), '(self.data)\n', (7486, 7497), True, 'import numpy as np\n'), ((8736, 8762), 'numpy.array', 'np.array', (['[np.nan, np.nan]'], {}), '([np.nan, np.nan])\n', (8744, 8762), True, 'import numpy as np\n'), ((8813, 8835), 'numpy.min', 'np.min', (['self.filt_data'], {}), '(self.filt_data)\n', (8819, 8835), True, 'import numpy as np\n'), ((8866, 8888), 'numpy.max', 'np.max', (['self.filt_data'], {}), '(self.filt_data)\n', (8872, 8888), True, 'import numpy as np\n'), ((9049, 9074), 'numpy.median', 'np.median', (['self.filt_data'], {}), '(self.filt_data)\n', (9058, 9074), True, 'import numpy as np\n'), ((9115, 9148), 'numpy.percentile', 'np.percentile', (['self.filt_data', '(25)'], {}), '(self.filt_data, 25)\n', (9128, 9148), True, 'import numpy as np\n'), ((9189, 9222), 'numpy.percentile', 'np.percentile', (['self.filt_data', '(75)'], {}), '(self.filt_data, 75)\n', (9202, 9222), True, 'import numpy as np\n'), ((9945, 9971), 'IPython.display.display', 'display', (['self.data_summary'], {}), '(self.data_summary)\n', (9952, 9971), False, 'from IPython.display import display\n'), ((10218, 10232), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (10226, 10232), True, 'import numpy as np\n'), ((7624, 7646), 'numpy.isfinite', 'np.isfinite', (['self.data'], {}), '(self.data)\n', (7635, 7646), True, 'import numpy as np\n'), ((7809, 7828), 'numpy.isnan', 'np.isnan', (['self.data'], {}), 
'(self.data)\n', (7817, 7828), True, 'import numpy as np\n'), ((7950, 7969), 'numpy.isinf', 'np.isinf', (['self.data'], {}), '(self.data)\n', (7958, 7969), True, 'import numpy as np\n'), ((9298, 9319), 'numpy.array', 'np.array', (['[0.5, 99.5]'], {}), '([0.5, 99.5])\n', (9306, 9319), True, 'import numpy as np\n'), ((9396, 9417), 'numpy.array', 'np.array', (['[2.5, 97.5]'], {}), '([2.5, 97.5])\n', (9404, 9417), True, 'import numpy as np\n'), ((9494, 9516), 'numpy.array', 'np.array', (['[16.0, 84.0]'], {}), '([16.0, 84.0])\n', (9502, 9516), True, 'import numpy as np\n'), ((7906, 7925), 'numpy.isnan', 'np.isnan', (['self.data'], {}), '(self.data)\n', (7914, 7925), True, 'import numpy as np\n'), ((8047, 8066), 'numpy.isinf', 'np.isinf', (['self.data'], {}), '(self.data)\n', (8055, 8066), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 09:03:21 2016
@author: Ben
"""
import sys
import os
sys.path.append(os.path.abspath('..\..'))
sys.path.append(os.path.abspath('..'))
# =============================================================================
# HEPHAESTUS VALIDATION 8 - BEAM DISPLACEMENTS AND ROTATIONS SIMPLE AL BOX BEAM
# =============================================================================
# IMPORTS:
import numpy as np
from AeroComBAT.Structures import MaterialLib
from AeroComBAT.AircraftParts import Wing
from AeroComBAT.FEM import Model
# Define the width of the cross-section
c = .076
ctip = 0.0076+.001
croot = 0.0076+.001
# Normalized chordwise extents of the box cross-section
x1 = -0.039/croot/2
x2 = 0.039/croot/2
span = 0.76#.305*3/2
# Root and tip reference points of the wing beam axis
p1 = np.array([c/2,0.,0.])
p2 = np.array([c/2,span,0.])
# Rib stations along the normalized span
Y_rib = np.linspace(0.,1.,2)
b_s = np.linalg.norm((Y_rib[0],Y_rib[-1]))
# Material library: 1 = aluminum (iso), 2 = very soft placeholder (iso),
# 3 = transversely isotropic composite ply
matLib = MaterialLib()
matLib.addMat(1,'AL','iso',[68.9e9,.33,2700*2],.00025)
matLib.addMat(2,'Weak_mat','iso',[100,.33,10],.00025)
matLib.addMat(3,'AS43501-6*','trans_iso',[142e9,9.8e9,.34,.42,6e9,20000],0.0005)
# Per-laminate ply counts, materials, and orientations ([30/-30/-30/30])
n_ply = [4,4,4,4]
m_ply = [1,1,1,1]
th_ply = [30,-30,-30,30]
#n_ply = [4]
#m_ply = [1]
n_orients = 1
n_lams = 4
typeXSect = 'rectBox'
noe_dens = 120
chordVec=np.array([-1.,0.,0.])
# For tension bending coupling
#m_ply = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
#th_ply = [0,0,0,0,-30,-30,-30,-30,0,0,0,0,30,30,30,30]
# Build the wing: rectangular box cross-section referenced to its shear center
wing1 = Wing(1,p1,p2,croot,ctip,x1,x2,Y_rib,n_ply,m_ply,matLib,name='box',\
    noe=noe_dens,chordVec=chordVec,ref_ax='shearCntr',n_orients=n_orients,\
    n_lams=n_lams,typeXSect=typeXSect,meshSize=2,th_ply=th_ply)
sbeam1 = wing1.wingSects[0].SuperBeams[0]
# Corner points of the flat aerodynamic lifting surface (reuses x1..x4 names)
x1 = np.array([0,0.,0.])
x2 = np.array([c,0.,0.])
x3 = np.array([c,span,0.])
x4 = np.array([0,span,0.])
nspan = 36*2
nchord = 10
wing1.addLiftingSurface(1,x1,x2,x3,x4,nspan,nchord)
# Make a FEM model
model = Model()
model.addAircraftParts([wing1])
# Apply the constraint for the model
model.applyConstraints(0,'fix')
model.plotRigidModel(numXSects=10)
model.normalModesAnalysis()
freqs = model.freqs
# NOTE: the next two triple-quoted blocks are disabled code kept as
# no-op string literals (aero validation and static load case 1).
'''
# Aerodynamic Model Validation
model.calcAIC(.24,.47,.4572/2,symxz=True)
model.normalModesAnalysis()
pbar = np.dot(np.linalg.inv(model.D),np.dot(model.W,-model.umode[:,0]/max(abs(model.umode[:,0]))))
pbarReal = np.real(pbar)
pbarImag = np.imag(pbar)
pbarPlot = pbar[330:330+10]
#pbarPlot = pbar[0:0+10]
xs = np.zeros((10))
for i in range(0,10):
xs[i] = model.aeroBox[i].Xr[0]
xs = xs/c
import matplotlib.pyplot as plt
plt.figure(1)
plt.hold(True)
plt.plot(xs,np.real(pbarPlot),label='real part')
plt.plot(xs,np.imag(pbarPlot),label='imaginary part')
plt.legend()
plt.grid(True)
plt.hold(False)
'''
'''
# CASE 1:
# Apply the case load
model.resetPointLoads()
tipLoad = np.array([10000.,0.,0.,0.,0.,0.])
F = {80:tipLoad}
model.applyLoads(1,F=F)
# Run the analysis
model.staticAnalysis(1)
model.plotDeformedModel(figName='V8 Case 1',numXSects=8,contLim=[-4.0e6,4.0e6],\
warpScale=100,displScale=10,contour='sig_33')
'''
# Import NASTRAN Results:
# Columns alternate damping/frequency for the first six modes.
NASTRAN = np.genfromtxt('FlutterResults.csv', delimiter=',')
UNAST = NASTRAN[:,0]
Damp1 = NASTRAN[:,1]
Freq1 = NASTRAN[:,2]
Damp2 = NASTRAN[:,3]
Freq2 = NASTRAN[:,4]
Damp3 = NASTRAN[:,5]
Freq3 = NASTRAN[:,6]
Damp4 = NASTRAN[:,7]
Freq4 = NASTRAN[:,8]
Damp5 = NASTRAN[:,9]
Freq5 = NASTRAN[:,10]
Damp6 = NASTRAN[:,11]
Freq6 = NASTRAN[:,12]
# Flutter sweep: airspeeds, reduced frequencies, Mach numbers
U_vec = np.linspace(1,342,100)
kr_vec = np.array([0.,1e-07,1e-06,1e-05,1e-04,.001,.01,.05,.1,.5,1.,5.,10.,50])*10
M_vec = [0.]*len(kr_vec)
rho_0 = 1.225
nmodes = 6
model.flutterAnalysis(U_vec,kr_vec,M_vec,c,rho_0,nmodes,symxz=True,g=0.0)
# flutter plots
# NOTE: uses Python-2 dict.iteritems() and the long-removed plt.hold();
# this script targets an old Python 2 / matplotlib environment.
import matplotlib.pyplot as plt
cvec = ['b','g','r','c','m','y']
# Figure 1: damping vs airspeed for each mode, overlaid with NASTRAN
plt.figure(1)
plt.hold(True)
for PID, point in model.flutterPoints.iteritems():
    plt.plot(U_vec,point.gamma,color=cvec[PID],label='Mode '+str(PID+1))
plt.legend(loc=2)
plt.ylim([-1,1])
plt.plot(UNAST,Damp1,str(cvec[0])+'--',label='NASTRAN Mode 1')
plt.plot(UNAST,Damp2,str(cvec[1])+'--',label='NASTRAN Mode 2')
plt.plot(UNAST,Damp3,str(cvec[2])+'--',label='NASTRAN Mode 3')
plt.plot(UNAST,Damp4,str(cvec[3])+'--',label='NASTRAN Mode 4')
plt.plot(UNAST,Damp5,str(cvec[4])+'--',label='NASTRAN Mode 5')
plt.plot(UNAST,Damp6,str(cvec[5])+'--',label='NASTRAN Mode 6')
plt.title('Damping of the Wing Modes')
plt.xlabel('Free-stream airspeed, m/s')
plt.ylabel('Damping, g')
plt.grid(True)
plt.hold(False)
# Figure 2: frequency vs airspeed for each mode, overlaid with NASTRAN
plt.figure(2)
plt.hold(True)
for PID, point in model.flutterPoints.iteritems():
    plt.plot(U_vec,point.omega,color = cvec[PID],label='Mode '+str(PID+1))
plt.legend(loc=1)
#plt.ylim([0,150])
plt.plot(UNAST,Freq1,str(cvec[0])+'--',label='NASTRAN Mode 1')
plt.plot(UNAST,Freq2,str(cvec[1])+'--',label='NASTRAN Mode 2')
plt.plot(UNAST,Freq3,str(cvec[2])+'--',label='NASTRAN Mode 3')
plt.plot(UNAST,Freq4,str(cvec[3])+'--',label='NASTRAN Mode 4')
plt.plot(UNAST,Freq5,str(cvec[4])+'--',label='NASTRAN Mode 5')
plt.plot(UNAST,Freq6,str(cvec[5])+'--',label='NASTRAN Mode 6')
plt.title('Frequency of the Wing Modes')
plt.xlabel('Free-stream airspeed, m/s')
plt.ylabel('Mode Frequency, Hz')
plt.grid(True)
plt.hold(False)
# Diagnostics for the frequency-matching iteration at one airspeed index
cvec = ['b','g','r','c','m','y','k']
Uind = 80
point1 = model.flutterPoints[0]
point2 = model.flutterPoints[1]
point3 = model.flutterPoints[2]
point4 = model.flutterPoints[3]
omegaAeros = point1.omegaAeroDict[U_vec[Uind]]
omegaRoots1 = point1.omegaRootDict[U_vec[Uind]]
omegaRoots2 = point2.omegaRootDict[U_vec[Uind]]
omegaRoots3 = point3.omegaRootDict[U_vec[Uind]]
omegaRoots4 = point4.omegaRootDict[U_vec[Uind]]
gammas1 = point1.gammaDict[U_vec[Uind]]
gammas2 = point2.gammaDict[U_vec[Uind]]
gammas3 = point3.gammaDict[U_vec[Uind]]
gammas4 = point4.gammaDict[U_vec[Uind]]
# Figure 3: root frequencies vs aerodynamic frequency
plt.figure(3)
plt.hold(True)
plt.plot(omegaAeros,omegaAeros,'ko-',label='omega_aero')
plt.plot(omegaAeros,omegaRoots1,'bo-',label='omega_root_1')
plt.plot(omegaAeros,omegaRoots2,'go-',label='omega_root_2')
plt.plot(omegaAeros,omegaRoots3,'ro-',label='omega_root_3')
plt.plot(omegaAeros,omegaRoots4,'co-',label='omega_root_4')
plt.legend(loc=2)
plt.ylim([0,2200])
plt.xlim([0,1500])
plt.xlabel('Aerodynamic frequency, rad')
plt.ylabel('Root frequency, rad')
# NOTE: 'Requencies' typo below is in the original title string
plt.title('Interpolation of Root Requencies at V=%4.2f m/s'%(U_vec[Uind]))
plt.grid(True)
plt.hold(False)
# Figure 4: root damping vs aerodynamic frequency
plt.figure(4)
plt.hold(True)
plt.plot(omegaAeros,gammas1,'bo-',label='gamma_root_1')
plt.plot(omegaAeros,gammas2,'go-',label='gamma_root_2')
plt.plot(omegaAeros,gammas3,'ro-',label='gamma_root_3')
plt.plot(omegaAeros,gammas4,'co-',label='gamma_root_4')
plt.legend(loc=3)
plt.ylim([-1.2,.1])
plt.xlim([0,1500])
plt.xlabel('Aerodynamic frequency, rad')
plt.ylabel('Damping (g)')
plt.title('Interpolation of Root Damping at V=%4.2f m/s'%(U_vec[Uind]))
plt.grid(True)
plt.hold(False)
# NOTE: everything below is disabled code retained as no-op triple-quoted
# string literals (profiling, flat-plate comparison, modal plots, AIC
# profiling, and static load cases 2/3).
'''
import cProfile
#cProfile.run('model.flutterAnalysis(U_vec,kr_vec,M_vec,c/2,rho_0,nmodes,symxz=True,g=.01)')
flatPlateFlutter = np.genfromtxt('FlatPlateFlutterResults.csv', delimiter=',',dtype=float)
plt.figure(3)
plt.hold(True)
for i in range(0,3):
plt.plot(flatPlateFlutter[1:,0],flatPlateFlutter[1:,i+1],label='mode'+str(i+1))
plt.legend(loc=3)
#plt.ylim([-.001,150])
plt.grid(True)
plt.hold(False)
plt.figure(4)
plt.hold(True)
for i in range(0,3):
plt.plot(flatPlateFlutter[1:,0],flatPlateFlutter[1:,i+4],label='mode'+str(i+1))
plt.legend(loc=1)
#plt.ylim([0,150])
plt.grid(True)
plt.hold(False)
'''
'''
model.normalModesAnalysis()
model.plotDeformedModel(mode=1,figName='mode 1',numXSects=10,displScale=1)
model.plotDeformedModel(mode=2,figName='mode 2',numXSects=10,displScale=1)
model.plotDeformedModel(mode=3,figName='mode 3',numXSects=10,displScale=1)
model.plotDeformedModel(mode=4,figName='mode 4',numXSects=10,displScale=1)
model.plotDeformedModel(mode=5,figName='mode 5',numXSects=10,displScale=1)
'''
'''
import cProfile
# Initialize an array of PANIDs
PANIDs = model.aeroBox.keys()
# Initialize the number of panels
numPan = len(PANIDs)
Area = np.zeros((numPan,numPan))
# For all the recieving panels
for i in range(0,numPan):
recievingBox = model.aeroBox[PANIDs[i]]
Area[i,i] = recievingBox.Area
model.AeroArea = Area
cProfile.run('model.calcAIC(0.,1.,0.8990566037735849*2)')
'''
'''
# Test Normal modes
model.normalModesAnalysis(analysis_name='Normal_Modes')
# Write the beam displacements and rotations to a file
freqs = model.freqs
model.plotDeformedModel(mode=1,figName='Mode 1',\
numXSects=10,warpScale=1,displScale=2)
model.plotDeformedModel(mode=2,figName='Modes 2',analysis_name='Normal_Modes',\
numXSects=10,warpScale=1,displScale=2)
model.plotDeformedModel(mode=3,figName='Modes 3',analysis_name='Normal_Modes',\
numXSects=10,warpScale=1,displScale=2)
model.plotDeformedModel(mode=4,figName='Modes 4',analysis_name='Normal_Modes',\
numXSects=10,warpScale=1,displScale=2)
model.plotDeformedModel(mode=5,figName='Modes 5',analysis_name='Normal_Modes',\
numXSects=10,warpScale=1,displScale=2)
model.plotDeformedModel(mode=6,figName='Modes 6',analysis_name='Normal_Modes',\
numXSects=10,warpScale=1,displScale=2)
'''
'''
# CASE 2:
# Apply the case load
def f(x):
vx = -0.1*(-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
vy = (-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
pz = 0
tz = .2*c*(-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
return np.array([vx,vy,pz,tz])/1.0e5
wing1.applyLoads(f=f,allElems=True)
# Run the analysis
wing1.staticAnalysis(resetPointLoads=True)
wing1.plotDeformedWing(figName='V8 Case 2',numXSects=10,contLim=[0.,5.0e8],\
warpScale=10,displScale=10,contour='MaxPrin')
# Write the beam displacements and rotations to a file
sbeam1.writeDisplacements(fileName = 'V8_Case_2.csv')
# CASE 3:
# Apply the case load
def f(x):
vx = 1e3
vy = 1e3
pz = -1e3
tz = 1e3
return np.array([vx,vy,pz,tz])/1e1
wing1.applyLoads(f=f,allElems=True)
# Run the analysis
wing1.staticAnalysis(resetPointLoads=True)
wing1.plotDeformedWing(figName='V8 Case 3',numXSects=10,contLim=[0.,5.0e8],\
warpScale=100,displScale=10,contour='MaxPrin')
# Write the beam displacements and rotations to a file
sbeam1.writeDisplacements(fileName = 'V8_Case_3.csv')'''
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"AeroComBAT.AircraftParts.Wing",
"numpy.array",
"numpy.linspace",
"AeroComBAT.FEM.Model",
"matplotlib.pyplot.figure",
"numpy.linalg.n... | [((731, 758), 'numpy.array', 'np.array', (['[c / 2, 0.0, 0.0]'], {}), '([c / 2, 0.0, 0.0])\n', (739, 758), True, 'import numpy as np\n'), ((758, 786), 'numpy.array', 'np.array', (['[c / 2, span, 0.0]'], {}), '([c / 2, span, 0.0])\n', (766, 786), True, 'import numpy as np\n'), ((790, 814), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(2)'], {}), '(0.0, 1.0, 2)\n', (801, 814), True, 'import numpy as np\n'), ((817, 854), 'numpy.linalg.norm', 'np.linalg.norm', (['(Y_rib[0], Y_rib[-1])'], {}), '((Y_rib[0], Y_rib[-1]))\n', (831, 854), True, 'import numpy as np\n'), ((864, 877), 'AeroComBAT.Structures.MaterialLib', 'MaterialLib', ([], {}), '()\n', (875, 877), False, 'from AeroComBAT.Structures import MaterialLib\n'), ((1228, 1254), 'numpy.array', 'np.array', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (1236, 1254), True, 'import numpy as np\n'), ((1392, 1616), 'AeroComBAT.AircraftParts.Wing', 'Wing', (['(1)', 'p1', 'p2', 'croot', 'ctip', 'x1', 'x2', 'Y_rib', 'n_ply', 'm_ply', 'matLib'], {'name': '"""box"""', 'noe': 'noe_dens', 'chordVec': 'chordVec', 'ref_ax': '"""shearCntr"""', 'n_orients': 'n_orients', 'n_lams': 'n_lams', 'typeXSect': 'typeXSect', 'meshSize': '(2)', 'th_ply': 'th_ply'}), "(1, p1, p2, croot, ctip, x1, x2, Y_rib, n_ply, m_ply, matLib, name=\n 'box', noe=noe_dens, chordVec=chordVec, ref_ax='shearCntr', n_orients=\n n_orients, n_lams=n_lams, typeXSect=typeXSect, meshSize=2, th_ply=th_ply)\n", (1396, 1616), False, 'from AeroComBAT.AircraftParts import Wing\n'), ((1649, 1672), 'numpy.array', 'np.array', (['[0, 0.0, 0.0]'], {}), '([0, 0.0, 0.0])\n', (1657, 1672), True, 'import numpy as np\n'), ((1674, 1697), 'numpy.array', 'np.array', (['[c, 0.0, 0.0]'], {}), '([c, 0.0, 0.0])\n', (1682, 1697), True, 'import numpy as np\n'), ((1699, 1723), 'numpy.array', 'np.array', (['[c, span, 0.0]'], {}), '([c, span, 0.0])\n', (1707, 1723), True, 'import numpy as np\n'), ((1726, 1750), 'numpy.array', 'np.array', (['[0, span, 0.0]'], {}), 
'([0, span, 0.0])\n', (1734, 1750), True, 'import numpy as np\n'), ((1855, 1862), 'AeroComBAT.FEM.Model', 'Model', ([], {}), '()\n', (1860, 1862), False, 'from AeroComBAT.FEM import Model\n'), ((3020, 3070), 'numpy.genfromtxt', 'np.genfromtxt', (['"""FlutterResults.csv"""'], {'delimiter': '""","""'}), "('FlutterResults.csv', delimiter=',')\n", (3033, 3070), True, 'import numpy as np\n'), ((3356, 3380), 'numpy.linspace', 'np.linspace', (['(1)', '(342)', '(100)'], {}), '(1, 342, 100)\n', (3367, 3380), True, 'import numpy as np\n'), ((3669, 3682), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3679, 3682), True, 'import matplotlib.pyplot as plt\n'), ((3683, 3697), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (3691, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3839), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (3832, 3839), True, 'import matplotlib.pyplot as plt\n'), ((3840, 3857), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1, 1]'], {}), '([-1, 1])\n', (3848, 3857), True, 'import matplotlib.pyplot as plt\n'), ((4235, 4273), 'matplotlib.pyplot.title', 'plt.title', (['"""Damping of the Wing Modes"""'], {}), "('Damping of the Wing Modes')\n", (4244, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4274, 4313), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Free-stream airspeed, m/s"""'], {}), "('Free-stream airspeed, m/s')\n", (4284, 4313), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4338), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Damping, g"""'], {}), "('Damping, g')\n", (4324, 4338), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4353), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4347, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4369), 'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (4362, 4369), True, 'import matplotlib.pyplot as plt\n'), ((4371, 4384), 'matplotlib.pyplot.figure', 'plt.figure', 
(['(2)'], {}), '(2)\n', (4381, 4384), True, 'import matplotlib.pyplot as plt\n'), ((4385, 4399), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (4393, 4399), True, 'import matplotlib.pyplot as plt\n'), ((4526, 4543), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (4536, 4543), True, 'import matplotlib.pyplot as plt\n'), ((4941, 4981), 'matplotlib.pyplot.title', 'plt.title', (['"""Frequency of the Wing Modes"""'], {}), "('Frequency of the Wing Modes')\n", (4950, 4981), True, 'import matplotlib.pyplot as plt\n'), ((4982, 5021), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Free-stream airspeed, m/s"""'], {}), "('Free-stream airspeed, m/s')\n", (4992, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5054), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mode Frequency, Hz"""'], {}), "('Mode Frequency, Hz')\n", (5032, 5054), True, 'import matplotlib.pyplot as plt\n'), ((5055, 5069), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5063, 5069), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5085), 'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (5078, 5085), True, 'import matplotlib.pyplot as plt\n'), ((5662, 5675), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (5672, 5675), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5690), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (5684, 5690), True, 'import matplotlib.pyplot as plt\n'), ((5691, 5750), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'omegaAeros', '"""ko-"""'], {'label': '"""omega_aero"""'}), "(omegaAeros, omegaAeros, 'ko-', label='omega_aero')\n", (5699, 5750), True, 'import matplotlib.pyplot as plt\n'), ((5748, 5810), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'omegaRoots1', '"""bo-"""'], {'label': '"""omega_root_1"""'}), "(omegaAeros, omegaRoots1, 'bo-', label='omega_root_1')\n", (5756, 5810), True, 'import matplotlib.pyplot as plt\n'), 
((5808, 5870), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'omegaRoots2', '"""go-"""'], {'label': '"""omega_root_2"""'}), "(omegaAeros, omegaRoots2, 'go-', label='omega_root_2')\n", (5816, 5870), True, 'import matplotlib.pyplot as plt\n'), ((5868, 5930), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'omegaRoots3', '"""ro-"""'], {'label': '"""omega_root_3"""'}), "(omegaAeros, omegaRoots3, 'ro-', label='omega_root_3')\n", (5876, 5930), True, 'import matplotlib.pyplot as plt\n'), ((5928, 5990), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'omegaRoots4', '"""co-"""'], {'label': '"""omega_root_4"""'}), "(omegaAeros, omegaRoots4, 'co-', label='omega_root_4')\n", (5936, 5990), True, 'import matplotlib.pyplot as plt\n'), ((5988, 6005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (5998, 6005), True, 'import matplotlib.pyplot as plt\n'), ((6006, 6025), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 2200]'], {}), '([0, 2200])\n', (6014, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6025, 6044), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1500]'], {}), '([0, 1500])\n', (6033, 6044), True, 'import matplotlib.pyplot as plt\n'), ((6044, 6084), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Aerodynamic frequency, rad"""'], {}), "('Aerodynamic frequency, rad')\n", (6054, 6084), True, 'import matplotlib.pyplot as plt\n'), ((6085, 6118), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Root frequency, rad"""'], {}), "('Root frequency, rad')\n", (6095, 6118), True, 'import matplotlib.pyplot as plt\n'), ((6119, 6193), 'matplotlib.pyplot.title', 'plt.title', (["('Interpolation of Root Requencies at V=%4.2f m/s' % U_vec[Uind])"], {}), "('Interpolation of Root Requencies at V=%4.2f m/s' % U_vec[Uind])\n", (6128, 6193), True, 'import matplotlib.pyplot as plt\n'), ((6194, 6208), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6202, 6208), True, 'import matplotlib.pyplot as plt\n'), ((6209, 6224), 
'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (6217, 6224), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6239), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (6236, 6239), True, 'import matplotlib.pyplot as plt\n'), ((6240, 6254), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (6248, 6254), True, 'import matplotlib.pyplot as plt\n'), ((6255, 6313), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'gammas1', '"""bo-"""'], {'label': '"""gamma_root_1"""'}), "(omegaAeros, gammas1, 'bo-', label='gamma_root_1')\n", (6263, 6313), True, 'import matplotlib.pyplot as plt\n'), ((6311, 6369), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'gammas2', '"""go-"""'], {'label': '"""gamma_root_2"""'}), "(omegaAeros, gammas2, 'go-', label='gamma_root_2')\n", (6319, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6425), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'gammas3', '"""ro-"""'], {'label': '"""gamma_root_3"""'}), "(omegaAeros, gammas3, 'ro-', label='gamma_root_3')\n", (6375, 6425), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6481), 'matplotlib.pyplot.plot', 'plt.plot', (['omegaAeros', 'gammas4', '"""co-"""'], {'label': '"""gamma_root_4"""'}), "(omegaAeros, gammas4, 'co-', label='gamma_root_4')\n", (6431, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6479, 6496), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)'}), '(loc=3)\n', (6489, 6496), True, 'import matplotlib.pyplot as plt\n'), ((6497, 6518), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.2, 0.1]'], {}), '([-1.2, 0.1])\n', (6505, 6518), True, 'import matplotlib.pyplot as plt\n'), ((6517, 6536), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1500]'], {}), '([0, 1500])\n', (6525, 6536), True, 'import matplotlib.pyplot as plt\n'), ((6536, 6576), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Aerodynamic frequency, rad"""'], {}), "('Aerodynamic frequency, rad')\n", (6546, 6576), True, 'import 
matplotlib.pyplot as plt\n'), ((6577, 6602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Damping (g)"""'], {}), "('Damping (g)')\n", (6587, 6602), True, 'import matplotlib.pyplot as plt\n'), ((6603, 6674), 'matplotlib.pyplot.title', 'plt.title', (["('Interpolation of Root Damping at V=%4.2f m/s' % U_vec[Uind])"], {}), "('Interpolation of Root Damping at V=%4.2f m/s' % U_vec[Uind])\n", (6612, 6674), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6689), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6683, 6689), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6705), 'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (6698, 6705), True, 'import matplotlib.pyplot as plt\n'), ((119, 144), 'os.path.abspath', 'os.path.abspath', (['"""..\\\\.."""'], {}), "('..\\\\..')\n", (134, 144), False, 'import os\n'), ((161, 182), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (176, 182), False, 'import os\n'), ((3388, 3486), 'numpy.array', 'np.array', (['[0.0, 1e-07, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, \n 10.0, 50]'], {}), '([0.0, 1e-07, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.05, 0.1, 0.5, \n 1.0, 5.0, 10.0, 50])\n', (3396, 3486), True, 'import numpy as np\n')] |
import json
import random
import numpy as np
high = ['HospitalName']
med = ['State', 'CountyName', 'HospitalType', 'PhoneNumber', 'HospitalOwner']
low = ['MeasureCode', 'MeasureName']
algo_list = ['full-den', 'k-den']
DCFileName = "/testdata/hospital_constraints.txt" # path to constraints file
def testcase_gen(curPolicyArray, policySenLevel, testcase_count, limit, runs, database_name, relation_name, test_name, is_monotonic, step, start, testfanout, test_obl_cueset):
# full-den with MVC
test = []
for k in range(0, runs):
np.random.seed(42+k)
# without replacement sample for tid's
if is_monotonic:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=False)
else:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=True)
np.random.seed(42+k)
attributes_sample = np.random.choice(curPolicyArray, start+step*(testcase_count), replace=True)
for i in range(0, testcase_count):
testcase = {}
testcase['expID'] = k
testcase['userID'] = "38400000-8cf0-11bd-b23e-10b96e4ef00d"
testcase['userName'] = "Samus"
testcase['databaseName'] = database_name
testcase['relationName'] = relation_name
testcase['purpose'] = "analytics"
testcase['DCFileName'] = DCFileName
testcase['algo'] = "full-den"
testcase['k_value'] = 0
testcase['limit'] = limit
testcase['isAscend'] = True
testcase['policySenLevel'] = policySenLevel
policies = []
for j in range(0, start+step*(i)):
policy = {}
policy['databaseName'] = database_name
policy['relationName'] = relation_name
policy['tupleID'] = int(tid[j])
policy['attributeName'] = attributes_sample[j]
policies.append(policy)
randomFlag = {}
randomFlag['seed'] = 42 + k
randomFlag['randomCuesetChoosing'] = True
randomFlag['randomHiddenCellChoosing'] = True
testcase['policies'] = policies
testcase['randomFlag'] = randomFlag
testcase['testname'] = test_name + "_full_MVC"
testcase['useMVC'] = True
# testcase['testFanOut'] = testfanout
testcase['testOblCueset'] = test_obl_cueset
test.append(testcase)
with open('../testdata/testcases/testcases_full_MVC_'+ policySenLevel + "_" + relation_name + "_obl_" + str(test_obl_cueset) +'.json', 'w') as f:
json.dump(test, f, ensure_ascii=False)
if test_obl_cueset is False:
# full-den without MVC
test = []
for k in range(0, runs):
np.random.seed(42+k)
# without replacement sample for tid's
if is_monotonic:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=False)
else:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=True)
np.random.seed(42+k)
attributes_sample = np.random.choice(curPolicyArray, start+step*(testcase_count), replace=True)
for i in range(0, testcase_count):
testcase = {}
testcase['expID'] = k
testcase['userID'] = "38400000-8cf0-11bd-b23e-10b96e4ef00d"
testcase['userName'] = "Samus"
testcase['databaseName'] = database_name
testcase['relationName'] = relation_name
testcase['purpose'] = "analytics"
testcase['DCFileName'] = DCFileName
testcase['algo'] = "full-den"
testcase['k_value'] = 0
testcase['limit'] = limit
testcase['isAscend'] = True
testcase['policySenLevel'] = policySenLevel
policies = []
for j in range(0, start+step*(i)):
policy = {}
policy['databaseName'] = database_name
policy['relationName'] = relation_name
policy['tupleID'] = int(tid[j])
policy['attributeName'] = attributes_sample[j]
policies.append(policy)
randomFlag = {}
randomFlag['seed'] = 42 + k # TODO: involving some randomness
randomFlag['randomCuesetChoosing'] = True
randomFlag['randomHiddenCellChoosing'] = True
testcase['policies'] = policies
testcase['randomFlag'] = randomFlag
testcase['testname'] = test_name + "_full_noMVC"
testcase['useMVC'] = False
testcase['testFanOut'] = testfanout
testcase['testOblCueset'] = test_obl_cueset
test.append(testcase)
with open('../testdata/testcases/testcases_full_noMVC_'+ policySenLevel + "_" + relation_name + "_obl_" + str(test_obl_cueset) +'.json', 'w') as f:
json.dump(test, f, ensure_ascii=False)
# k-den with MVC
if test_obl_cueset is False:
k_values = [min((0.1 + 4 * round(i/10, 1)), 1) for i in range(3)]
print(k_values)
for t in range(0, len(k_values)):
test = []
for k in range(0, runs):
np.random.seed(42+k)
# without replacement sample for tid's
if is_monotonic:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=False)
else:
tid = np.random.choice(range(1, limit), start+step*(testcase_count), replace=True)
np.random.seed(42+k)
attributes_sample = np.random.choice(curPolicyArray, start+step*(testcase_count), replace=True)
for i in range(0, testcase_count):
testcase = {}
testcase['expID'] = k
testcase['userID'] = "38400000-8cf0-11bd-b23e-10b96e4ef00d"
testcase['userName'] = "Samus"
testcase['databaseName'] = database_name
testcase['relationName'] = relation_name
testcase['purpose'] = "analytics"
testcase['DCFileName'] = DCFileName
testcase['algo'] = "k-den"
testcase['k_value'] = k_values[t]
print(testcase['k_value'])
testcase['limit'] = limit
testcase['isAscend'] = True
testcase['policySenLevel'] = policySenLevel
policies = []
for j in range(0, start+step*(i)):
policy = {}
policy['databaseName'] = database_name
policy['relationName'] = relation_name
policy['tupleID'] = int(tid[j])
policy['attributeName'] = attributes_sample[j]
policies.append(policy)
randomFlag = {}
randomFlag['seed'] = 42 + k
randomFlag['randomCuesetChoosing'] = True
randomFlag['randomHiddenCellChoosing'] = True
testcase['policies'] = policies
testcase['randomFlag'] = randomFlag
testcase['testname'] = test_name + "_k_MVC_" + str(k_values[t]).replace('.', '_')
testcase['useMVC'] = True
# testcase['testFanOut'] = testfanout
testcase['testOblCueset'] = test_obl_cueset
test.append(testcase)
with open('../testdata/testcases/testcases_k_'+ str(k_values[t]).replace('.', '_')+'_MVC_'+ policySenLevel + "_" + relation_name +'.json', 'w') as f:
json.dump(test, f, ensure_ascii=False)
if __name__ == "__main__":
curPolicyArray = high
policySenLevel = "high"
# if set as True, monotonically selecting policies in different experiments
is_monotonic = True
database_name = "hospitaldb"
relation_name = "hospital"
testcase_count = 10 # no. of testcases in each test
start = 10 # no. of sensitive cells in the starting testcase
step = 10 # no. of sensitive cells growing in testcases
# limit = 99980 # no. of tuples
limit = 10000 # no. of tuples
runs = 4 # no. of runs
testfanout = True
test_obl_cueset = False
test_name_base="server_test_hospital"
testcase_gen(curPolicyArray, policySenLevel, testcase_count, limit, runs, database_name, relation_name, test_name_base, is_monotonic, step, start, testfanout, test_obl_cueset) | [
"numpy.random.choice",
"numpy.random.seed",
"json.dump"
] | [((565, 587), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (579, 587), True, 'import numpy as np\n'), ((872, 894), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (886, 894), True, 'import numpy as np\n'), ((921, 998), 'numpy.random.choice', 'np.random.choice', (['curPolicyArray', '(start + step * testcase_count)'], {'replace': '(True)'}), '(curPolicyArray, start + step * testcase_count, replace=True)\n', (937, 998), True, 'import numpy as np\n'), ((2682, 2720), 'json.dump', 'json.dump', (['test', 'f'], {'ensure_ascii': '(False)'}), '(test, f, ensure_ascii=False)\n', (2691, 2720), False, 'import json\n'), ((2851, 2873), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (2865, 2873), True, 'import numpy as np\n'), ((3182, 3204), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (3196, 3204), True, 'import numpy as np\n'), ((3235, 3312), 'numpy.random.choice', 'np.random.choice', (['curPolicyArray', '(start + step * testcase_count)'], {'replace': '(True)'}), '(curPolicyArray, start + step * testcase_count, replace=True)\n', (3251, 3312), True, 'import numpy as np\n'), ((5199, 5237), 'json.dump', 'json.dump', (['test', 'f'], {'ensure_ascii': '(False)'}), '(test, f, ensure_ascii=False)\n', (5208, 5237), False, 'import json\n'), ((5513, 5535), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (5527, 5535), True, 'import numpy as np\n'), ((5868, 5890), 'numpy.random.seed', 'np.random.seed', (['(42 + k)'], {}), '(42 + k)\n', (5882, 5890), True, 'import numpy as np\n'), ((5925, 6002), 'numpy.random.choice', 'np.random.choice', (['curPolicyArray', '(start + step * testcase_count)'], {'replace': '(True)'}), '(curPolicyArray, start + step * testcase_count, replace=True)\n', (5941, 6002), True, 'import numpy as np\n'), ((8067, 8105), 'json.dump', 'json.dump', (['test', 'f'], {'ensure_ascii': '(False)'}), '(test, f, ensure_ascii=False)\n', (8076, 8105), 
False, 'import json\n')] |
"""
BFR
"""
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
class BFR(object):
class Local(object):
def __init__(self, n_cluster, soft_n_cluster=None, shrink=0.5,
input_file_path=None, iter_func=None,
chunk_size=None, kmeans_params=None, print_log=True,
write_to_file=False, output_file=None, cache_labels=None):
"""
:param n_cluster: int
:param soft_n_cluster: int
Used to roughly cluster points.
:param shrink: float=0~1.0
Used to reduce the threshold of Clustering Algorithm.
:param input_file_path: str
The file to read the input results.
If parameter "data" is not specified, this parameter is used to build a generator.
:param iter_func: function
The function used to build iterator. The iterator returns pandas.DataFrame with index.
:param output_file_path: str
The file to store the output results.
"""
self._n_cluster = n_cluster
self._soft_n_cluster = soft_n_cluster if soft_n_cluster is not None else n_cluster ** 2
self._shrink = shrink
self._print_log = print_log
self._data_generator = None
self.clusters = self.labels = None
self._write_to_file, self._output_file = write_to_file, output_file
if cache_labels is None:
self._cache_labels = not write_to_file
else:
self._cache_labels = cache_labels
if isinstance(kmeans_params, dict):
self._kmeans_params = kmeans_params
else:
self._kmeans_params = {}
if input_file_path is None and iter_func is None:
print("No data input. Please call add_data(generator) to add data to the model.")
else:
self.add_data(iter_func=iter_func, input_file_path=input_file_path, chunk_size=chunk_size)
def add_data(self, iter_func, input_file_path, chunk_size):
"""
:param input_file_path: str
:param iter_func: function
:param chunk_size: int
"""
if callable(iter_func):
self._data_generator = iter_func
elif isinstance(input_file_path, str):
self._data_generator = lambda: pd.read_table(input_file_path,
delimiter="[^0-9a-zA-Z\.\n]", dtype=np.float64,
chunksize=chunk_size)
else:
raise ValueError
def run(self):
"""
DS: (n_clusters, [n, SUM, SUM_SQUARE])
CS: (n_clusters, [n, SUM, SUM_SQUARE])
RS: (n_samples, dimension)
"""
iterator_vectors = self._data_generator()
vectors = next(iterator_vectors)
n_dim = vectors.shape[1]
"""
Initialize DS, CS, RS.
"""
DS, CS, RS = self._initialization(vectors, n_dim)
if self._print_log:
print("Tasks start. Start to print intermediate results ...")
self.print_log(1, DS, CS, RS)
"""
Iteratively process chunks
"""
for i, vectors in enumerate(iterator_vectors):
DS, CS, RS = self._iteration(vectors, n_dim, DS, CS, RS)
if self._print_log:
self.print_log(i+2, DS, CS, RS)
DS, CS = self._last_round(DS, CS, n_dim)
self.clusters = DS[:, 1:n_dim + 1]
if self._print_log:
self.print_log("final", DS, CS, RS)
"""
Save the results: cluster coordinates and point labels.
"""
if self._cache_labels and self._write_to_file:
self.labels = pd.concat(list(self.classify(DS, self._n_cluster, n_dim)))
self.labels.to_csv(self._output_file, mode="w", sep=",")
elif self._cache_labels:
self.labels = pd.concat(list(self.classify(DS, self._n_cluster, n_dim)))
elif self._write_to_file:
pd.DataFrame(columns=["cluster"]).to_csv(self._output_file, mode='w', sep=",", header=True)
for df in self.classify(DS, self._n_cluster, n_dim):
df.to_csv(self._output_file, mode='a', sep=",", header=False)
return self
@staticmethod
def print_log(i, DS, CS, RS):
print("Round {0}: \n"
" Number of points in DS = {1}\n"
" Number of clusters in CS = {2}\n"
" Number of points in CS = {3},\n"
" Number of points in RS = {4}".format(i,
int(np.sum(DS[:, 0])), CS.shape[0],
int(np.sum(CS[:, 0])), RS.shape[0]))
def _initialization(self, vectors, n_dim):
"""
:param vectors: pandas.DataFrame
:param n_dim: int
:return:
"""
# Step 2-3
rest, RS = self._initialize_RS(vectors=vectors,
soft_n_cluster=self._soft_n_cluster,
shrink=self._shrink,
kmeans_params=self._kmeans_params)
# Step 4-5
DS = self._initialize_DS(vectors=rest,
n_cluster=self._n_cluster,
n_dim=n_dim,
kmeans_params=self._kmeans_params)
# Step 6
CS, RS = self._initialize_CS(vectors=RS,
n_cluster=self._soft_n_cluster,
n_dim=n_dim,
kmeans_params=self._kmeans_params)
return DS, CS, RS
def _iteration(self, vectors, n_dim, DS, CS, RS):
# Update DS
DS, vectors = self._update_DS_or_CS(vectors, DS, n_dim)
# Update CS
CS, vectors = self._update_DS_or_CS(vectors, CS, n_dim)
# Update RS and CS
CS, RS = self._update_RS(vectors, CS, RS, self._soft_n_cluster, n_dim, self._kmeans_params)
# Merge clusters in CS
CS = self._merge_CS(CS, n_dim)
return DS, CS, RS
def _last_round(self, DS, CS, n_dim):
"""
Merge CS into DS if the clusters have a Mahalanobis Distance < 2 * sqrt(d)
:return: DS, new_CS(just for print log)
"""
num_points = np.sum(DS[:, 0]) + np.sum(CS[:, 0])
for i in range(CS.shape[0]):
distance = float("inf")
for j in range(DS.shape[0]):
dist_ = self.dist(CS[i, :], DS[j, :], n_dim)
if dist_ < distance:
distance = dist_
min_j = j
DS[min_j, :] += CS[i, :]
new_CS = np.array([[num_points - np.sum(DS[:, 0])]])
return DS, new_CS
@staticmethod
def _initialize_RS(vectors, soft_n_cluster, shrink, kmeans_params):
"""
Initialize RS:
Run KMeans algorithm to cluster the points.
Filter out small clusters and add their members to RS.
Rest points are used to build DS.
:param vectors: pandas.DataFrame
:param soft_n_cluster: int
:param shrink: float
:param kmeans_params: dict
:return:
"""
"""
Run the KMeans algorithm with a large K to cluster points.
"""
threshold = int(vectors.shape[0] // soft_n_cluster * shrink)
clusters = KMeans(n_clusters=soft_n_cluster, **kmeans_params)\
.fit_predict(vectors)
"""
Find out cluster members.
Compute the statistics of clusters (number of points in each cluster)
then assign the number to each point.
"""
# centroids and number of points in each cluster
uniques, n_uniques = np.unique(clusters, return_counts=True)
# count number of points in clusters
count_table = dict(zip(uniques, n_uniques))
vectorized_func = np.vectorize(lambda x: count_table[x])
vector_nums = vectorized_func(clusters)
"""
Construct RS and rest points(used to build DS)
"""
RS = vectors[vector_nums <= threshold]
rest = vectors[vector_nums > threshold]
return rest, RS
@staticmethod
def _initialize_DS(vectors, n_cluster, n_dim, kmeans_params):
"""
Initialize DS:
Run KMeans and then compute statistics of each cluster.
:param vectors: np.array
:param n_cluster: int
:param n_dim: int
:param kmeans_params: dict
:return: np.array
"""
"""
Run KMeans and predict cluster index for each points
"""
clusters = KMeans(n_clusters=n_cluster, **kmeans_params)\
.fit_predict(vectors)
"""
Calculate statistics.
"""
DS = np.zeros((n_cluster, n_dim * 2 + 1))
for idx, u in enumerate(np.unique(clusters, return_counts=False)):
temp = vectors[clusters == u]
DS[idx, 0] = temp.shape[0]
DS[idx, 1: n_dim + 1] = np.sum(temp, axis=0)
DS[idx, n_dim + 1: 2 * n_dim + 1] = np.sum(temp ** 2, axis=0) + 10**-6
return DS
@staticmethod
def _initialize_CS(vectors, n_cluster, n_dim, kmeans_params):
"""
Divide the RS into a new RS and CS.
Run KMeans to cluster points in RS.
Add points in the clusters with more than one member into RS, and rest into CS.
:param vectors: np.array
RS from _initialize_RS(...)
:param n_cluster: int
:param n_dim: int
:param kmeans_params: dict
:return:
"""
if vectors.shape[0] <= n_cluster:
return np.zeros((0, n_dim * 2 + 1)), vectors
"""
Run KMeans.
"""
arr_clusters = KMeans(n_clusters=n_cluster, **kmeans_params).fit_predict(vectors)
uniques, n_uniques = np.unique(arr_clusters, return_counts=True)
"""
Count the number of points in clusters.
"""
count_table = dict(zip(uniques, n_uniques))
vectorized_func = np.vectorize(lambda x: count_table[x])
arr_nums = vectorized_func(arr_clusters)
RS = vectors[arr_nums <= 1]
CS = np.zeros((np.sum(n_uniques > 1), n_dim * 2 + 1))
for i, u in enumerate(uniques[n_uniques > 1]):
temp = vectors[arr_clusters == u]
CS[i, 0] = temp.shape[0]
CS[i, 1:n_dim + 1] = np.sum(temp, axis=0)
CS[i, n_dim + 1:2 * n_dim + 1] = np.sum(temp ** 2, axis=0)
return CS, RS
@staticmethod
def _update_DS_or_CS(vectors, clusters, n_dim):
"""
Update DS: Add new points into DS or CS.
For the new points, compare them to each of the DS using the Mahalanobis Distance and assign
them to the nearest DS clusters if the distance is < 2 * sqrt(n_dim).
For the new points that are not assigned to DS clusters, using the Mahalanobis Distance
and assign the points to the nearest CS clusters if the distance is < 2 * sqrt(n_dim).
"""
if clusters.shape[0] == 0:
return clusters, vectors
"""
Initialize centers and variances of clusters.
"""
centers = clusters[:, 1:n_dim + 1] / clusters[:, :1]
variance = clusters[:, n_dim + 1:2 * n_dim + 1] / clusters[:, :1] - centers ** 2
centers = centers.reshape((1, -1, n_dim))
variance = variance.reshape((1, -1, n_dim))
"""
Calculate Mahalanobis distances between points and clusters centers.
And assign points into DS according to distances.
"""
# distances.shape -> (n_samples, n_clusters)
vectors = np.array(vectors)
distance = np.sum((vectors.reshape((-1, 1, n_dim)) - centers) ** 2 / variance, axis=2) ** 0.5
dist_argmin = np.argmin(distance, axis=1)
dist_min = np.min(distance, axis=1)
for i in range(clusters.shape[0]):
temp = vectors[(dist_min <= 2 * np.sqrt(n_dim)) & (dist_argmin == i)]
clusters[i, 0] += temp.shape[0]
clusters[i, 1:n_dim + 1] += np.sum(temp, axis=0)
clusters[i, n_dim + 1:2 * n_dim + 1] += np.sum(temp ** 2, axis=0)
return clusters, vectors[(dist_min > 2 * np.sqrt(n_dim))]
@staticmethod
def _update_RS(vectors, CS, RS, n_cluster, n_dim, kmeans_params):
"""
Update RS: And remain points into RS and re-assing points in RS and CS.
For the new points that are not assigned to a DS cluster or a CS cluster, assign them to RS.
Run K-Means on the RS with a large K to generate CS (clusters with more than one points)
and RS (clusters with only one point).
"""
RS = np.concatenate((vectors, RS), axis=0)
if RS.shape[0] <= n_cluster:
return CS, RS
"""
Cluster RS and build new clusters in CS.
"""
clusters = KMeans(n_clusters=n_cluster, **kmeans_params).fit_predict(RS)
uniques, n_uniques = np.unique(clusters, return_counts=True)
count_table = dict(zip(uniques, n_uniques))
vectorized_func = np.vectorize(lambda x: count_table[x])
vector_nums = vectorized_func(clusters)
CS_more = np.zeros((np.sum(n_uniques > 1), n_dim * 2 + 1))
for i, u in enumerate(uniques[n_uniques > 1]):
temp = RS[clusters == u]
CS_more[i, 0] = temp.shape[0]
CS_more[i, 1:n_dim + 1] = np.sum(temp, axis=0)
CS_more[i, n_dim + 1:2 * n_dim + 1] = np.sum(temp ** 2, axis=0)
RS = RS[vector_nums <= 1]
CS = np.concatenate((CS, CS_more), axis=0)
return CS, RS
def _merge_CS(self, CS, n_dim):
"""
Merge clusters in CS that have a Mahalanobis Distance < 2 * sqrt(n_dim).
"""
if CS.shape[0] < 2:
return CS
# flag represents weather two clusters have been merged in one loop
flag = True
while flag:
flag = False
for i in range(CS.shape[0] - 1):
for j in range(i + 1, CS.shape[0]):
if self.dist(CS[i, :], CS[j, :], n_dim) < 2 * np.sqrt(n_dim):
CS[i, :] += CS[j, :]
CS = np.delete(CS, j, axis=0)
flag = True
break
if flag:
break
return CS
@staticmethod
def dist(cluster1, cluster2, n_dim):
"""
Compute distance between two clusters
:param cluster1: numpu.array
shape = (1 + 2 * n_dim, )
cluster: (n, SUM, SUMSQ)
:param cluster2: same as cluster1
:param n_dim: int
:return:
"""
c1 = cluster1[1:n_dim + 1] / cluster1[0]
c2 = cluster2[1:n_dim + 1] / cluster2[0]
v1 = cluster1[n_dim + 1:2 * n_dim + 1] / cluster1[0] - c1 ** 2
return np.sum((c1 - c2) ** 2 / v1) ** 0.5
def classify(self, DS, n_cluster, n_dim):
    """
    Final pass: label every point produced by self._data_generator().

    Each point receives the index of the nearest DS cluster (Euclidean
    distance normalized by the cluster's per-dimension variance) when that
    distance is within 2 * sqrt(n_dim); otherwise it is labelled -1.

    :param DS: numpy.array, shape (n_cluster, 1 + 2 * n_dim); each row is a
        cluster summary (N, SUM, SUMSQ).
    :param n_cluster: int, number of DS clusters.
    :param n_dim: int, dimensionality of the data points.
    :return: generator yielding one pandas.DataFrame (single "cluster"
        column) per incoming data chunk, indexed like the chunk.
    """
    # NOTE(review): assumes each chunk from _data_generator() is a pandas
    # DataFrame (it must expose .index) — confirm against _data_generator.
    iterator_vectors = self._data_generator()
    # centers and variance of clusters
    # DS[:, :1] is the per-cluster point count N; divide SUM and SUMSQ by it.
    centers = DS[:, 1:n_dim + 1] / DS[:, :1]
    variance = DS[:, n_dim + 1:2 * n_dim + 1] / DS[:, :1] - centers ** 2
    # Reshape to (1, n_cluster, n_dim) so they broadcast against points.
    centers = centers.reshape((1, -1, n_dim))
    variance = variance.reshape((1, -1, n_dim))
    for vectors in iterator_vectors:
        vector_index = vectors.index
        vectors = np.array(vectors)
        clusters = np.zeros((vectors.shape[0], 1))
        # distances.shape = (n_samples, n_clusters)
        distance = np.sum((vectors.reshape((-1, 1, n_dim)) - centers) ** 2 / variance, axis=2) ** 0.5
        dist_argmin = np.argmin(distance, axis=1)
        dist_min = np.min(distance, axis=1)
        # Assign each point to its nearest cluster if close enough...
        for i in range(n_cluster):
            clusters[(dist_min <= 2 * np.sqrt(n_dim)) & (dist_argmin == i)] = i
        # ...otherwise mark it as an outlier.
        clusters[(dist_min > 2 * np.sqrt(n_dim))] = -1
        yield pd.DataFrame(clusters, index=vector_index, columns=["cluster"])
if __name__ == '__main__':
    # Smoke test: feed ten random 5x3 batches through a local BFR run.
    def iter_func():
        """Yield ten random DataFrames acting as a mock chunked data stream."""
        for _ in range(10):
            yield pd.DataFrame(np.random.randn(5, 3), index=range(5))

    result = BFR.Local(n_cluster=2, iter_func=iter_func, print_log=True).run()
    print(result.clusters)
    # result.labels would hold the per-point assignments if needed
| [
"sklearn.cluster.KMeans",
"numpy.sqrt",
"numpy.unique",
"pandas.DataFrame",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randn",
"numpy.concatenate",
"numpy.min",
"numpy.argmin",
"pandas.read_table",
"numpy.vectorize"
] | [((8518, 8557), 'numpy.unique', 'np.unique', (['clusters'], {'return_counts': '(True)'}), '(clusters, return_counts=True)\n', (8527, 8557), True, 'import numpy as np\n'), ((8694, 8732), 'numpy.vectorize', 'np.vectorize', (['(lambda x: count_table[x])'], {}), '(lambda x: count_table[x])\n', (8706, 8732), True, 'import numpy as np\n'), ((9691, 9727), 'numpy.zeros', 'np.zeros', (['(n_cluster, n_dim * 2 + 1)'], {}), '((n_cluster, n_dim * 2 + 1))\n', (9699, 9727), True, 'import numpy as np\n'), ((10883, 10926), 'numpy.unique', 'np.unique', (['arr_clusters'], {'return_counts': '(True)'}), '(arr_clusters, return_counts=True)\n', (10892, 10926), True, 'import numpy as np\n'), ((11098, 11136), 'numpy.vectorize', 'np.vectorize', (['(lambda x: count_table[x])'], {}), '(lambda x: count_table[x])\n', (11110, 11136), True, 'import numpy as np\n'), ((12867, 12884), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (12875, 12884), True, 'import numpy as np\n'), ((13017, 13044), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (13026, 13044), True, 'import numpy as np\n'), ((13068, 13092), 'numpy.min', 'np.min', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (13074, 13092), True, 'import numpy as np\n'), ((13990, 14027), 'numpy.concatenate', 'np.concatenate', (['(vectors, RS)'], {'axis': '(0)'}), '((vectors, RS), axis=0)\n', (14004, 14027), True, 'import numpy as np\n'), ((14304, 14343), 'numpy.unique', 'np.unique', (['clusters'], {'return_counts': '(True)'}), '(clusters, return_counts=True)\n', (14313, 14343), True, 'import numpy as np\n'), ((14430, 14468), 'numpy.vectorize', 'np.vectorize', (['(lambda x: count_table[x])'], {}), '(lambda x: count_table[x])\n', (14442, 14468), True, 'import numpy as np\n'), ((14937, 14974), 'numpy.concatenate', 'np.concatenate', (['(CS, CS_more)'], {'axis': '(0)'}), '((CS, CS_more), axis=0)\n', (14951, 14974), True, 'import numpy as np\n'), ((6936, 6952), 'numpy.sum', 'np.sum', (['DS[:, 
0]'], {}), '(DS[:, 0])\n', (6942, 6952), True, 'import numpy as np\n'), ((6955, 6971), 'numpy.sum', 'np.sum', (['CS[:, 0]'], {}), '(CS[:, 0])\n', (6961, 6971), True, 'import numpy as np\n'), ((9764, 9804), 'numpy.unique', 'np.unique', (['clusters'], {'return_counts': '(False)'}), '(clusters, return_counts=False)\n', (9773, 9804), True, 'import numpy as np\n'), ((9936, 9956), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (9942, 9956), True, 'import numpy as np\n'), ((11485, 11505), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (11491, 11505), True, 'import numpy as np\n'), ((11555, 11580), 'numpy.sum', 'np.sum', (['(temp ** 2)'], {'axis': '(0)'}), '(temp ** 2, axis=0)\n', (11561, 11580), True, 'import numpy as np\n'), ((13318, 13338), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (13324, 13338), True, 'import numpy as np\n'), ((13395, 13420), 'numpy.sum', 'np.sum', (['(temp ** 2)'], {'axis': '(0)'}), '(temp ** 2, axis=0)\n', (13401, 13420), True, 'import numpy as np\n'), ((14781, 14801), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (14787, 14801), True, 'import numpy as np\n'), ((14856, 14881), 'numpy.sum', 'np.sum', (['(temp ** 2)'], {'axis': '(0)'}), '(temp ** 2, axis=0)\n', (14862, 14881), True, 'import numpy as np\n'), ((16399, 16426), 'numpy.sum', 'np.sum', (['((c1 - c2) ** 2 / v1)'], {}), '((c1 - c2) ** 2 / v1)\n', (16405, 16426), True, 'import numpy as np\n'), ((17132, 17149), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (17140, 17149), True, 'import numpy as np\n'), ((17177, 17208), 'numpy.zeros', 'np.zeros', (['(vectors.shape[0], 1)'], {}), '((vectors.shape[0], 1))\n', (17185, 17208), True, 'import numpy as np\n'), ((17409, 17436), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (17418, 17436), True, 'import numpy as np\n'), ((17464, 17488), 'numpy.min', 'np.min', (['distance'], {'axis': '(1)'}), 
'(distance, axis=1)\n', (17470, 17488), True, 'import numpy as np\n'), ((8130, 8180), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'soft_n_cluster'}), '(n_clusters=soft_n_cluster, **kmeans_params)\n', (8136, 8180), False, 'from sklearn.cluster import KMeans\n'), ((9522, 9567), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_cluster'}), '(n_clusters=n_cluster, **kmeans_params)\n', (9528, 9567), False, 'from sklearn.cluster import KMeans\n'), ((10009, 10034), 'numpy.sum', 'np.sum', (['(temp ** 2)'], {'axis': '(0)'}), '(temp ** 2, axis=0)\n', (10015, 10034), True, 'import numpy as np\n'), ((10661, 10689), 'numpy.zeros', 'np.zeros', (['(0, n_dim * 2 + 1)'], {}), '((0, n_dim * 2 + 1))\n', (10669, 10689), True, 'import numpy as np\n'), ((10783, 10828), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_cluster'}), '(n_clusters=n_cluster, **kmeans_params)\n', (10789, 10828), False, 'from sklearn.cluster import KMeans\n'), ((11259, 11280), 'numpy.sum', 'np.sum', (['(n_uniques > 1)'], {}), '(n_uniques > 1)\n', (11265, 11280), True, 'import numpy as np\n'), ((14208, 14253), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_cluster'}), '(n_clusters=n_cluster, **kmeans_params)\n', (14214, 14253), False, 'from sklearn.cluster import KMeans\n'), ((14554, 14575), 'numpy.sum', 'np.sum', (['(n_uniques > 1)'], {}), '(n_uniques > 1)\n', (14560, 14575), True, 'import numpy as np\n'), ((17709, 17772), 'pandas.DataFrame', 'pd.DataFrame', (['clusters'], {'index': 'vector_index', 'columns': "['cluster']"}), "(clusters, index=vector_index, columns=['cluster'])\n", (17721, 17772), True, 'import pandas as pd\n'), ((17881, 17902), 'numpy.random.randn', 'np.random.randn', (['(5)', '(3)'], {}), '(5, 3)\n', (17896, 17902), True, 'import numpy as np\n'), ((2492, 2598), 'pandas.read_table', 'pd.read_table', (['input_file_path'], {'delimiter': '"""[^0-9a-zA-Z\\\\.\n]"""', 'dtype': 'np.float64', 'chunksize': 'chunk_size'}), "(input_file_path, 
delimiter='[^0-9a-zA-Z\\\\.\\n]', dtype=np.\n float64, chunksize=chunk_size)\n", (2505, 2598), True, 'import pandas as pd\n'), ((5025, 5041), 'numpy.sum', 'np.sum', (['DS[:, 0]'], {}), '(DS[:, 0])\n', (5031, 5041), True, 'import numpy as np\n'), ((5122, 5138), 'numpy.sum', 'np.sum', (['CS[:, 0]'], {}), '(CS[:, 0])\n', (5128, 5138), True, 'import numpy as np\n'), ((7365, 7381), 'numpy.sum', 'np.sum', (['DS[:, 0]'], {}), '(DS[:, 0])\n', (7371, 7381), True, 'import numpy as np\n'), ((13474, 13488), 'numpy.sqrt', 'np.sqrt', (['n_dim'], {}), '(n_dim)\n', (13481, 13488), True, 'import numpy as np\n'), ((15648, 15672), 'numpy.delete', 'np.delete', (['CS', 'j'], {'axis': '(0)'}), '(CS, j, axis=0)\n', (15657, 15672), True, 'import numpy as np\n'), ((4363, 4396), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cluster']"}), "(columns=['cluster'])\n", (4375, 4396), True, 'import pandas as pd\n'), ((13188, 13202), 'numpy.sqrt', 'np.sqrt', (['n_dim'], {}), '(n_dim)\n', (13195, 13202), True, 'import numpy as np\n'), ((15550, 15564), 'numpy.sqrt', 'np.sqrt', (['n_dim'], {}), '(n_dim)\n', (15557, 15564), True, 'import numpy as np\n'), ((17665, 17679), 'numpy.sqrt', 'np.sqrt', (['n_dim'], {}), '(n_dim)\n', (17672, 17679), True, 'import numpy as np\n'), ((17578, 17592), 'numpy.sqrt', 'np.sqrt', (['n_dim'], {}), '(n_dim)\n', (17585, 17592), True, 'import numpy as np\n')] |
import numpy as np
class SumTree:
    """
    Binary sum-tree over `mem_size` leaf priorities.

    The first `mem_size - 1` entries of `tree` are internal nodes, each the
    sum of its two children; the last `mem_size` entries are the leaf
    priorities. `data` holds one payload per leaf and is written circularly.
    """

    def __init__(self, mem_size):
        self.tree = np.zeros(2 * mem_size - 1)          # internal nodes + leaves
        self.data = np.zeros(mem_size, dtype=object)    # payload per leaf
        self.size = mem_size
        self.ptr = 0            # next leaf slot to (over)write
        self.nentities = 0      # number of stored entries, capped at size

    def update(self, idx, p):
        """Set leaf `idx`'s priority to `p` and propagate the delta to the root."""
        node = idx + self.size - 1
        change = p - self.tree[node]
        self.tree[node] += change
        while node:
            node = (node - 1) // 2
            self.tree[node] += change

    def store(self, p, data):
        """Write `data` at the current slot with priority `p`; return that slot."""
        slot = self.ptr
        self.data[slot] = data
        self.update(slot, p)
        self.ptr += 1
        if self.ptr == self.size:
            self.ptr = 0    # wrap: the oldest entries get overwritten next
        self.nentities = min(self.nentities + 1, self.size)
        return slot

    def getNextIdx(self):
        """Index of the slot the next `store` call will use."""
        return self.ptr

    def sample(self, value):
        """
        Walk down from the root to the leaf whose cumulative-priority
        interval contains `value`; return (leaf index, priority, payload).
        """
        node = 0
        while node < self.size - 1:     # indices below size-1 are internal
            child = 2 * node + 1
            if value < self.tree[child]:
                node = child
            else:
                value -= self.tree[child]
                node = child + 1
        leaf = node - (self.size - 1)
        return leaf, self.tree[node], self.data[leaf]

    @property
    def total_p(self):
        """Sum of all leaf priorities (the root node)."""
        return self.tree[0]

    @property
    def max_p(self):
        """Largest leaf priority."""
        return np.max(self.tree[-self.size:])

    @property
    def min_p(self):
        """Smallest leaf priority (zero until the buffer fills)."""
        return np.min(self.tree[-self.size:])
class Memory:
    """
    Replay memory with three storage modes:

    * prior=True, paperPrior=False: proportional prioritized replay backed
      by a SumTree (priorities clipped to [epsilon, p_upper], priority
      exponent alpha, importance weights with exponent beta).
    * paperPrior=True: plain circular array sampled with a recency-skewed
      log distribution.
    * otherwise: plain circular array with uniform sampling.
    """

    def __init__(self, mem_size, prior=True, paperPrior=False, p_upper=1., epsilon=.01, alpha=1, beta=1):
        self.p_upper = p_upper      # maximum priority after clipping
        self.epsilon = epsilon      # additive constant so no priority is zero
        self.alpha = alpha          # priority exponent
        self.beta = beta            # importance-sampling exponent
        self.prior = prior
        self.paperPrior = paperPrior
        self.nentities = 0          # stored transitions, capped at mem_size
        self.mem_size = mem_size
        # The SumTree only exists for the proportional-priority mode; every
        # other mode uses a plain circular object array.
        if prior and not paperPrior:
            self.tree = SumTree(mem_size)
        else:
            self.mem = np.zeros(mem_size, dtype=object)
            self.mem_ptr = 0

    def store(self, transition):
        """Insert a transition; return the slot index it was written to."""
        if self.prior and not self.paperPrior:
            # New transitions get the current max priority so they are
            # sampled at least once before their TD error is known.
            p = self.tree.max_p
            if not p:
                p = self.p_upper
            idx = self.tree.store(p, transition)
            self.nentities = min(self.nentities + 1, self.mem_size)
        else:
            idx = self.mem_ptr
            self.mem[idx] = transition
            self.mem_ptr += 1
            if self.mem_ptr == self.mem_size:
                self.mem_ptr = 0    # circular overwrite
            self.nentities = min(self.nentities + 1, self.mem_size)
        return idx

    def sample(self, n):
        """
        Draw `n` transitions.

        :return: (indices, importance_weights, transitions). The weights are
            0 for the non-prioritized modes.
        """
        if self.prior and not self.paperPrior:
            min_p = self.tree.min_p
            if min_p == 0:
                # Buffer not full yet: fall back to the smallest priority
                # any stored transition could have.
                min_p = self.epsilon ** self.alpha
            # Stratified sampling: one draw per equal slice of priority mass.
            seg = self.tree.total_p / n
            batch = np.zeros(n, dtype=object)
            w = np.zeros((n, 1), np.float32)
            idx = np.zeros(n, np.int32)
            a = 0
            for i in range(n):
                b = a + seg
                v = np.random.uniform(a, b)
                idx[i], p, batch[i] = self.tree.sample(v)
                w[i] = (p / min_p) ** (-self.beta)
                a += seg
            return idx, w, batch
        if self.paperPrior:
            # Recency-biased sampling: indices drawn from a truncated
            # log-uniform distribution, offset from the write pointer.
            w = min(3, 1 + 0.02 * self.nentities)
            mask = np.array([int(self.nentities * np.log(1 + np.random.random() * (np.exp(w) - 1)) / w) for i in range(n)])
            mask = (mask + self.mem_ptr) % self.nentities
            return mask, 0, self.mem[mask]
        mask = np.random.choice(range(self.nentities), n)
        return mask, 0, self.mem[mask]

    def update(self, idx, tderr):
        """
        Refresh the priorities of sampled transitions from their TD errors.

        Fixes vs. the original: `tderr` is no longer modified in place (the
        caller's array used to get epsilon added as a side effect), and the
        guard now matches the configurations that actually own a SumTree —
        with paperPrior=True this used to raise AttributeError.
        """
        if self.prior and not self.paperPrior:
            # Work on a new array so the caller's TD errors are untouched.
            tderr = np.minimum(tderr + self.epsilon, self.p_upper)
            for i in range(len(idx)):
                self.tree.update(idx[i], tderr[i] ** self.alpha)

    def getNextIdx(self):
        """Index that the next `store` call will write to."""
        if self.prior and not self.paperPrior:
            return self.tree.getNextIdx()
        return self.mem_ptr

    def getData(self, idx):
        """Return the stored transition at `idx`, or None if out of range."""
        if idx >= self.nentities:
            return None
        if self.prior and not self.paperPrior:
            return self.tree.data[idx]
        return self.mem[idx]
class MemoryCourse:
    """Unbounded append-only log of transitions (no eviction)."""

    def __init__(self):
        self.mem = []   # transitions in insertion order

    def store(self, transition):
        """Append a transition and return its index."""
        idx = len(self.mem)
        self.mem.append(transition)
        return idx

    def getLast(self):
        """Most recently stored transition."""
        return self.mem[-1]

    def getAll(self):
        """The full list of stored transitions."""
        return self.mem
| [
"numpy.minimum",
"numpy.random.random",
"numpy.min",
"numpy.max",
"numpy.exp",
"numpy.zeros",
"numpy.random.uniform"
] | [((89, 115), 'numpy.zeros', 'np.zeros', (['(2 * mem_size - 1)'], {}), '(2 * mem_size - 1)\n', (97, 115), True, 'import numpy as np\n'), ((136, 168), 'numpy.zeros', 'np.zeros', (['mem_size'], {'dtype': 'object'}), '(mem_size, dtype=object)\n', (144, 168), True, 'import numpy as np\n'), ((1346, 1376), 'numpy.max', 'np.max', (['self.tree[-self.size:]'], {}), '(self.tree[-self.size:])\n', (1352, 1376), True, 'import numpy as np\n'), ((1428, 1458), 'numpy.min', 'np.min', (['self.tree[-self.size:]'], {}), '(self.tree[-self.size:])\n', (1434, 1458), True, 'import numpy as np\n'), ((1991, 2023), 'numpy.zeros', 'np.zeros', (['mem_size'], {'dtype': 'object'}), '(mem_size, dtype=object)\n', (1999, 2023), True, 'import numpy as np\n'), ((3119, 3144), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'object'}), '(n, dtype=object)\n', (3127, 3144), True, 'import numpy as np\n'), ((3161, 3189), 'numpy.zeros', 'np.zeros', (['(n, 1)', 'np.float32'], {}), '((n, 1), np.float32)\n', (3169, 3189), True, 'import numpy as np\n'), ((3208, 3229), 'numpy.zeros', 'np.zeros', (['n', 'np.int32'], {}), '(n, np.int32)\n', (3216, 3229), True, 'import numpy as np\n'), ((4122, 4153), 'numpy.minimum', 'np.minimum', (['tderr', 'self.p_upper'], {}), '(tderr, self.p_upper)\n', (4132, 4153), True, 'import numpy as np\n'), ((3327, 3350), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {}), '(a, b)\n', (3344, 3350), True, 'import numpy as np\n'), ((3659, 3677), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3675, 3677), True, 'import numpy as np\n'), ((3681, 3690), 'numpy.exp', 'np.exp', (['w'], {}), '(w)\n', (3687, 3690), True, 'import numpy as np\n')] |
import os
import pytest
import musdb
import numpy as np
import yaml
@pytest.fixture(params=[True, False])
def mus(request):
    # Parametrized DB fixture: runs every dependent test against both the
    # wav-decoded (is_wav=True) and stem-file (is_wav=False) loading paths
    # of the bundled MUSDB sample archive.
    return musdb.DB(root_dir='data/MUS-STEMS-SAMPLE', is_wav=request.param)
def user_function0(track):
    """Produce no estimates at all; a None result is accepted (training use-case)."""
    return None
def user_function1(track):
    """Valid estimates: both targets are simply the input mixture."""
    mixture = track.audio
    return {'vocals': mixture, 'accompaniment': mixture}
def user_function2(track):
    """Deliberately invalid estimates: 'vocals' is one sample shorter than the mixture."""
    mixture = track.audio
    return {'vocals': mixture[:-1], 'accompaniment': mixture}
def user_function3(track):
    """Deliberately invalid estimates: 'triangle' is not a known target name."""
    mixture = track.audio
    return {'triangle': mixture, 'accompaniment': mixture}
def user_function4(track):
    """Deliberately invalid estimates: integer samples instead of floating point."""
    return {'vocals': track.audio.astype(np.int32)}
def user_function5(track):
    """Deliberately invalid output: the raw array instead of a {target: audio} dict."""
    return track.audio
def user_function6(track):
    """Valid estimates: float32 audio is an accepted sample type."""
    return {'vocals': track.audio.astype(np.float32)}
def test_stems(mus):
    """Each stem slice in track.stems must match the corresponding source
    audio, using the stem-index mapping shipped in musdb's mus.yaml."""
    tracks = mus.load_mus_tracks()
    setup_path = os.path.join(
        musdb.__path__[0], 'configs', 'mus.yaml'
    )
    with open(setup_path, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # an error in PyYAML >= 6; the config contains plain data only.
        setup = yaml.safe_load(f)
    for track in tracks:
        for k, v in setup['stem_ids'].items():
            if k == 'mixture':
                assert np.allclose(
                    track.audio,
                    track.stems[v]
                )
            else:
                assert np.allclose(
                    track.sources[k].audio,
                    track.stems[v]
                )
def test_file_loading(mus):
    """Check basic track loading and the subset/trackname filter combinations."""
    # Full archive: two tracks, stereo audio, five stems each.
    loaded = mus.load_mus_tracks()
    assert len(loaded) == 2
    for track in loaded:
        assert track.audio.shape[1] > 0
        assert track.audio.shape[-1] == 2
        assert track.stems.shape[0] == 5
    # A single subset selected by name.
    loaded = mus.load_mus_tracks(subsets='train')
    assert len(loaded) == 1
    # Filtering by an explicit track name.
    loaded = mus.load_mus_tracks(tracknames=['PR - Oh No'])
    assert len(loaded) == 1
    # An explicit list covering both subsets.
    loaded = mus.load_mus_tracks(subsets=['train', 'test'])
    assert len(loaded) == 2
    # subsets=None falls back to loading everything.
    loaded = mus.load_mus_tracks(subsets=None)
    assert len(loaded) == 2
@pytest.mark.parametrize(
    "path",
    [
        # pytest >= 4 removed applying marks directly to parametrize values;
        # pytest.param(..., marks=...) is the supported spelling.
        pytest.param(None, marks=pytest.mark.xfail(raises=RuntimeError)),
        pytest.param("wrong/path", marks=pytest.mark.xfail(raises=IOError)),
        "data/MUS-STEMS-SAMPLE",
    ]
)
def test_env(path):
    """musdb.DB() must honour the MUSDB_PATH environment variable: fail for
    an unset or wrong path, succeed for the sample archive."""
    if path is not None:
        os.environ["MUSDB_PATH"] = path
    assert musdb.DB()
@pytest.mark.parametrize(
    "func",
    [
        user_function1,
        # pytest >= 4 removed marking parametrize values directly; use
        # pytest.param(..., marks=...) instead.
        pytest.param(user_function2, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function3, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function4, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function5, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param("not_a_function", marks=pytest.mark.xfail(raises=TypeError)),
        user_function6,
    ]
)
def test_user_functions_test(func, mus):
    """mus.test() accepts well-formed user functions and rejects broken ones."""
    assert mus.test(user_function=func)
@pytest.mark.parametrize(
    "func",
    [
        user_function0,
        user_function1,
        # pytest >= 4 removed marking parametrize values directly; use
        # pytest.param(..., marks=...) instead.
        pytest.param(user_function2, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function3, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function4, marks=pytest.mark.xfail(raises=ValueError)),
    ]
)
def test_run(func, mus):
    """Process the archive without saving estimates (estimates_dir=None)."""
    assert mus.run(
        user_function=func,
        estimates_dir=None
    )
@pytest.mark.parametrize(
    "func",
    [
        # pytest >= 4 removed marking parametrize values directly; use
        # pytest.param(..., marks=...) instead. Note that a None-returning
        # function is invalid here because estimates must be written out.
        pytest.param(user_function0, marks=pytest.mark.xfail(raises=ValueError)),
        user_function1,
        pytest.param(user_function2, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function3, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(user_function4, marks=pytest.mark.xfail(raises=ValueError)),
    ]
)
def test_run_estimates(func, mus):
    """Process the archive and write the estimates to disk."""
    assert mus.run(
        user_function=func,
        estimates_dir='./Estimates'
    )
def test_parallel(mus):
    # Run with multiprocessing enabled but a single worker; mus.run should
    # still report success exactly like the serial code path.
    assert mus.run(
        user_function=user_function1,
        parallel=True,
        cpus=1
    )
| [
"numpy.allclose",
"pytest.mark.xfail",
"os.path.join",
"yaml.load",
"musdb.DB",
"pytest.fixture"
] | [((71, 107), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[True, False]'}), '(params=[True, False])\n', (85, 107), False, 'import pytest\n'), ((137, 201), 'musdb.DB', 'musdb.DB', ([], {'root_dir': '"""data/MUS-STEMS-SAMPLE"""', 'is_wav': 'request.param'}), "(root_dir='data/MUS-STEMS-SAMPLE', is_wav=request.param)\n", (145, 201), False, 'import musdb\n'), ((1590, 1644), 'os.path.join', 'os.path.join', (['musdb.__path__[0]', '"""configs"""', '"""mus.yaml"""'], {}), "(musdb.__path__[0], 'configs', 'mus.yaml')\n", (1602, 1644), False, 'import os\n'), ((3124, 3134), 'musdb.DB', 'musdb.DB', ([], {}), '()\n', (3132, 3134), False, 'import musdb\n'), ((1713, 1725), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1722, 1725), False, 'import yaml\n'), ((2882, 2926), 'pytest.mark.xfail', 'pytest.mark.xfail', (['None'], {'raises': 'RuntimeError'}), '(None, raises=RuntimeError)\n', (2899, 2926), False, 'import pytest\n'), ((2936, 2983), 'pytest.mark.xfail', 'pytest.mark.xfail', (['"""wrong/path"""'], {'raises': 'IOError'}), "('wrong/path', raises=IOError)\n", (2953, 2983), False, 'import pytest\n'), ((3213, 3265), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function2'], {'raises': 'ValueError'}), '(user_function2, raises=ValueError)\n', (3230, 3265), False, 'import pytest\n'), ((3275, 3327), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function3'], {'raises': 'ValueError'}), '(user_function3, raises=ValueError)\n', (3292, 3327), False, 'import pytest\n'), ((3337, 3389), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function4'], {'raises': 'ValueError'}), '(user_function4, raises=ValueError)\n', (3354, 3389), False, 'import pytest\n'), ((3399, 3451), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function5'], {'raises': 'ValueError'}), '(user_function5, raises=ValueError)\n', (3416, 3451), False, 'import pytest\n'), ((3461, 3514), 'pytest.mark.xfail', 'pytest.mark.xfail', (['"""not_a_function"""'], {'raises': 'TypeError'}), "('not_a_function', 
raises=TypeError)\n", (3478, 3514), False, 'import pytest\n'), ((3731, 3783), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function2'], {'raises': 'ValueError'}), '(user_function2, raises=ValueError)\n', (3748, 3783), False, 'import pytest\n'), ((3793, 3845), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function3'], {'raises': 'ValueError'}), '(user_function3, raises=ValueError)\n', (3810, 3845), False, 'import pytest\n'), ((3855, 3907), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function4'], {'raises': 'ValueError'}), '(user_function4, raises=ValueError)\n', (3872, 3907), False, 'import pytest\n'), ((4124, 4176), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function0'], {'raises': 'ValueError'}), '(user_function0, raises=ValueError)\n', (4141, 4176), False, 'import pytest\n'), ((4210, 4262), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function2'], {'raises': 'ValueError'}), '(user_function2, raises=ValueError)\n', (4227, 4262), False, 'import pytest\n'), ((4272, 4324), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function3'], {'raises': 'ValueError'}), '(user_function3, raises=ValueError)\n', (4289, 4324), False, 'import pytest\n'), ((4334, 4386), 'pytest.mark.xfail', 'pytest.mark.xfail', (['user_function4'], {'raises': 'ValueError'}), '(user_function4, raises=ValueError)\n', (4351, 4386), False, 'import pytest\n'), ((1853, 1893), 'numpy.allclose', 'np.allclose', (['track.audio', 'track.stems[v]'], {}), '(track.audio, track.stems[v])\n', (1864, 1893), True, 'import numpy as np\n'), ((1993, 2044), 'numpy.allclose', 'np.allclose', (['track.sources[k].audio', 'track.stems[v]'], {}), '(track.sources[k].audio, track.stems[v])\n', (2004, 2044), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import tensorflow as tf
import numpy as np
from scripts.utils import parser as ps
from rl import experimenter as Exp
from rl.algorithms import PolicyGradient
from rl.core.function_approximators.policies.tf2_policies import RobustKerasMLPGassian
from rl.core.function_approximators.supervised_learners import SuperRobustKerasMLP
def main(c):
# Setup logz and save c
ps.configure_log(c)
# Create mdp and fix randomness
mdp = ps.setup_mdp(c['mdp'], c['seed'])
# Create learnable objects
ob_shape = mdp.ob_shape
ac_shape = mdp.ac_shape
if mdp.use_time_info:
ob_shape = (np.prod(ob_shape)+1,)
# Define the learner
policy = RobustKerasMLPGassian(ob_shape, ac_shape, name='policy',
init_lstd=c['init_lstd'],
units=c['policy_units'])
vfn = SuperRobustKerasMLP(ob_shape, (1,), name='value function',
units=c['value_units'])
# Create algorithm
alg = PolicyGradient(policy, vfn,
gamma=mdp.gamma, horizon=mdp.horizon,
**c['algorithm'])
# Let's do some experiments!
exp = Exp.Experimenter(alg, mdp, c['experimenter']['rollout_kwargs'])
exp.run(**c['experimenter']['run_kwargs'])
CONFIG = {
'top_log_dir': 'log_pg',
'exp_name': 'cp',
'seed': 9,
'mdp': {
'envid': 'DartCartPole-v1',
'horizon': 1000, # the max length of rollouts in training
'gamma': 1.0,
'n_processes':1,
},
'experimenter': {
'run_kwargs': {
'n_itrs': 100,
'pretrain': True,
'final_eval': False,
'save_freq': 5,
},
'rollout_kwargs': {
'min_n_samples': 2000,
'max_n_rollouts': None,
},
},
'algorithm': {
'optimizer':'adam',
'lr':0.001,
'max_kl':0.1,
'delta':None,
'lambd':0.99,
'max_n_batches':2,
'n_warm_up_itrs':None,
'n_pretrain_itrs':1,
},
'policy_units': (64,),
'value_units': (128,128),
'init_lstd': -1,
}
if __name__ == '__main__':
main(CONFIG)
| [
"numpy.prod",
"scripts.utils.parser.configure_log",
"rl.experimenter.Experimenter",
"rl.algorithms.PolicyGradient",
"scripts.utils.parser.setup_mdp",
"rl.core.function_approximators.supervised_learners.SuperRobustKerasMLP",
"rl.core.function_approximators.policies.tf2_policies.RobustKerasMLPGassian"
] | [((486, 505), 'scripts.utils.parser.configure_log', 'ps.configure_log', (['c'], {}), '(c)\n', (502, 505), True, 'from scripts.utils import parser as ps\n'), ((553, 586), 'scripts.utils.parser.setup_mdp', 'ps.setup_mdp', (["c['mdp']", "c['seed']"], {}), "(c['mdp'], c['seed'])\n", (565, 586), True, 'from scripts.utils import parser as ps\n'), ((782, 894), 'rl.core.function_approximators.policies.tf2_policies.RobustKerasMLPGassian', 'RobustKerasMLPGassian', (['ob_shape', 'ac_shape'], {'name': '"""policy"""', 'init_lstd': "c['init_lstd']", 'units': "c['policy_units']"}), "(ob_shape, ac_shape, name='policy', init_lstd=c[\n 'init_lstd'], units=c['policy_units'])\n", (803, 894), False, 'from rl.core.function_approximators.policies.tf2_policies import RobustKerasMLPGassian\n'), ((971, 1058), 'rl.core.function_approximators.supervised_learners.SuperRobustKerasMLP', 'SuperRobustKerasMLP', (['ob_shape', '(1,)'], {'name': '"""value function"""', 'units': "c['value_units']"}), "(ob_shape, (1,), name='value function', units=c[\n 'value_units'])\n", (990, 1058), False, 'from rl.core.function_approximators.supervised_learners import SuperRobustKerasMLP\n'), ((1117, 1205), 'rl.algorithms.PolicyGradient', 'PolicyGradient', (['policy', 'vfn'], {'gamma': 'mdp.gamma', 'horizon': 'mdp.horizon'}), "(policy, vfn, gamma=mdp.gamma, horizon=mdp.horizon, **c[\n 'algorithm'])\n", (1131, 1205), False, 'from rl.algorithms import PolicyGradient\n'), ((1295, 1358), 'rl.experimenter.Experimenter', 'Exp.Experimenter', (['alg', 'mdp', "c['experimenter']['rollout_kwargs']"], {}), "(alg, mdp, c['experimenter']['rollout_kwargs'])\n", (1311, 1358), True, 'from rl import experimenter as Exp\n'), ((721, 738), 'numpy.prod', 'np.prod', (['ob_shape'], {}), '(ob_shape)\n', (728, 738), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import os
# all modules should be installed
if __name__ == '__main__':
# path of source image should be secified
img1 = cv.imread(r'...\test images\test19june.png')
img = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
# changing directory to favicon dir for verification
os.chdir(r'...\test images\favicon')
flag = 0
for i in os.listdir():
temp = cv.imread(i, cv.IMREAD_GRAYSCALE)
w, h = temp.shape[::-1]
# print('width : {}, height : {}'.format(w, h))
result = cv.matchTemplate(img, temp, cv.TM_CCOEFF_NORMED)
loc = np.where(result >= 0.85)
if len(loc[0]) != 0:
fn, _ = os.path.splitext(i)
print('► opened : {}'.format(fn))
flag = 1
for pt in zip(*loc[::-1]):
cv.rectangle(img1, pt, (pt[0] + w, pt[1] + h), (255, 79, 138), 2)
if not flag:
print('► Person is clear')
# in case if its a screenshot of plane desktop
cv.imshow('img', img1)
cv.waitKey(0)
cv.destroyAllWindows()
# applicable in : chrome, opera mini, mozilla
| [
"cv2.rectangle",
"os.listdir",
"numpy.where",
"os.path.splitext",
"cv2.imshow",
"os.chdir",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.matchTemplate",
"cv2.imread"
] | [((174, 219), 'cv2.imread', 'cv.imread', (['"""...\\\\test images\\\\test19june.png"""'], {}), "('...\\\\test images\\\\test19june.png')\n", (183, 219), True, 'import cv2 as cv\n'), ((230, 266), 'cv2.cvtColor', 'cv.cvtColor', (['img1', 'cv.COLOR_BGR2GRAY'], {}), '(img1, cv.COLOR_BGR2GRAY)\n', (241, 266), True, 'import cv2 as cv\n'), ((332, 369), 'os.chdir', 'os.chdir', (['"""...\\\\test images\\\\favicon"""'], {}), "('...\\\\test images\\\\favicon')\n", (340, 369), False, 'import os\n'), ((397, 409), 'os.listdir', 'os.listdir', ([], {}), '()\n', (407, 409), False, 'import os\n'), ((1038, 1060), 'cv2.imshow', 'cv.imshow', (['"""img"""', 'img1'], {}), "('img', img1)\n", (1047, 1060), True, 'import cv2 as cv\n'), ((1066, 1079), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1076, 1079), True, 'import cv2 as cv\n'), ((1085, 1107), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1105, 1107), True, 'import cv2 as cv\n'), ((427, 460), 'cv2.imread', 'cv.imread', (['i', 'cv.IMREAD_GRAYSCALE'], {}), '(i, cv.IMREAD_GRAYSCALE)\n', (436, 460), True, 'import cv2 as cv\n'), ((571, 619), 'cv2.matchTemplate', 'cv.matchTemplate', (['img', 'temp', 'cv.TM_CCOEFF_NORMED'], {}), '(img, temp, cv.TM_CCOEFF_NORMED)\n', (587, 619), True, 'import cv2 as cv\n'), ((635, 659), 'numpy.where', 'np.where', (['(result >= 0.85)'], {}), '(result >= 0.85)\n', (643, 659), True, 'import numpy as np\n'), ((711, 730), 'os.path.splitext', 'os.path.splitext', (['i'], {}), '(i)\n', (727, 730), False, 'import os\n'), ((857, 922), 'cv2.rectangle', 'cv.rectangle', (['img1', 'pt', '(pt[0] + w, pt[1] + h)', '(255, 79, 138)', '(2)'], {}), '(img1, pt, (pt[0] + w, pt[1] + h), (255, 79, 138), 2)\n', (869, 922), True, 'import cv2 as cv\n')] |
'''
Recurrent Deterministic Policy Gradient (DDPG with an LSTM network).
Updates are performed on a batch of episodes at a time, so every episode is required to have the same length.
'''
import math
import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from collections import namedtuple
from common.buffers import *
from common.value_networks import *
from common.policy_networks import *
from common.utils import *
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
from gym import spaces
# Compute-device selection: use the CUDA GPU at `device_idx` when GPU mode
# is enabled and CUDA is available, otherwise fall back to the CPU.
GPU = True
device_idx = 0
if GPU:
    device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)
# Command-line switches: --train and --test choose between training a new
# policy and evaluating a stored one (both default to off).
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
class RDPG():
    """Recurrent Deterministic Policy Gradient agent.

    Holds an LSTM Q-network and an LSTM deterministic policy network, each with
    a soft-updated target copy, plus their Adam optimizers. Experience is drawn
    from a replay buffer whose samples are whole episodes (required for the
    recurrent networks).
    """
    def __init__(self, replay_buffer, state_space, action_space, hidden_dim):
        """Build the four networks, copy Q weights into the target Q network,
        and create one optimizer per trainable network."""
        self.replay_buffer = replay_buffer
        self.hidden_dim = hidden_dim
        self.qnet = QNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.target_qnet = QNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.policy_net = DPG_PolicyNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.target_policy_net = DPG_PolicyNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        print('Q network: ', self.qnet)
        print('Policy network: ', self.policy_net)
        # Hard-copy the online Q weights into the target Q network.
        # NOTE(review): the target *policy* net is never synchronised here and
        # keeps its random initial weights until the first soft update — confirm
        # whether that is intended.
        for target_param, param in zip(self.target_qnet.parameters(), self.qnet.parameters()):
            target_param.data.copy_(param.data)
        self.q_criterion = nn.MSELoss()
        q_lr=1e-3
        policy_lr = 1e-3
        self.update_cnt=0
        self.q_optimizer = optim.Adam(self.qnet.parameters(), lr=q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)
    def target_soft_update(self, net, target_net, soft_tau):
        """Polyak-average `net` into `target_net` with factor `soft_tau` and
        return the updated target network."""
        for target_param, param in zip(target_net.parameters(), net.parameters()):
            target_param.data.copy_(  # copy data value into target parameters
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )
        return target_net
    def update(self, batch_size, reward_scale=10.0, gamma=0.99, soft_tau=1e-2, policy_up_itr=10, target_update_delay=3, warmup=True):
        """One DDPG-style gradient step on a batch of sampled episodes.

        Returns (q_loss, policy_loss) as numpy scalars. Target networks are
        soft-updated every `target_update_delay` calls.
        NOTE(review): reward_scale, policy_up_itr and warmup are currently
        unused in the body.
        """
        self.update_cnt+=1
        hidden_in, hidden_out, state, action, last_action, reward, next_state, done = self.replay_buffer.sample(batch_size)
        # print('sample:', state, action, reward, done)
        state = torch.FloatTensor(state).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        action = torch.FloatTensor(action).to(device)
        last_action = torch.FloatTensor(last_action).to(device)
        reward = torch.FloatTensor(reward).unsqueeze(-1).to(device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(-1).to(device)
        # use hidden states stored in the memory for initialization, hidden_in for current, hidden_out for target
        predict_q, _ = self.qnet(state, action, last_action, hidden_in)  # for q
        new_action, _ = self.policy_net.evaluate(state, last_action, hidden_in)  # for policy
        new_next_action, _ = self.target_policy_net.evaluate(next_state, action, hidden_out)  # for q
        predict_target_q, _ = self.target_qnet(next_state, new_next_action, action, hidden_out)  # for q
        predict_new_q, _ = self.qnet(state, new_action, last_action, hidden_in)  # for policy. as optimizers are separated, no detach for q_h_in is also fine
        # 1-step TD target; (1 - done) zeroes the bootstrap term on terminal steps.
        target_q = reward+(1-done)*gamma*predict_target_q  # for q
        # reward = reward_scale * (reward - reward.mean(dim=0)) /reward.std(dim=0) # normalize with batch mean and std
        q_loss = self.q_criterion(predict_q, target_q.detach())
        # Deterministic policy gradient: maximize Q of the policy's own action.
        policy_loss = -torch.mean(predict_new_q)
        # train qnet
        self.q_optimizer.zero_grad()
        q_loss.backward(retain_graph=True)  # no need for retain_graph here actually
        self.q_optimizer.step()
        # train policy_net
        self.policy_optimizer.zero_grad()
        policy_loss.backward(retain_graph=True)
        self.policy_optimizer.step()
        # update the target_qnet
        if self.update_cnt%target_update_delay==0:
            self.target_qnet=self.target_soft_update(self.qnet, self.target_qnet, soft_tau)
            self.target_policy_net=self.target_soft_update(self.policy_net, self.target_policy_net, soft_tau)
        return q_loss.detach().cpu().numpy(), policy_loss.detach().cpu().numpy()
    def save_model(self, path):
        """Save the Q, target-Q and policy state dicts under `path` + suffix."""
        torch.save(self.qnet.state_dict(), path+'_q')
        torch.save(self.target_qnet.state_dict(), path+'_target_q')
        torch.save(self.policy_net.state_dict(), path+'_policy')
    def load_model(self, path):
        """Load the state dicts saved by save_model and switch to eval mode."""
        self.qnet.load_state_dict(torch.load(path+'_q'))
        self.target_qnet.load_state_dict(torch.load(path+'_target_q'))
        self.policy_net.load_state_dict(torch.load(path+'_policy'))
        self.qnet.eval()
        self.target_qnet.eval()
        self.policy_net.eval()
def plot(rewards):
    """Draw the episode-reward curve and write it to 'rdpg.png'.

    The current figure is cleared afterwards so repeated calls during
    training do not accumulate lines on top of each other.
    """
    plt.figure(figsize=(20, 5))
    plt.plot(rewards)
    plt.savefig('rdpg.png')
    plt.clf()  # start from a clean figure on the next call
class NormalizedActions(gym.ActionWrapper):  # gym env wrapper
    """Rescale agent actions between the canonical [-1, 1] range and the
    wrapped environment's native [low, high] action bounds."""

    def _action(self, action):
        """Map an action from [-1, 1] onto [low, high], clipped to bounds."""
        low, high = self.action_space.low, self.action_space.high
        rescaled = low + (action + 1.0) * 0.5 * (high - low)
        return np.clip(rescaled, low, high)

    def _reverse_action(self, action):
        """Map an env-space action from [low, high] back onto [-1, 1]."""
        low, high = self.action_space.low, self.action_space.high
        normalized = 2 * (action - low) / (high - low) - 1
        return np.clip(normalized, low, high)
if __name__ == '__main__':
    # Entry point: build the environment and the RDPG agent, then train
    # and/or evaluate depending on the --train / --test CLI flags.
    NUM_JOINTS=2
    LINK_LENGTH=[200, 140]
    INI_JOING_ANGLES=[0.1, 0.1]
    SCREEN_SIZE=1000
    # SPARSE_REWARD=False
    # SCREEN_SHOT=False
    ENV = ['Pendulum', 'Reacher'][0]  # hard-coded selection: Pendulum
    if ENV == 'Reacher':
        env=Reacher(screen_size=SCREEN_SIZE, num_joints=NUM_JOINTS, link_lengths = LINK_LENGTH, \
            ini_joint_angles=INI_JOING_ANGLES, target_pos = [369,430], render=True)
        action_space = spaces.Box(low=-1.0, high=1.0, shape=(env.num_actions,), dtype=np.float32)
        state_space = spaces.Box(low=-np.inf, high=np.inf, shape=(env.num_observations, ))
    elif ENV == 'Pendulum':
        # env = NormalizedActions(gym.make("Pendulum-v0"))
        env = gym.make("Pendulum-v0")
        action_space = env.action_space
        state_space = env.observation_space
    hidden_dim = 64
    explore_steps = 0  # for random exploration
    batch_size = 3  # each sample in batch is an episode for lstm policy (normally it's timestep)
    update_itr = 1  # update iteration
    replay_buffer_size=1e6
    replay_buffer = ReplayBufferLSTM2(replay_buffer_size)
    model_path='./model/rdpg'
    torch.autograd.set_detect_anomaly(True)
    alg = RDPG(replay_buffer, state_space, action_space, hidden_dim)
    if args.train:
        # alg.load_model(model_path)
        # hyper-parameters
        max_episodes = 1000
        max_steps = 100
        frame_idx = 0
        rewards=[]
        for i_episode in range (max_episodes):
            q_loss_list=[]
            policy_loss_list=[]
            state = env.reset()
            # NOTE(review): this scalar is dead code — it is overwritten by
            # the `episode_reward = []` list a few lines below.
            episode_reward = 0
            last_action = env.action_space.sample()
            # Per-episode trajectories pushed into the replay buffer as a whole.
            episode_state = []
            episode_action = []
            episode_last_action = []
            episode_reward = []
            episode_next_state = []
            episode_done = []
            hidden_out = (torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda(), \
                torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda())  # initialize hidden state for lstm, (hidden, cell), each is (layer, batch, dim)
            for step in range(max_steps):
                hidden_in = hidden_out
                action, hidden_out = alg.policy_net.get_action(state, last_action, hidden_in)
                next_state, reward, done, _ = env.step(action)
                if ENV !='Reacher':
                    env.render()
                # NOTE(review): with `step>0` the "initial" hidden states are
                # re-assigned on every step after the first, so they end up
                # holding the *last* step's hiddens (and would be unbound for a
                # 1-step episode). Upstream RDPG code uses `step == 0` — confirm
                # which behaviour is intended.
                if step>0:
                    ini_hidden_in = hidden_in
                    ini_hidden_out = hidden_out
                episode_state.append(state)
                episode_action.append(action)
                episode_last_action.append(last_action)
                episode_reward.append(reward)
                episode_next_state.append(next_state)
                episode_done.append(done)
                state = next_state
                last_action = action
                frame_idx += 1
                if len(replay_buffer) > batch_size:
                    for _ in range(update_itr):
                        q_loss, policy_loss = alg.update(batch_size)
                        q_loss_list.append(q_loss)
                        policy_loss_list.append(policy_loss)
                if done:  # should not break for lstm cases to make every episode with same length
                    break
            # Periodically dump the reward curve and a model checkpoint.
            if i_episode % 20 == 0:
                plot(rewards)
                alg.save_model(model_path)
            print('Eps: ', i_episode, '| Reward: ', np.sum(episode_reward), '| Loss: ', np.average(q_loss_list), np.average(policy_loss_list))
            replay_buffer.push(ini_hidden_in, ini_hidden_out, episode_state, episode_action, episode_last_action, \
                episode_reward, episode_next_state, episode_done)
            rewards.append(np.sum(episode_reward))
        alg.save_model(model_path)
    if args.test:
        test_episodes = 10
        max_steps=100
        alg.load_model(model_path)
        for i_episode in range (test_episodes):
            q_loss_list=[]
            policy_loss_list=[]
            state = env.reset()
            episode_reward = 0
            last_action = np.zeros(action_space.shape[0])
            hidden_out = (torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda(), \
                torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda())  # initialize hidden state for lstm, (hidden, cell), each is (layer, batch, dim)
            for step in range(max_steps):
                hidden_in = hidden_out
                action, hidden_out= alg.policy_net.get_action(state, last_action, hidden_in, noise_scale=0.0)  # no noise for testing
                next_state, reward, done, _ = env.step(action)
                env.render()
                last_action = action
                state = next_state
                episode_reward += reward
                if done:
                    break
            print('Eps: ', i_episode, '| Reward: ', episode_reward)
| [
"numpy.clip",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"gym.make",
"argparse.ArgumentParser",
"torch.mean",
"matplotlib.pyplot.plot",
"torch.autograd.set_detect_anomaly",
"matplotlib.pyplot.savefig",
"numpy.float32",
"numpy.average",
"torch.device",
"reacher.Reacher",
"torch.load",
... | [((927, 1013), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train or test neural net motor controller."""'}), "(description=\n 'Train or test neural net motor controller.')\n", (950, 1013), False, 'import argparse\n'), ((883, 902), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (895, 902), False, 'import torch\n'), ((5605, 5632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (5615, 5632), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5653), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (5644, 5653), True, 'import matplotlib.pyplot as plt\n'), ((5658, 5681), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rdpg.png"""'], {}), "('rdpg.png')\n", (5669, 5681), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5712), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5710, 5712), True, 'import matplotlib.pyplot as plt\n'), ((7421, 7460), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7454, 7460), False, 'import torch\n'), ((2009, 2021), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2019, 2021), True, 'import torch.nn as nn\n'), ((5967, 5993), 'numpy.clip', 'np.clip', (['action', 'low', 'high'], {}), '(action, low, high)\n', (5974, 5993), True, 'import numpy as np\n'), ((6221, 6247), 'numpy.clip', 'np.clip', (['action', 'low', 'high'], {}), '(action, low, high)\n', (6228, 6247), True, 'import numpy as np\n'), ((6529, 6690), 'reacher.Reacher', 'Reacher', ([], {'screen_size': 'SCREEN_SIZE', 'num_joints': 'NUM_JOINTS', 'link_lengths': 'LINK_LENGTH', 'ini_joint_angles': 'INI_JOING_ANGLES', 'target_pos': '[369, 430]', 'render': '(True)'}), '(screen_size=SCREEN_SIZE, num_joints=NUM_JOINTS, link_lengths=\n LINK_LENGTH, ini_joint_angles=INI_JOING_ANGLES, target_pos=[369, 430],\n render=True)\n', (6536, 6690), False, 'from reacher import Reacher\n'), ((6718, 6792), 
'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(env.num_actions,)', 'dtype': 'np.float32'}), '(low=-1.0, high=1.0, shape=(env.num_actions,), dtype=np.float32)\n', (6728, 6792), False, 'from gym import spaces\n'), ((6816, 6883), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(env.num_observations,)'}), '(low=-np.inf, high=np.inf, shape=(env.num_observations,))\n', (6826, 6883), False, 'from gym import spaces\n'), ((826, 851), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (849, 851), False, 'import torch\n'), ((4300, 4325), 'torch.mean', 'torch.mean', (['predict_new_q'], {}), '(predict_new_q)\n', (4310, 4325), False, 'import torch\n'), ((5331, 5354), 'torch.load', 'torch.load', (["(path + '_q')"], {}), "(path + '_q')\n", (5341, 5354), False, 'import torch\n'), ((5395, 5425), 'torch.load', 'torch.load', (["(path + '_target_q')"], {}), "(path + '_target_q')\n", (5405, 5425), False, 'import torch\n'), ((5465, 5493), 'torch.load', 'torch.load', (["(path + '_policy')"], {}), "(path + '_policy')\n", (5475, 5493), False, 'import torch\n'), ((6987, 7010), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (6995, 7010), False, 'import gym\n'), ((10477, 10508), 'numpy.zeros', 'np.zeros', (['action_space.shape[0]'], {}), '(action_space.shape[0])\n', (10485, 10508), True, 'import numpy as np\n'), ((2993, 3017), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (3010, 3017), False, 'import torch\n'), ((3050, 3079), 'torch.FloatTensor', 'torch.FloatTensor', (['next_state'], {}), '(next_state)\n', (3067, 3079), False, 'import torch\n'), ((3112, 3137), 'torch.FloatTensor', 'torch.FloatTensor', (['action'], {}), '(action)\n', (3129, 3137), False, 'import torch\n'), ((3175, 3205), 'torch.FloatTensor', 'torch.FloatTensor', (['last_action'], {}), '(last_action)\n', (3192, 3205), False, 'import torch\n'), ((9812, 9834), 'numpy.sum', 'np.sum', 
(['episode_reward'], {}), '(episode_reward)\n', (9818, 9834), True, 'import numpy as np\n'), ((9848, 9871), 'numpy.average', 'np.average', (['q_loss_list'], {}), '(q_loss_list)\n', (9858, 9871), True, 'import numpy as np\n'), ((9873, 9901), 'numpy.average', 'np.average', (['policy_loss_list'], {}), '(policy_loss_list)\n', (9883, 9901), True, 'import numpy as np\n'), ((10113, 10135), 'numpy.sum', 'np.sum', (['episode_reward'], {}), '(episode_reward)\n', (10119, 10135), True, 'import numpy as np\n'), ((3238, 3263), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (3255, 3263), False, 'import torch\n'), ((8159, 8209), 'torch.zeros', 'torch.zeros', (['[1, 1, hidden_dim]'], {'dtype': 'torch.float'}), '([1, 1, hidden_dim], dtype=torch.float)\n', (8170, 8209), False, 'import torch\n'), ((8236, 8286), 'torch.zeros', 'torch.zeros', (['[1, 1, hidden_dim]'], {'dtype': 'torch.float'}), '([1, 1, hidden_dim], dtype=torch.float)\n', (8247, 8286), False, 'import torch\n'), ((10535, 10585), 'torch.zeros', 'torch.zeros', (['[1, 1, hidden_dim]'], {'dtype': 'torch.float'}), '([1, 1, hidden_dim], dtype=torch.float)\n', (10546, 10585), False, 'import torch\n'), ((10612, 10662), 'torch.zeros', 'torch.zeros', (['[1, 1, hidden_dim]'], {'dtype': 'torch.float'}), '([1, 1, hidden_dim], dtype=torch.float)\n', (10623, 10662), False, 'import torch\n'), ((3330, 3346), 'numpy.float32', 'np.float32', (['done'], {}), '(done)\n', (3340, 3346), True, 'import numpy as np\n')] |
# START unsga3
import numpy as np

from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.algorithms.moo.unsga3 import UNSGA3
from pymoo.factory import get_problem
from pymoo.optimize import minimize

# Single-objective Ackley problem in 30 variables.
problem = get_problem("ackley", n_var=30)

# create the reference directions to be used for the optimization - just a single one here
ref_dirs = np.array([[1.0]])

# create the algorithm object
algorithm = UNSGA3(ref_dirs, pop_size=100)

# execute the optimization
res = minimize(problem,
               algorithm,
               termination=('n_gen', 150),
               save_history=True,
               seed=1)

print("UNSGA3: Best solution found: \nX = %s\nF = %s" % (res.X, res.F))
# END unsga3

# START no_unsga3
# Same run with plain NSGA-III, for comparison.
_res = minimize(problem,
                NSGA3(ref_dirs, pop_size=100),
                termination=('n_gen', 150),
                save_history=True,
                seed=1)

# BUGFIX: the original printed res.X / res.F here, i.e. the U-NSGA-III result
# a second time, instead of the NSGA-III result stored in _res.
print("NSGA3: Best solution found: \nX = %s\nF = %s" % (_res.X, _res.F))
# END no_unsga3

# START unsga3_comp
import numpy as np
import matplotlib.pyplot as plt

# Best objective value found per generation for both runs.
ret = [np.min(e.pop.get("F")) for e in res.history]
_ret = [np.min(e.pop.get("F")) for e in _res.history]

plt.plot(np.arange(len(ret)), ret, label="unsga3")
plt.plot(np.arange(len(_ret)), _ret, label="nsga3")
plt.title("Convergence")
plt.xlabel("Generation")
plt.ylabel("F")
plt.legend()
plt.show()
# END unsga3_comp
| [
"pymoo.algorithms.moo.nsga3.NSGA3",
"pymoo.optimize.minimize",
"pymoo.algorithms.moo.unsga3.UNSGA3",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"pymoo.factory.get_problem",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((212, 243), 'pymoo.factory.get_problem', 'get_problem', (['"""ackley"""'], {'n_var': '(30)'}), "('ackley', n_var=30)\n", (223, 243), False, 'from pymoo.factory import get_problem\n'), ((347, 364), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (355, 364), True, 'import numpy as np\n'), ((408, 438), 'pymoo.algorithms.moo.unsga3.UNSGA3', 'UNSGA3', (['ref_dirs'], {'pop_size': '(100)'}), '(ref_dirs, pop_size=100)\n', (414, 438), False, 'from pymoo.algorithms.moo.unsga3 import UNSGA3\n'), ((473, 560), 'pymoo.optimize.minimize', 'minimize', (['problem', 'algorithm'], {'termination': "('n_gen', 150)", 'save_history': '(True)', 'seed': '(1)'}), "(problem, algorithm, termination=('n_gen', 150), save_history=True,\n seed=1)\n", (481, 560), False, 'from pymoo.optimize import minimize\n'), ((1270, 1294), 'matplotlib.pyplot.title', 'plt.title', (['"""Convergence"""'], {}), "('Convergence')\n", (1279, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (1305, 1319), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1335), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F"""'], {}), "('F')\n", (1330, 1335), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1357, 1359), True, 'import matplotlib.pyplot as plt\n'), ((764, 793), 'pymoo.algorithms.moo.nsga3.NSGA3', 'NSGA3', (['ref_dirs'], {'pop_size': '(100)'}), '(ref_dirs, pop_size=100)\n', (769, 793), False, 'from pymoo.algorithms.moo.nsga3 import NSGA3\n')] |
"""
This file provides a wrapper to run a physics simulator. Currently only Gazebo
is supported, but Bullet support should be added soon.
"""
import ast
import configparser
import math
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import logging
import pickle as pkl
import sys
import time
import traceback
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Human Brain Project, SP10"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Research"
__date__ = "June 14th, 2017"
class Controller(object):
    """Base open-loop controller driven by a configuration dictionary.

    The configuration must contain a "Controller" section which may provide:
      - "file":      path of a pickled controller to restore,
      - "params":    controller parameters, either a literal string, a list,
                     or a numpy array,
      - "time_step": controller period in seconds (default 0.01),
      - "openloop":  whether the controller ignores feedback (default True).

    Subclasses are expected to override step() (and the *_norm_params helpers)
    to produce actual motor commands.
    """
    def __init__(self, configuration):
        # Retrieve the config
        self.config = configuration
        self.log = logging.getLogger('Controller')
        params = configuration["Controller"]
        if "file" in params:
            self.load(params["file"])
        else:
            if "params" in params:
                if type(params["params"]) == str:
                    # Parameters given as a Python literal string.
                    self.params = ast.literal_eval(params["params"])
                elif isinstance(params["params"], (np.ndarray, list)):
                    # BUGFIX: the original test `type(x) == (np.ndarray or list)`
                    # evaluates to `type(x) == np.ndarray`, so plain lists were
                    # silently replaced with an empty dict.
                    self.params = params["params"]
                else:
                    self.params = dict()
            else:
                self.params = dict()
        if "time_step" in params:
            self.time_step = float(params["time_step"])
        else:
            self.time_step = 0.01
        if "openloop" in params:
            self.openloop = params["openloop"]
        else:
            self.openloop = True
        self.it = 0
        self.t = 0
        self.t_init = 0
        self.st_time_step = 0
        self.hist_cmd = []
    def __getstate__(self):
        """This is called before pickling: drop the unpicklable logger and
        the (potentially huge) command history."""
        state = self.__dict__.copy()
        del state['hist_cmd']
        del state["log"]
        return state
    def __setstate__(self, state):
        """This is called while unpickling."""
        self.__dict__.update(state)
        # BUGFIX: recreate the attributes stripped by __getstate__ so the
        # unpickled controller is fully usable (step/plot previously raised
        # AttributeError after a load).
        self.log = logging.getLogger('Controller')
        self.hist_cmd = []
    def get_params_len(self):
        """Return the total number of parameter entries over all motors."""
        length = 0
        for m in self.params:
            length += len(m)
        return length
    def get_norm_params(self):
        # Surcharge it depending on the controller
        return
    def set_norm_params(self, liste):
        # Surcharge it depending on the controller
        return
    def get_params(self):
        """Return the raw controller parameters."""
        return self.params
    def set_params(self, params):
        """Replace the raw controller parameters."""
        self.params = params
    def step(self, t):
        """
        This function is called to update the controller state at each time_step.
        The base implementation records the time and returns a null command
        (one zero per parameter entry); subclasses must override it.
        """
        # This function must be surcharged
        self.t = t
        cmd = []
        for i in range(len(self.params)):
            cmd.append(0)
        return cmd
    def run(self, sim_time, physics):
        """
        This function is a blocking function that executes the controller for a
        given sim_time, feeding commands to the given physics wrapper.
        """
        st = 0
        try:
            self.t_init = time.time()
            # Wait for the physics to be started
            while 1:
                time.sleep(self.time_step)
                if physics.is_sim_started():
                    break
            while physics.sim_duration < sim_time:
                st = physics.sim_duration
                cmd = self.step(st)
                physics.set_sim_cmd(cmd)
                # Do something for real-time here
        except BaseException:
            # Explicit equivalent of the original bare `except:`; it must also
            # catch KeyboardInterrupt so a user abort shuts the sim down.
            self.log.error("Simulation aborted by user. Physics time: " + str(physics.sim_duration) +
                           "s. Controller time: not set!")
            physics.kill_sim()
            traceback.print_exc()
            sys.exit()
        rt = time.time() - self.t_init
        self.log.info("Simulation of {0:.2f}s (ST)".format(st) +
                      " finished in {0:.2f}s (RT)".format(rt) +
                      " with acceleration of {0:.3f} x".format(st/rt))
    def load(self, filename):
        """
        Load itself from a pickle file.
        """
        # `with` guarantees the file is closed even if unpickling fails.
        with open(filename, 'rb') as f:
            tmp_dict = pkl.load(f)
        self.__dict__.update(tmp_dict.__dict__)
    def save(self, filename):
        """
        Save class and variables with pickle (protocol 2).
        """
        with open(filename, 'wb') as f:
            pkl.dump(self, f, 2)
    def plot(self, filename="history.png"):
        """Plot the recorded command history and save it to `filename`."""
        plt.plot(np.array(self.hist_cmd))
        plt.savefig(filename, format='png', dpi=300)
        plt.close()
class Sine(Controller):
    """Open-loop sine-wave controller for 4 motors.

    Motor i produces a_i * sin(f_i * t + phi_i). The first motor's parameters
    are used as normalization constants for the flat, optimizer-facing
    parameter representation handled by set_norm_params/get_norm_params.
    """
    def __init__(self, configuration):
        super(Sine, self).__init__(configuration)
        self.n_motors = 4
        # Normalization constants derived from the first motor's parameters.
        self.norm_f = 2 * math.pi * self.params[0]["f"]
        self.norm_a = self.params[0]["a"]
        self.norm_phi = self.params[0]["phi"]
    def set_norm_params(self, liste):
        """Rebuild self.params from a flat list of normalized values:
        3 consecutive entries (f, a, phi) per motor, each multiplied by the
        corresponding norm_* constant."""
        j = 0
        params = []
        for i in range(self.n_motors):
            f = liste[j] * self.norm_f
            a = liste[j+1] * self.norm_a
            phi = liste[j+2] * self.norm_phi
            params.append({"f": f, "a": a, "phi": phi})
            j += 3
        self.params = params
    def get_norm_params(self):
        """Return the motor parameter values flattened into a single list.

        BUGFIX: the original appended the whole parameter dict once per key
        (``liste.append(m)``) instead of the key's value; it now appends the
        values themselves.
        NOTE(review): unlike set_norm_params, the values are not divided by
        the norm_* constants here — confirm whether de-normalization is
        expected by callers.
        """
        liste = []
        for m in self.params:
            for i in m:
                liste.append(m[i])
        return liste
    def step(self, t):
        """Return the 4 sine motor commands at time t and log them."""
        self.t = t
        cmd = []
        for i in range(self.n_motors):
            cmd.append(self.params[i]["a"] * math.sin(self.params[i]["f"] * self.t + self.params[i]["phi"]))
        self.hist_cmd.append(cmd)
        return cmd
class CPG(Controller):
    """Central Pattern Generator controller for 4 motors.

    Each motor is a phase oscillator with amplitude r, phase phi and offset o,
    integrated with explicit Euler at period `integ_time`, and coupled to the
    other oscillators through the phase-bias matrix psi built from the
    per-motor "phase_offset" parameters.
    """
    def __init__(self, configuration):
        super(CPG, self).__init__(configuration)
        self.n_motors = 4
        # Oscillator state: amplitude, phase (seeded from the duty factor) and offset.
        self.r = [1 for _ in range(self.n_motors)]
        self.phi = [np.pi * float(self.params[i]["duty_factor"]) for i in range(self.n_motors)]
        self.o = [float(self.params[i]["o"]) for i in range(self.n_motors)]
        # Feedback gains and feedback terms (feedback is currently all zero).
        self.kappa_r = [1] * 4
        self.kappa_phi = [1] * 4
        self.kappa_o = [1] * 4
        self.f_r = [0] * 4
        self.f_phi = [0] * 4
        self.f_o = [0] * 4
        # Euler integration step and amplitude convergence rate.
        self.dt = float(self.config["Controller"]["integ_time"])
        self.gamma = 0.1
        self.prev_t = -1
        self.coupling = [self.params[i]["coupling"] for i in range(self.n_motors)]
        # Anti-symmetric matrix of desired phase differences between
        # oscillators, derived from the per-motor phase offsets.
        a = float(self.params[1]["phase_offset"])
        b = float(self.params[2]["phase_offset"])
        c = float(self.params[3]["phase_offset"])
        d = a - b
        e = a - c
        f = b - c
        self.psi = [[0, a, b, c],
                    [-1*a, 0, d, e],
                    [-1*b, -1*d, 0, f],
                    [-1*c, -1*e, -1*f, 0]]
    def step_cpg(self):
        """Advance every oscillator by one Euler step of size dt and return
        the resulting list of motor commands (also appended to hist_cmd)."""
        cmd = []
        for i in range(self.n_motors):
            # Fetch current motor values
            dt = self.dt
            gamma = self.gamma
            mu = float(self.params[i]["mu"])
            omega = float(self.params[i]["omega"])
            d = float(self.params[i]["duty_factor"])
            r = self.r[i]
            phi = self.phi[i]
            o = self.o[i]
            kappa_r = self.kappa_r[i]
            kappa_phi = self.kappa_phi[i]
            kappa_o = self.kappa_o[i]
            f_r = self.f_r[i]
            f_phi = self.f_phi[i]
            f_o = self.f_o[i]
            cpl = self.coupling[i]
            # Compute step evolution of r, phi and o
            d_r = gamma * (mu + kappa_r * f_r - r * r) * r
            d_phi = omega + kappa_phi * f_phi
            d_o = kappa_o * f_o
            # Add phase coupling
            for j in range(self.n_motors):
                d_phi += cpl[j] * np.sin(self.phi[j] - phi - self.psi[i][j])
            # Update r, phi and o
            self.r[i] += dt * d_r
            self.phi[i] += dt * d_phi
            self.o[i] += dt * d_o
            # Threshold phi to 2pi max: remap the phase so that the fraction
            # `d` (duty factor) of each cycle is spent in the first half-wave.
            phi_thr = 0
            phi_2pi = self.phi[i] % (2 * math.pi)
            if phi_2pi < ((2 * math.pi) * d):
                phi_thr = phi_2pi / (2 * d)
            else:
                phi_thr = (phi_2pi + (2 * math.pi) * (1 - 2 * d)) / (2 * (1 - d))
            # Save action
            action = self.r[i] * np.cos(phi_thr) + self.o[i]
            cmd.append(action)
        self.hist_cmd.append(cmd)
        return cmd
    def step(self, t):
        """Integrate the CPG network up to controller time t.

        Runs as many dt-sized sub-steps as fit between the previous call and
        t; if the caller steps faster than dt, a single sub-step is forced and
        an error is logged.
        """
        self.t = t
        n_steps = (int(self.t/self.dt) - self.prev_t)
        cmd = []
        if n_steps == 0:
            n_steps = 1
            self.log.error("Controller time step (" + str((self.t - self.prev_t)*1000) +
                           "ms) is too low for numerical integration (dt = " + str(self.dt*1000) + " ms). " +
                           "Truncating control signal to avoid stopping software!")
        for _ in range(n_steps):
            cmd = self.step_cpg()
        self.prev_t = int(self.t/self.dt)
        return cmd
class AdaptableCPG(CPG):
    """CPG variant intended to modulate its frequency from sensor feedback
    in order to observe gait transitions.

    BUGFIX: the original decorated step() with ``@override``, a name that is
    neither defined nor imported in this module, so importing the module
    raised NameError at class-definition time. The decorator is removed.
    ``sensors`` is now optional so the inherited run() loop, which calls
    ``step(t)`` with a single argument, keeps working.
    """
    def __init__(self, configuration):
        super(AdaptableCPG, self).__init__(configuration)
    def step(self, t, sensors=None):
        """Integrate the CPG network up to time t and return the commands.

        `sensors` is accepted for future feedback-driven frequency adaptation
        and is currently unused.
        """
        self.t = t
        # Modify CPG frequency according to [JAPANESE PAPER] to observe gait transitions
        # Update CPG
        n_steps = (int(self.t/self.dt) - self.prev_t)
        cmd = []
        if n_steps == 0:
            n_steps = 1
            self.log.error("Controller time step (" + str((self.t - self.prev_t)*1000) +
                           "ms) is too low for numerical integration (dt = " + str(self.dt*1000) + " ms). " +
                           "Truncating control signal to avoid stopping software!")
        for _ in range(n_steps):
            cmd = self.step_cpg()
        self.prev_t = int(self.t/self.dt)
        return cmd
if __name__ == '__main__':
    # Self-test / micro-benchmark: run each controller for sim_time seconds of
    # controller time and report the wall-clock cost, then plot the commands.
    # Test Sine evolution
    config = {"Controller":
                  {"params": "[{'f': 6, 'a': 0.4, 'phi': 0}, \
                               {'f': 6, 'a': 0.4, 'phi': 0},\
                               {'f': 6, 'a': 0.4, 'phi': 3.14},\
                               {'f': 6, 'a': 0.4, 'phi': 3.14}]",
                   "time_step": 0.001,
                   "sim_time": 5}}
    t = 0
    dt = config["Controller"]["time_step"]
    st = config["Controller"]["sim_time"]
    n = int(st / dt)
    sc = Sine(config)
    t_init = time.time()
    for i in range(n):
        sc.step(t)
        t += dt
    t_tot = time.time() - t_init
    print(str(n) + " iterations computed in " + str(t_tot) + " s")
    sc.plot("sine.png")
    # Test CPG evolution
    config = {"Controller": {"params": "[{'mu': 0.4, 'o': 0, 'omega': 6.35, 'duty_factor': 0.6, "
                                       "'phase_offset': 0, 'coupling': [5,5,5,0]},"
                                       "{'mu': 0.4, 'o': 0, 'omega': 6.35, 'duty_factor': 0.6, "
                                       "'phase_offset': 6.28, 'coupling': [5,5,5,0]},"
                                       "{'mu': 0.4, 'o': 0, 'omega': 6.35, 'duty_factor': 0.9, "
                                       "'phase_offset': 3.14, 'coupling': [5,5,5,0]},"
                                       "{'mu': 0.4, 'o': 0, 'omega': 6.35, 'duty_factor': 0.9, "
                                       "'phase_offset': 3.14, 'coupling': [5,5,5,0]}]",
                             "integ_time": 0.001,
                             "time_step": 0.01,
                             "sim_time": 10}
              }
    t = 0
    dt = config["Controller"]["time_step"]
    st = config["Controller"]["sim_time"]
    n = int(st / dt)
    sc = CPG(config)
    t_init = time.time()
    for i in range(n):
        sc.step(t)
        t += dt
    t_tot = time.time() - t_init
    print(str(n) + " iterations computed in " + str(t_tot) + " s")
    sc.plot("cpg.png")
| [
"logging.getLogger",
"pickle.dump",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"pickle.load",
"time.sleep",
"math.sin",
"matplotlib.pyplot.close",
"numpy.array",
"ast.literal_eval",
"numpy.cos",
"sys.exit",
"numpy.sin",
"traceback.print_exc",
"time.time"
] | [((206, 227), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (220, 227), False, 'import matplotlib\n'), ((10236, 10247), 'time.time', 'time.time', ([], {}), '()\n', (10245, 10247), False, 'import time\n'), ((11503, 11514), 'time.time', 'time.time', ([], {}), '()\n', (11512, 11514), False, 'import time\n'), ((740, 771), 'logging.getLogger', 'logging.getLogger', (['"""Controller"""'], {}), "('Controller')\n", (757, 771), False, 'import logging\n'), ((4069, 4080), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (4077, 4080), True, 'import pickle as pkl\n'), ((4290, 4310), 'pickle.dump', 'pkl.dump', (['self', 'f', '(2)'], {}), '(self, f, 2)\n', (4298, 4310), True, 'import pickle as pkl\n'), ((4425, 4469), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'format': '"""png"""', 'dpi': '(300)'}), "(filename, format='png', dpi=300)\n", (4436, 4469), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4489), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4487, 4489), True, 'import matplotlib.pyplot as plt\n'), ((10318, 10329), 'time.time', 'time.time', ([], {}), '()\n', (10327, 10329), False, 'import time\n'), ((11585, 11596), 'time.time', 'time.time', ([], {}), '()\n', (11594, 11596), False, 'import time\n'), ((2997, 3008), 'time.time', 'time.time', ([], {}), '()\n', (3006, 3008), False, 'import time\n'), ((3697, 3708), 'time.time', 'time.time', ([], {}), '()\n', (3706, 3708), False, 'import time\n'), ((4392, 4415), 'numpy.array', 'np.array', (['self.hist_cmd'], {}), '(self.hist_cmd)\n', (4400, 4415), True, 'import numpy as np\n'), ((3096, 3122), 'time.sleep', 'time.sleep', (['self.time_step'], {}), '(self.time_step)\n', (3106, 3122), False, 'import time\n'), ((3638, 3659), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3657, 3659), False, 'import traceback\n'), ((3672, 3682), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3680, 3682), False, 'import sys\n'), ((1018, 1052), 'ast.literal_eval', 
'ast.literal_eval', (["params['params']"], {}), "(params['params'])\n", (1034, 1052), False, 'import ast\n'), ((5427, 5489), 'math.sin', 'math.sin', (["(self.params[i]['f'] * self.t + self.params[i]['phi'])"], {}), "(self.params[i]['f'] * self.t + self.params[i]['phi'])\n", (5435, 5489), False, 'import math\n'), ((7620, 7662), 'numpy.sin', 'np.sin', (['(self.phi[j] - phi - self.psi[i][j])'], {}), '(self.phi[j] - phi - self.psi[i][j])\n', (7626, 7662), True, 'import numpy as np\n'), ((8168, 8183), 'numpy.cos', 'np.cos', (['phi_thr'], {}), '(phi_thr)\n', (8174, 8183), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from Pruning.utils import prune_rate, arg_nonzero_min
def weight_prune(model, pruning_perc):
    """Global magnitude pruning (arXiv:1606.09274).

    Computes a single magnitude threshold over *all* multi-dimensional
    parameters of `model` (the `pruning_perc`-th percentile of absolute
    weights) and returns one float mask per such parameter, in parameter
    order: 1.0 where |w| > threshold (keep), 0.0 otherwise (prune).
    1-D parameters (biases, norm scales) are skipped entirely.
    """
    magnitudes = []
    for param in model.parameters():
        if len(param.data.size()) != 1:
            magnitudes += list(torch.abs(param).cpu().data.numpy().flatten())
    cutoff = np.percentile(np.array(magnitudes), pruning_perc)
    # Build the keep/prune masks against the global cutoff.
    return [
        (torch.abs(param).data > cutoff).float()
        for param in model.parameters()
        if len(param.data.size()) != 1
    ]
def per_layer_weight_prune(model, pruning_perc):
    """Two-class magnitude pruning (work in progress).

    Splits the model's multi-dimensional parameters into two groups by name —
    "pointwise" weights (inside a 'layer*' block and named 'conv1'/'conv3')
    and all other weights — and prunes each group against its own percentile
    threshold:
      pruning_perc[0] : prune rate for the other weights
      pruning_perc[1] : prune rate for the pointwise weights
    Returns (masks, [n_other_weights, n_point_weights]) with one float mask
    (1 keep / 0 prune) per multi-dimensional parameter, in parameter order.
    """
    def _is_pointwise(name):
        # Name-based classification used for both collection and masking.
        return 'layer' in name and ('conv1' in name or 'conv3' in name)

    other_weights = []
    point_weights = []
    for name, param in model.named_parameters():
        if len(param.data.size()) == 1:
            continue  # skip bias-like 1-D parameters
        bucket = point_weights if _is_pointwise(name) else other_weights
        bucket.extend(torch.abs(param).cpu().data.numpy().flatten())
    threshold_other = np.percentile(np.array(other_weights), pruning_perc[0])
    threshold_point = np.percentile(np.array(point_weights), pruning_perc[1])
    num_of_params = [len(other_weights), len(point_weights)]
    # generate masks against the per-group thresholds
    masks = []
    for name, param in model.named_parameters():
        if len(param.data.size()) == 1:
            continue
        threshold = threshold_point if _is_pointwise(name) else threshold_other
        masks.append((torch.abs(param).data > threshold).float())
    return masks, num_of_params
def prune_one_filter(model, masks):
    '''
    Pruning one least ``important'' feature map by the scaled l2norm of
    kernel weights
    arXiv:1611.06440

    Scans every 4-D (conv) parameter, finds within each layer the filter with
    the smallest normalized squared-L2 importance (skipping already-pruned
    zero entries via arg_nonzero_min), then zeroes the mask of the single
    globally weakest filter. Returns the updated list of masks (one per conv
    parameter, created as all-ones on the first call).
    '''
    NO_MASKS = False
    # construct masks if there is not yet
    if not masks:
        masks = []
        NO_MASKS = True
    values = []
    for p in model.parameters():
        if len(p.data.size()) == 4: # nasty way of selecting conv layer
            p_np = p.data.cpu().numpy()
            # construct masks if there is not
            if NO_MASKS:
                masks.append(np.ones(p_np.shape).astype('float32'))
            # find the scaled l2 norm for each filter this layer:
            # sum of squares over (in_channels, kH, kW), divided by the
            # number of weights per filter.
            value_this_layer = np.square(p_np).sum(axis=1).sum(axis=1)\
                .sum(axis=1)/(p_np.shape[1]*p_np.shape[2]*p_np.shape[3])
            # normalization (important) — make importances comparable
            # across layers of different scales.
            value_this_layer = value_this_layer / \
                np.sqrt(np.square(value_this_layer).sum())
            # smallest non-zero importance in this layer and its filter index
            min_value, min_ind = arg_nonzero_min(list(value_this_layer))
            values.append([min_value, min_ind])
    assert len(masks) == len(values), "something wrong here"
    values = np.array(values)
    # set mask corresponding to the filter to prune
    to_prune_layer_ind = np.argmin(values[:, 0])
    to_prune_filter_ind = int(values[to_prune_layer_ind, 1])
    masks[to_prune_layer_ind][to_prune_filter_ind] = 0.
    print('Prune filter #{} in layer #{}'.format(
        to_prune_filter_ind,
        to_prune_layer_ind))
    return masks
def filter_prune(model, pruning_perc):
    '''
    Prune filters one by one until reach pruning_perc
    (not iterative pruning)

    Repeatedly removes the globally weakest conv filter (via
    prune_one_filter), applies the masks to the model with
    model.set_masks, and re-measures the achieved prune rate until it
    reaches `pruning_perc` percent. Returns the final list of masks.
    '''
    masks = []
    current_pruning_perc = 0.
    while current_pruning_perc < pruning_perc:
        masks = prune_one_filter(model, masks)
        model.set_masks(masks)
        # prune_rate reports the achieved percentage of pruned weights.
        current_pruning_perc = prune_rate(model, verbose=False)
        print('{:.2f} pruned'.format(current_pruning_perc))
    return masks
| [
"torch.abs",
"numpy.ones",
"numpy.square",
"numpy.array",
"Pruning.utils.prune_rate",
"numpy.argmin"
] | [((3466, 3482), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3474, 3482), True, 'import numpy as np\n'), ((3561, 3584), 'numpy.argmin', 'np.argmin', (['values[:, 0]'], {}), '(values[:, 0])\n', (3570, 3584), True, 'import numpy as np\n'), ((477, 498), 'numpy.array', 'np.array', (['all_weights'], {}), '(all_weights)\n', (485, 498), True, 'import numpy as np\n'), ((1483, 1506), 'numpy.array', 'np.array', (['other_weights'], {}), '(other_weights)\n', (1491, 1506), True, 'import numpy as np\n'), ((1561, 1584), 'numpy.array', 'np.array', (['point_weights'], {}), '(point_weights)\n', (1569, 1584), True, 'import numpy as np\n'), ((4171, 4203), 'Pruning.utils.prune_rate', 'prune_rate', (['model'], {'verbose': '(False)'}), '(model, verbose=False)\n', (4181, 4203), False, 'from Pruning.utils import prune_rate, arg_nonzero_min\n'), ((645, 657), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (654, 657), False, 'import torch\n'), ((2190, 2202), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (2199, 2202), False, 'import torch\n'), ((1906, 1918), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (1915, 1918), False, 'import torch\n'), ((2052, 2064), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (2061, 2064), False, 'import torch\n'), ((2868, 2887), 'numpy.ones', 'np.ones', (['p_np.shape'], {}), '(p_np.shape)\n', (2875, 2887), True, 'import numpy as np\n'), ((3234, 3261), 'numpy.square', 'np.square', (['value_this_layer'], {}), '(value_this_layer)\n', (3243, 3261), True, 'import numpy as np\n'), ((3004, 3019), 'numpy.square', 'np.square', (['p_np'], {}), '(p_np)\n', (3013, 3019), True, 'import numpy as np\n'), ((404, 416), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (413, 416), False, 'import torch\n'), ((1394, 1406), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (1403, 1406), False, 'import torch\n'), ((1188, 1200), 'torch.abs', 'torch.abs', (['p'], {}), '(p)\n', (1197, 1200), False, 'import torch\n'), ((1295, 1307), 'torch.abs', 'torch.abs', (['p'], 
{}), '(p)\n', (1304, 1307), False, 'import torch\n')] |
import astropy.units as u
import numpy as np
import astropy.constants as const
class Rayleigh:
    '''
    Estimates Rayleigh-scattering opacities for the indicated atom using the
    equations from Chapter 10 of D. R. Lide, ed., CRC Handbook of Chemistry
    and Physics, Internet Version 2005,
    `hbcponline <http://hbcponline.com/faces/contents/ContentsSearch.xhtml>`_,
    CRC Press, Boca Raton, FL, 2005.

    These values do not depend on pressure or temperature, so only a
    wavenumber grid is needed to produce them. The grid can be given by the
    user or loaded from an already initialised
    :class:`~rapoc.models.model.Model` (such as :class:`~rapoc.Rosseland` or
    :class:`~rapoc.Planck`). The latter is useful when combining the Rayleigh
    scattering with other opacities.
    '''
    def __init__(self, atom, wavenumber_grid=None, model=None):
        '''
        Parameters
        ----------
        atom: str
            name of the considered atom (must be a key of ``a_table``)
        wavenumber_grid: list or numpy.array or astropy.units.Quantity
            data wavenumber grid; assumed to be in 1/cm when given unitless
        model: :class:`~rapoc.models.model.Model`
            built model to use to load the wavenumbers grid
            (takes precedence over `wavenumber_grid` when given).

        Examples
        ---------
        From an explicit wavenumbers grid:

        >>> rayleigh = Rayleigh('H', wavenumber_grid=[100000, 10000, 1000])

        Or from an already-built model:

        >>> input_data = Rosseland(input_data=exomol_file)
        >>> rayleigh = Rayleigh(atom='Na', model=input_data)
        '''
        self.atom = atom
        self.atom_mass = self._get_atommass()
        if model:
            self.wavenumber_grid = model.wavenumber_grid
            self.wavelength_grid = model.wavelength_grid
        else:
            self.wavenumber_grid, self.wavelength_grid = self._get_wavegrid(wavenumber_grid)
        # Rayleigh scattering is P/T independent: a dummy two-point grid
        # spanning any plausible pressure/temperature is enough downstream.
        self.pressure_grid, self.temperature_grid = np.array([1e-10, 1e100]) * u.Pa, np.array([1, 1e6]) * u.K
        self.opacities = self.compute_opacities()

    def _get_atommass(self):
        # Atomic mass per particle in g/ct, converted from the molar-style
        # mass (in u) returned by molmass.
        from molmass import Formula
        f = Formula(self.atom)
        return (f.mass * u.u / u.ct).to(u.g / u.ct)

    def _get_wavegrid(self, wavenumber_grid):
        # Normalise the input grid to 1/cm; unitless inputs are assumed
        # to already be wavenumbers in 1/cm.
        if isinstance(wavenumber_grid, u.Quantity):
            wavenumber_grid = wavenumber_grid.to(1 / u.cm)
        else:
            wavenumber_grid /= u.cm
        # Matching wavelength grid in microns.
        wavelength_grid = 1 / wavenumber_grid.to(1 / u.um)
        return wavenumber_grid, wavelength_grid

    def compute_opacities(self):
        """
        Computes the Rayleigh-scattering opacities as described in D. R. Lide,
        ed., CRC Handbook of Chemistry and Physics, Internet Version 2005,
        `hbcponline <http://hbcponline.com/faces/contents/ContentsSearch.xhtml>`_,
        CRC Press, Boca Raton, FL, 2005, chapter 10 table 1:

        .. math::
            \\alpha(\\lambda) = \\frac{128 \\pi^5}{3 \\lambda^4} \\frac{a^2}{m}

        where :math:`a` is the atom polarizability listed in table 2,
        chapter 10 of the CRC handbook, and :math:`m` is the atom mass.

        Returns
        -------
        astropy.units.Quantity
            the estimated opacity sampled at the indicated wavenumbers,
            converted to SI units.
        """
        # Polarizabilities in a_table are tabulated in units of 1e-24 cm^3.
        a = a_table[self.atom] * (1e-24 * u.cm ** 3)
        alpha = 128 * np.pi ** 5 / (3 * self.wavelength_grid ** 4) * a ** 2
        alpha /= self.atom_mass * u.ct
        return alpha.si

    def read_content(self):
        '''
        Reads the class content and returns the values needed by the opacity
        models.

        Returns
        -------
        str
            atom name
        astropy.units.Quantity:
            atomic mass
        astropy.units.Quantity:
            data pressure grid in si units
        astropy.units.Quantity:
            data temperature grid in si units
        astropy.units.Quantity:
            data wavenumber grid
        astropy.units.Quantity:
            data opacities grid in si units
        str
            the fixed tag ``'rayleigh'``
        '''
        opac = np.ones((self.pressure_grid.size, self.temperature_grid.size, self.wavenumber_grid.size,))
        opac *= u.m ** 2 / u.kg
        # Opacities are P/T independent: fill only the first (P, T) slot;
        # consumers broadcast/ignore the dummy grid points.
        opac[0, 0] = self.opacities
        return self.atom, self.atom_mass, self.pressure_grid, self.temperature_grid, self.wavenumber_grid, opac, 'rayleigh'
# Static dipole polarizabilities for elements H..No, in units of 1e-24 cm^3,
# from table 2, section 10 of the CRC Handbook of Chemistry and Physics.
a_table = {
    'H': 0.666793, 'He': 0.20496, 'Li': 24.3, 'Be': 5.6, 'B': 3.03,
    'C': 1.76, 'N': 1.1, 'O': 0.802, 'F': 0.557, 'Ne': 0.3956,
    'Na': 24.11, 'Mg': 10.6, 'Al': 6.8, 'Si': 5.38, 'P': 3.63,
    'S': 2.9, 'Cl': 2.18, 'Ar': 1.6411, 'K': 43.4, 'Ca': 22.8,
    'Sc': 17.8, 'Ti': 14.6, 'V': 12.4, 'Cr': 11.6, 'Mn': 9.4,
    'Fe': 8.4, 'Co': 7.5, 'Ni': 6.8, 'Cu': 6.2, 'Zn': 5.75,
    'Ga': 8.12, 'Ge': 6.07, 'As': 4.31, 'Se': 3.77, 'Br': 3.05,
    'Kr': 2.4844, 'Rb': 47.3, 'Sr': 27.6, 'Y': 22.7, 'Zr': 17.9,
    'Nb': 15.7, 'Mo': 12.8, 'Tc': 11.4, 'Ru': 9.6, 'Rh': 8.6,
    'Pd': 4.8, 'Ag': 7.2, 'Cd': 7.36, 'In': 10.2, 'Sn': 7.7,
    'Sb': 6.6, 'Te': 5.5, 'I': 5.35, 'Xe': 4.044, 'Cs': 59.42,
    'Ba': 39.7, 'La': 31.1, 'Ce': 29.6, 'Pr': 28.2, 'Nd': 31.4,
    'Pm': 30.1, 'Sm': 28.8, 'Eu': 27.7, 'Gd': 23.5, 'Tb': 25.5,
    'Dy': 24.5, 'Ho': 23.6, 'Er': 22.7, 'Tm': 21.8, 'Yb': 21,
    'Lu': 21.9, 'Hf': 16.2, 'Ta': 13.1, 'W': 11.1, 'Re': 9.7,
    'Os': 8.5, 'Ir': 7.6, 'Pt': 6.5, 'Au': 5.8, 'Hg': 5.02,
    'Tl': 7.6, 'Pb': 6.8, 'Bi': 7.4, 'Po': 6.8, 'At': 6,
    'Rn': 5.3, 'Fr': 47.1, 'Ra': 38.3, 'Ac': 32.1, 'Th': 32.1,
    'Pa': 25.4, 'U': 24.9, 'Np': 24.8, 'Pu': 24.5, 'Am': 23.3,
    'Cm': 23, 'Bk': 22.7, 'Cf': 20.5, 'Es': 19.7, 'Fm': 23.8,
    'Md': 18.2, 'No': 17.5,
}
| [
"numpy.array",
"numpy.ones",
"molmass.Formula"
] | [((2473, 2491), 'molmass.Formula', 'Formula', (['self.atom'], {}), '(self.atom)\n', (2480, 2491), False, 'from molmass import Formula\n'), ((4394, 4488), 'numpy.ones', 'np.ones', (['(self.pressure_grid.size, self.temperature_grid.size, self.wavenumber_grid.size\n )'], {}), '((self.pressure_grid.size, self.temperature_grid.size, self.\n wavenumber_grid.size))\n', (4401, 4488), True, 'import numpy as np\n'), ((2287, 2312), 'numpy.array', 'np.array', (['[1e-10, 1e+100]'], {}), '([1e-10, 1e+100])\n', (2295, 2312), True, 'import numpy as np\n'), ((2320, 2344), 'numpy.array', 'np.array', (['[1, 1000000.0]'], {}), '([1, 1000000.0])\n', (2328, 2344), True, 'import numpy as np\n')] |
import time
import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn import preprocessing
##
#%%
def deal_fea(_train_f, _test_f, _predictor_f):
    """Feature engineering for the GDM dataset: NaN filling, ratio features,
    min-max scaling, and column dropping.

    Arguments:
      * _train_f, _test_f: train/test pandas DataFrames (mutated in place
        and also returned).
      * _predictor_f: list of predictor column names of the raw data.

    Returns the transformed (_train_f, _test_f) pair.
    """
    # Split predictors: genotype columns (SNP*), small-integer columns, and
    # the remaining continuous columns.
    gen_fea = [f for f in _predictor_f if 'SNP' in f]
    int_fea = ['BMI分类', '产次', '孕次']
    other_fea = [f for f in _predictor_f if (f not in gen_fea) and (f not in int_fea)]
    other_fea.remove('RBP4')
    ###### fill nan ############
    ## mode fill gen_fea (disabled)
#    _mode_gen_fea =_train_f[gen_fea].mode()
#    for i in range(len(gen_fea)):
#        _train_f[gen_fea[i]].fillna(_mode_gen_fea[gen_fea[i]][0],inplace = True)
#        _test_f[gen_fea[i]].fillna(_mode_gen_fea[gen_fea[i]][0],inplace = True)
    # mode-fill the integer features (train mode applied to both splits)
    int_fea = list(set(int_fea))
    _mode_int_fea = _train_f[int_fea].mode()
    for i in range(len(int_fea)):
        _train_f[int_fea[i]].fillna(_mode_int_fea[int_fea[i]][0], inplace=True)
        _test_f[int_fea[i]].fillna(_mode_int_fea[int_fea[i]][0], inplace=True)
    ## median-fill the continuous features (train median applied to both splits)
    _median_other_fea = _train_f[other_fea].median()
    for i in range(len(other_fea)):
        _train_f[other_fea[i]].fillna(_median_other_fea[other_fea[i]], inplace=True)
        _test_f[other_fea[i]].fillna(_median_other_fea[other_fea[i]], inplace=True)
    #### non-numeric features
#    _train_f['SNP1|SNP2'] = _train_f['SNP1'] ^ _train_f['SNP2']
#    _test_f['SNP1|SNP2'] = _test_f['SNP1'] ^ _test_f['SNP2']
    # SNP columns to remove further down.
    gen_drop = ['SNP12', 'SNP26', 'SNP9', 'SNP1', 'SNP10']
#    gen_drop = ['SNP21','SNP22','SNP23','SNP54','SNP55']
    ## one-hot encoding of the genotype columns (disabled)
#    gen_fea = list(set(gen_fea)-set(gen_drop))
#    gen_fea = gen_fea+['DM家族史']
#    for idx in gen_fea:
#        _le=LabelEncoder().fit(_train_f[idx])
#        _label=_le.transform(_train_f[idx])
#        _ohe=OneHotEncoder(sparse=False).fit(_label.reshape(-1,1))
#        ohe=_ohe.transform(_label.reshape(-1,1))
#        _train_f.drop([idx],axis=1,inplace=True)
#        for i in range(0,ohe.shape[1]):
#            _train_f[idx+'_'+str(i)] = ohe[:,i]
#        _label=_le.transform(_test_f[idx])
#        ohe=_ohe.transform(_label.reshape(-1,1))
#        _test_f.drop([idx],axis=1,inplace=True)
#        for i in range(0,ohe.shape[1]):
#            _test_f[idx+'_'+str(i)] = ohe[:,i]
    ################ add ratio/interaction features #########################
    # eps guards the denominators against division by zero.
    eps = 1e-4
    _train_f['ALT/AST'] = _train_f['ALT'] / (_train_f['AST'] + eps)
    _test_f['ALT/AST'] = _test_f['ALT'] / (_test_f['AST'] + eps)
    _train_f['HDLC/LDLC'] = _train_f['HDLC'] / (_train_f['LDLC'] + eps)
    _test_f['HDLC/LDLC'] = _test_f['HDLC'] / (_test_f['LDLC'] + eps)
    _train_f['CHO/TG'] = _train_f['CHO'] / (_train_f['TG'] + eps)
    _test_f['CHO/TG'] = _test_f['CHO'] / (_test_f['TG'] + eps)
    _train_f['CHO/Cr'] = _train_f['CHO'] / (_train_f['Cr'] + eps)
    _test_f['CHO/Cr'] = _test_f['CHO'] / (_test_f['Cr'] + eps)
    # NOTE(review): unlike every other ratio this one has no +eps guard —
    # presumably ApoB is never zero in the data; confirm.
    _train_f['ApoA1/ApoB'] = _train_f['ApoA1'] / _train_f['ApoB']
    _test_f['ApoA1/ApoB'] = _test_f['ApoA1'] / _test_f['ApoB']
    _train_f['收缩压/舒张压'] = _train_f['收缩压'] / (_train_f['舒张压'] + eps)
    _test_f['收缩压/舒张压'] = _test_f['收缩压'] / (_test_f['舒张压'] + eps)
    ### wu da add
    _train_f['年龄/HDLC'] = _train_f['年龄'] / (_train_f['HDLC'] + eps)
    _test_f['年龄/HDLC'] = _test_f['年龄'] / (_test_f['HDLC'] + eps)
    _train_f['AST/TG'] = _train_f['AST'] / (_train_f['TG'] + eps)
    _test_f['AST/TG'] = _test_f['AST'] / (_test_f['TG'] + eps)
    _train_f['wbc/CHO'] = _train_f['wbc'] / (_train_f['CHO'] + eps)
    _test_f['wbc/CHO'] = _test_f['wbc'] / (_test_f['CHO'] + eps)
    _train_f['VAR00007/wbc'] = _train_f['VAR00007'] / (_train_f['wbc'] + eps)
    _test_f['VAR00007/wbc'] = _test_f['VAR00007'] / (_test_f['wbc'] + eps)
    _train_f['TG/HDLC'] = _train_f['TG'] / (_train_f['HDLC'] + eps)
    _test_f['TG/HDLC'] = _test_f['TG'] / (_test_f['HDLC'] + eps)
    _train_f['VAR00007/ApoA1'] = _train_f['VAR00007'] / (_train_f['ApoA1'] + eps)
    _test_f['VAR00007/ApoA1'] = _test_f['VAR00007'] / (_test_f['ApoA1'] + eps)
    _train_f['VAR00007*孕前BMI'] = _train_f['VAR00007'] * (_train_f['孕前BMI'] + eps)
    _test_f['VAR00007*孕前BMI'] = _test_f['VAR00007'] * (_test_f['孕前BMI'] + eps)
    ##############################
    _train_f['VAR00007log年龄'] = np.log10(_train_f['VAR00007']) * np.log10(_train_f['年龄'] / (1e-5))
    _test_f['VAR00007log年龄'] = np.log10(_test_f['VAR00007']) * np.log10(_test_f['年龄'] / (1e-5))
#    _train_f['ApoA1/年龄'] = _train_f['ApoA1']/_train_f['年龄']
#    _test_f['ApoA1/年龄'] =_test_f['ApoA1']/_test_f['年龄']
    # Body-fat percentage estimated from BMI and age (Deurenberg-style formula).
    _train_f['体脂率'] = 1.2 * _train_f['孕前BMI'] + 0.23 * _train_f['年龄'] - 5.4
    _test_f['体脂率'] = 1.2 * _test_f['孕前BMI'] + 0.23 * _test_f['年龄'] - 5.4
    # NOTE(review): this recomputes the same column assigned above — harmless
    # but redundant.
    _train_f['VAR00007*孕前BMI'] = _train_f['VAR00007'] * (_train_f['孕前BMI'] + eps)
    _test_f['VAR00007*孕前BMI'] = _test_f['VAR00007'] * (_test_f['孕前BMI'] + eps)
    add_fea = ['ALT/AST', 'HDLC/LDLC', 'CHO/TG', 'CHO/Cr',
               'ApoA1/ApoB',
               '体脂率', '收缩压/舒张压', 'VAR00007log年龄', '年龄/HDLC',
               'AST/TG', 'wbc/CHO', 'VAR00007/wbc', 'TG/HDLC', 'VAR00007/ApoA1', 'VAR00007*孕前BMI']
    # Min-max scale continuous + derived features; scaler fitted on train only.
    minmax_fea = other_fea + add_fea
    min_max_scaler = preprocessing.MinMaxScaler()
    min_max_scaler.fit(_train_f[minmax_fea])
    _train_f[minmax_fea] = min_max_scaler.transform(_train_f[minmax_fea])
    _test_f[minmax_fea] = min_max_scaler.transform(_test_f[minmax_fea])
    ########### drop some useless columns ##########
    _train_f.drop(['BMI分类'], axis=1, inplace=True)
    _test_f.drop(['BMI分类'], axis=1, inplace=True)
    _train_f.drop(['产次'], axis=1, inplace=True)
    _test_f.drop(['产次'], axis=1, inplace=True)
    _train_f.drop(['身高'], axis=1, inplace=True)
    _test_f.drop(['身高'], axis=1, inplace=True)
    _train_f.drop(['孕前体重'], axis=1, inplace=True)
    _test_f.drop(['孕前体重'], axis=1, inplace=True)
    _train_f.drop(['糖筛孕周'], axis=1, inplace=True)
    _test_f.drop(['糖筛孕周'], axis=1, inplace=True)
#    _train_f.drop(['孕次'],axis = 1,inplace = True)
#    _test_f.drop(['孕次'],axis = 1,inplace = True)
#    _train_f.drop(['RBP4'],axis = 1,inplace = True)
#    _test_f.drop(['RBP4'],axis = 1,inplace = True)
    ### drop the selected SNP columns
    _train_f.drop(gen_drop, axis=1, inplace=True)
    _test_f.drop(gen_drop, axis=1, inplace=True)
    ### drop the id column
    _train_f.drop(['id'], axis=1, inplace=True)
    _test_f.drop(['id'], axis=1, inplace=True)
    return _train_f, _test_f
#%%
# Load the raw train/test tables (GB2312-encoded CSVs) and run the feature
# engineering; 'label' is the binary target column.
orig_train = pd.read_csv('../data/d_train_20180307.csv', encoding='gb2312')
orig_test = pd.read_csv('../data/f_test_b_20180305.csv', encoding='gb2312')
predictor = [f for f in orig_train.columns if f not in ['label']]
train, test = deal_fea(orig_train.copy(), orig_test.copy(), predictor)
# Recompute the predictor list: deal_fea added and dropped columns.
predictor = [f for f in train.columns if f not in ['label']]
#train.to_csv('off_train_fea.csv',index=False)
#test.to_csv('off_test_fea.csv',index=False)
#%%
print('开始训练...')
predictor = [f for f in train.columns if f not in ['label']]
# xgboost parameters for the binary logistic classifier.
xgb_params = {'booster': 'gbtree',
              'objective': 'binary:logistic',
              'max_depth': 8,
              'lambda': 10,
              'subsample': 0.6,
              'colsample_bytree': 0.6,
              'eta': 0.05,  # 0.01
              'seed': 1024,
              'nthread': 12,
              'n_estimators': 70,
              'min_child_weight': 5,
              'gamma': 0.1
              }
# Grid-search over the parameters (disabled).
#cv_params= {'n_estimators': np.array(range(10,200,20)), \
#            'eta': [0.001,0.01,0.1,0.2],\
#            'max_depth': [6,7,8,9,10],\
#            'lambda': [10,30,60,100],\
#            'gamma':[0.1,0.3,0.5,0.8]
#            }
#score = 'f1'
#optimized_GBM = GridSearchCV(xgb.XGBClassifier(**xgb_params),cv_params,cv=5, verbose=2,scoring=score)
#
#optimized_GBM.fit(train[predictor], train['label'])
#optimized_GBM.best_params_
def f1_error(preds, df):
    """Custom xgboost eval metric: binary F1 at a 0.5 probability threshold.

    Arguments:
      * preds: array of predicted probabilities.
      * df: xgboost DMatrix-like object exposing get_label().

    Returns the ('f1', score) pair expected by xgb.cv/train feval.
    BUG FIX: the original raised ZeroDivisionError when there were no
    predicted positives or no true positives; those degenerate cases now
    return a score of 0.0.
    """
    label = df.get_label()
    # preds = 1.0/(1.0+np.exp(-preds))
    pred = preds.copy()
    pred[preds >= 0.5] = 1
    pred[preds < 0.5] = 0
    tp = sum(int(i == 1 and j == 1) for i, j in zip(pred, label))
    n_pred_pos = sum(pred == 1)
    n_true_pos = sum(label == 1)
    if n_pred_pos == 0 or n_true_pos == 0:
        return 'f1', 0.0
    precision = float(tp) / n_pred_pos
    recall = float(tp) / n_true_pos
    if precision + recall == 0:
        return 'f1', 0.0
    return 'f1', 2 * (precision * recall / (precision + recall))
def check_f1(preds, gt):
    """Binary F1 of probability predictions against ground-truth labels,
    thresholding at 0.5.

    Arguments:
      * preds: array of predicted probabilities.
      * gt: array of 0/1 ground-truth labels.

    Returns the F1 score as a float.
    BUG FIX: the original raised ZeroDivisionError when there were no
    predicted positives or no true positives; those degenerate cases now
    return 0.0.
    """
    label = gt.copy()
    pred = preds.copy()
    pred[preds >= 0.5] = 1
    pred[preds < 0.5] = 0
    tp = sum(int(i == 1 and j == 1) for i, j in zip(pred, label))
    n_pred_pos = sum(pred == 1)
    n_true_pos = sum(label == 1)
    if n_pred_pos == 0 or n_true_pos == 0:
        return 0.0
    precision = float(tp) / n_pred_pos
    recall = float(tp) / n_true_pos
    if precision + recall == 0:
        return 0.0
    return 2 * (precision * recall / (precision + recall))
print('开始CV 5折训练...')
t0 = time.time()
# loop_num outer repetitions x every_loop_num stratified folds.
# Out-of-fold predictions go to train_preds; each fold's test-set
# predictions are stored in a column of test_preds and averaged at the end.
every_loop_num = 5
loop_num = 1
train_preds = np.zeros((train.shape[0], loop_num))
scores = np.zeros(loop_num)
test_preds = np.zeros((test.shape[0], every_loop_num * loop_num))
for loop in range(loop_num):
    kf = StratifiedKFold(train.label, n_folds=every_loop_num, shuffle=True, random_state=520)
    for i, (train_index, test_index) in enumerate(kf):
        print('第{}-{}次训练...'.format(loop, i))
        train_feat1 = train.iloc[train_index].copy()
        train_feat2 = train.iloc[test_index].copy()
        xgb_train1 = xgb.DMatrix(train_feat1[predictor], train_feat1['label'])
        xgb_train2 = xgb.DMatrix(train_feat2[predictor], train_feat2['label'])
        watchlist = [(xgb_train1, 'train'), (xgb_train2, 'test')]
        # Inner xgb.cv selects the boosting-round count that maximises the
        # custom F1 metric with early stopping.
        cv_log = xgb.cv(xgb_params, xgb_train1, num_boost_round=25000, nfold=5, feval=f1_error,
                        early_stopping_rounds=50, seed=1024, maximize=True)
        bst_auc = cv_log['test-f1-mean'].max()
        # Recover the round index of the best score via a score->round map.
        cv_log['best'] = cv_log.index
        cv_log.index = cv_log['test-f1-mean']
        bst_nb = cv_log.best.to_dict()[bst_auc]
        # train for bst_nb+50 rounds on this fold's training portion
        watchlist = [(xgb_train1, 'train')]
        xgb_model = xgb.train(xgb_params, xgb_train1, num_boost_round=bst_nb + 50, evals=watchlist)
        xgb_cv_test = xgb.DMatrix(train_feat2[predictor])
        xgb_pre = xgb_model.predict(xgb_cv_test)
        train_preds[test_index, loop] = xgb_pre
        xgb_Dmatrix = xgb.DMatrix(test[predictor])
        xgb_pre_test = xgb_model.predict(xgb_Dmatrix)
        test_preds[:, i + loop * every_loop_num] = xgb_pre_test
    print('线下train得分: {}'.format(check_f1(train_preds[:, loop], train['label'])))
    scores[loop] = check_f1(train_preds[:, loop], train['label'])
#%%
# Average the per-fold test predictions and write the submission file.
submission = pd.DataFrame({'pred': test_preds.mean(axis=1)})
pre = submission['pred']
print('线下train_avg得分: {}'.format(np.mean(scores)))
pre.to_csv('../data/xgb_gbdt_f1.csv', index=False, float_format='%f')
"numpy.mean",
"numpy.log10",
"pandas.read_csv",
"xgboost.train",
"xgboost.cv",
"sklearn.cross_validation.StratifiedKFold",
"numpy.zeros",
"xgboost.DMatrix",
"time.time",
"sklearn.preprocessing.MinMaxScaler"
] | [((6652, 6714), 'pandas.read_csv', 'pd.read_csv', (['"""../data/d_train_20180307.csv"""'], {'encoding': '"""gb2312"""'}), "('../data/d_train_20180307.csv', encoding='gb2312')\n", (6663, 6714), True, 'import pandas as pd\n'), ((6726, 6789), 'pandas.read_csv', 'pd.read_csv', (['"""../data/f_test_b_20180305.csv"""'], {'encoding': '"""gb2312"""'}), "('../data/f_test_b_20180305.csv', encoding='gb2312')\n", (6737, 6789), True, 'import pandas as pd\n'), ((8643, 8654), 'time.time', 'time.time', ([], {}), '()\n', (8652, 8654), False, 'import time\n'), ((8699, 8735), 'numpy.zeros', 'np.zeros', (['(train.shape[0], loop_num)'], {}), '((train.shape[0], loop_num))\n', (8707, 8735), True, 'import numpy as np\n'), ((8745, 8763), 'numpy.zeros', 'np.zeros', (['loop_num'], {}), '(loop_num)\n', (8753, 8763), True, 'import numpy as np\n'), ((8777, 8829), 'numpy.zeros', 'np.zeros', (['(test.shape[0], every_loop_num * loop_num)'], {}), '((test.shape[0], every_loop_num * loop_num))\n', (8785, 8829), True, 'import numpy as np\n'), ((5332, 5360), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (5358, 5360), False, 'from sklearn import preprocessing\n'), ((8866, 8954), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (['train.label'], {'n_folds': 'every_loop_num', 'shuffle': '(True)', 'random_state': '(520)'}), '(train.label, n_folds=every_loop_num, shuffle=True,\n random_state=520)\n', (8881, 8954), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((4461, 4491), 'numpy.log10', 'np.log10', (["_train_f['VAR00007']"], {}), "(_train_f['VAR00007'])\n", (4469, 4491), True, 'import numpy as np\n'), ((4492, 4524), 'numpy.log10', 'np.log10', (["(_train_f['年龄'] / 1e-05)"], {}), "(_train_f['年龄'] / 1e-05)\n", (4500, 4524), True, 'import numpy as np\n'), ((4554, 4583), 'numpy.log10', 'np.log10', (["_test_f['VAR00007']"], {}), "(_test_f['VAR00007'])\n", (4562, 4583), True, 'import numpy as np\n'), ((4584, 4615), 'numpy.log10', 
'np.log10', (["(_test_f['年龄'] / 1e-05)"], {}), "(_test_f['年龄'] / 1e-05)\n", (4592, 4615), True, 'import numpy as np\n'), ((9187, 9244), 'xgboost.DMatrix', 'xgb.DMatrix', (['train_feat1[predictor]', "train_feat1['label']"], {}), "(train_feat1[predictor], train_feat1['label'])\n", (9198, 9244), True, 'import xgboost as xgb\n'), ((9266, 9323), 'xgboost.DMatrix', 'xgb.DMatrix', (['train_feat2[predictor]', "train_feat2['label']"], {}), "(train_feat2[predictor], train_feat2['label'])\n", (9277, 9323), True, 'import xgboost as xgb\n'), ((9406, 9541), 'xgboost.cv', 'xgb.cv', (['xgb_params', 'xgb_train1'], {'num_boost_round': '(25000)', 'nfold': '(5)', 'feval': 'f1_error', 'early_stopping_rounds': '(50)', 'seed': '(1024)', 'maximize': '(True)'}), '(xgb_params, xgb_train1, num_boost_round=25000, nfold=5, feval=\n f1_error, early_stopping_rounds=50, seed=1024, maximize=True)\n', (9412, 9541), True, 'import xgboost as xgb\n'), ((9828, 9907), 'xgboost.train', 'xgb.train', (['xgb_params', 'xgb_train1'], {'num_boost_round': '(bst_nb + 50)', 'evals': 'watchlist'}), '(xgb_params, xgb_train1, num_boost_round=bst_nb + 50, evals=watchlist)\n', (9837, 9907), True, 'import xgboost as xgb\n'), ((9931, 9966), 'xgboost.DMatrix', 'xgb.DMatrix', (['train_feat2[predictor]'], {}), '(train_feat2[predictor])\n', (9942, 9966), True, 'import xgboost as xgb\n'), ((10120, 10148), 'xgboost.DMatrix', 'xgb.DMatrix', (['test[predictor]'], {}), '(test[predictor])\n', (10131, 10148), True, 'import xgboost as xgb\n'), ((10555, 10570), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10562, 10570), True, 'import numpy as np\n')] |
"""Correlograms View: show auto- and cross- correlograms of the clusters."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
import numpy.random as rdn
from galry import (Manager, PlotPaintManager, PlotInteractionManager, Visual,
GalryWidget, QtGui, QtCore, QtOpenGL, enforce_dtype, RectanglesVisual,
TextVisual, PlotVisual, AxesVisual)
from klustaviewa.stats.cache import IndexedMatrix
from kwiklib.dataio.tools import get_array
from kwiklib.utils.colors import COLORMAP
from klustaviewa.views.common import HighlightManager, KlustaViewaBindings, KlustaView
# -----------------------------------------------------------------------------
# Shaders
# -----------------------------------------------------------------------------
# GLSL snippet shared by all correlogram visuals: it maps each histogram's
# unit-square vertex coordinates into the (cluster.x, cluster.y) cell of an
# nclusters x nclusters grid, leaving a 5% margin around every cell.
# `cluster` is a per-vertex attribute and `nclusters` a uniform, both
# declared by the visuals below.
VERTEX_SHADER = """
float margin = 0.05;
float a = 1.0 / (nclusters * (1 + 2 * margin));
vec2 box_position = vec2(0, 0);
box_position.x = -1 + a * (1 + 2 * margin) * (2 * cluster.x + 1);
box_position.y = -1 + a * (1 + 2 * margin) * (2 * cluster.y + 1);
vec2 transformed_position = position;
transformed_position.y = 2 * transformed_position.y - 1;
transformed_position = box_position + a * transformed_position;
"""
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def get_histogram_points(hist):
    """Tesselate histograms into bar-outline vertex coordinates.

    Arguments:
      * hist: a N x Nsamples array, one histogram per row.

    Returns:
      * X, Y: two N x (5*Nsamples+1) arrays holding, for each histogram,
        the x and y coordinates of a triangle-strip tesselation of its bars.
    """
    if hist.size == 0:
        return np.array([[]]), np.array([[]])
    nhist, nbins = hist.shape
    binwidth = 2. / nbins
    left_edges = -1 + binwidth * np.arange(nbins)
    shape = (nhist, 5 * nbins + 1)
    X = np.zeros(shape)
    Y = np.zeros(shape)
    # Five vertices per bar: bottom-left, top-left, top-right, bottom-left,
    # bottom-right; a final vertex closes the strip at x = 1.
    X[:, 0:-1:5] = left_edges
    X[:, 1::5] = left_edges
    X[:, 2::5] = left_edges + binwidth
    X[:, 3::5] = left_edges
    X[:, 4::5] = left_edges + binwidth
    X[:, -1] = 1
    # Only the two "top" vertices of each bar carry the histogram value.
    Y[:, 1::5] = hist
    Y[:, 2::5] = hist
    return X, Y
# -----------------------------------------------------------------------------
# Data manager
# -----------------------------------------------------------------------------
class CorrelogramsDataManager(Manager):
    # Builds the vertex data (positions, colors, per-vertex cluster indices)
    # consumed by the correlogram visuals, from an IndexedMatrix of
    # correlograms keyed by cluster pair.

    def set_data(self, correlograms=None, cluster_colors=None, baselines=None,
                 clusters_selected=None, ncorrbins=None, corrbin=None,
                 keep_order=None,
                 normalization='row'):
        # Empty defaults let the view be created before any cluster selection.
        if correlograms is None:
            correlograms = IndexedMatrix(shape=(0, 0, 0))
            baselines = np.zeros(0)
            cluster_colors = np.zeros(0)
            clusters_selected = []
            ncorrbins = 0
            corrbin = 0
        self.keep_order = keep_order
        # self.correlograms_array = get_correlograms_array(correlograms,
            # clusters_selected=clusters_selected, ncorrbins=ncorrbins)
        self.correlograms = correlograms
        # Copy the original arrays: normalize() restarts from these every time.
        self.baselines = baselines
        self.baselines0 = baselines.copy()
        self.correlograms_array = correlograms.to_array()
        self.correlograms_array0 = self.correlograms_array.copy()
        nclusters, nclusters, self.nbins = self.correlograms_array.shape
        self.ncorrelograms = nclusters * nclusters
        self.nticks = (ncorrbins + 1) * self.ncorrelograms
        self.ncorrbins = ncorrbins
        self.corrbin = corrbin
        self.clusters_selected = np.array(clusters_selected, dtype=np.int32)
        self.clusters_unique = np.array(sorted(clusters_selected), dtype=np.int32)
        self.nclusters = len(clusters_selected)
        assert nclusters == self.nclusters
        self.cluster_colors = cluster_colors
        self.cluster_colors_array = get_array(cluster_colors, dosort=True)
        # Row/column order of the grid: selection order or sorted order.
        if keep_order:
            self.permutation = np.argsort(clusters_selected)
        else:
            self.permutation = np.arange(self.nclusters)
        self.cluster_colors_array_ordered = self.cluster_colors_array[self.permutation]
        # HACK: if correlograms is empty, ncorrelograms == 1 here!
        if self.correlograms_array.size == 0:
            self.ncorrelograms = 0
        # (cluster i, cluster j) pair for each histogram cell of the grid.
        clusters = [(i, j) for j in self.permutation
                    for i in self.permutation]
        self.clusters = np.array(clusters, dtype=np.int32)
        self.clusters0 = self.clusters
        # baselines of the correlograms
        self.nprimitives = self.ncorrelograms
        # index 0 = heterogeneous clusters, index>0 ==> cluster index + 1
        # self.cluster_colors = get_array(cluster_colors)
        # Normalize and compute the vertex positions (also sets self.nsamples,
        # which is used just below).
        self.normalize(normalization)
        # Indices of auto-correlograms (diagonal cells) — only these get a
        # cluster color; cross-correlograms stay white (color map index 0).
        if self.nclusters:
            identity = self.clusters[:, 0] == self.clusters[:, 1]
        else:
            identity = []
        color_array_index = np.zeros(self.ncorrelograms, dtype=np.int32)
        color_array_index[identity] = np.array(self.cluster_colors_array + 1,
                                              dtype=np.int32)
        # very first color in color map = white (cross-correlograms)
        self.color = np.vstack((np.ones((1, 3)), COLORMAP))
        self.color_array_index = color_array_index
        # Expand per-histogram data to per-vertex data.
        self.clusters = np.repeat(self.clusters, self.nsamples, axis=0)
        self.color_array_index = np.repeat(self.color_array_index,
                                          self.nsamples, axis=0)

    def normalize(self, normalization='row'):
        # Restart from the unnormalized copies so normalizations don't stack.
        self.correlograms_array = self.correlograms_array0.copy()
        self.baselines = self.baselines0.copy()
        if normalization == 'row':
            # normalization
            for i in range(self.nclusters):
                # divide all correlograms in the row by the max of this histogram
                correlogram_diagonal = self.correlograms_array[i, i, ...]
                m = correlogram_diagonal.max()
                if m > 0:
                    self.correlograms_array[i, :, :] /= m
                    self.baselines[i, :] /= m
                # normalize all correlograms in the row so that they all fit in the
                # window
                m = self.correlograms_array[i, :, :].max()
                if m > 0:
                    self.correlograms_array[i, :, :] /= m
                    self.baselines[i, :] /= m
        elif normalization == 'uniform':
            # Scale every correlogram independently to its own maximum.
            M = self.correlograms_array.max(axis=2)
            self.correlograms_array /= M.reshape(
                (self.nclusters, self.nclusters, 1))
            self.baselines /= M
        # get the vertex positions
        X, Y = get_histogram_points(self.correlograms_array.reshape(
            (self.ncorrelograms, self.nbins)))
        n = X.size
        self.nsamples = X.shape[1]
        # fill the data array
        self.position = np.empty((n, 2), dtype=np.float32)
        self.position[:, 0] = X.ravel()
        self.position[:, 1] = Y.ravel()
# -----------------------------------------------------------------------------
# Visuals
# -----------------------------------------------------------------------------
class CorrelogramsVisual(PlotVisual):
    # Draws all auto-/cross-correlograms as triangle strips, one primitive
    # per (cluster_i, cluster_j) pair, placed on the grid by VERTEX_SHADER.
    def initialize(self, nclusters=None, ncorrelograms=None,  # nsamples=None,
                   ncorrbins=None, corrbin=None,
                   position=None, color=None, color_array_index=None, clusters=None):
        self.position_attribute_name = "transformed_position"
        super(CorrelogramsVisual, self).initialize(
            position=position,
            nprimitives=ncorrelograms,
            color=color,
            color_array_index=color_array_index,
            autonormalizable=False,
            )
        self.primitive_type = 'TRIANGLE_STRIP'
        # Per-vertex (cluster.x, cluster.y) pair consumed by VERTEX_SHADER.
        self.add_attribute("cluster", vartype="int", ndim=2, data=clusters)
        self.add_uniform("nclusters", vartype="int", ndim=1, data=nclusters)
        self.add_vertex_main(VERTEX_SHADER)
class CorrelogramsBaselineVisual(PlotVisual):
    # Draws one horizontal gray baseline per correlogram cell, at the
    # expected (chance-level) rate for that cluster pair.
    def initialize(self, nclusters=None, baselines=None, clusters=None,
                   corrbin=None):
        self.position_attribute_name = "transformed_position"
        if baselines is None:
            baselines = np.zeros((nclusters, nclusters))
        baselines = baselines.ravel()
        n = len(baselines)
        # Two vertices per line: x spans the whole cell, y sits at the baseline.
        position = np.zeros((2 * n, 2))
        position[:, 0] = np.tile(np.array([-1., 1.]), n)
        position[:, 1] = np.repeat(baselines, 2)
        position = np.array(position, dtype=np.float32)
        clusters = np.repeat(clusters, 2, axis=0)
        super(CorrelogramsBaselineVisual, self).initialize(
            position=position,
            nprimitives=n,
            color=(.25, .25, .25, 1.),
            autonormalizable=False,
            )
        self.primitive_type = 'LINES'
        self.add_attribute("cluster", vartype="int", ndim=2, data=clusters)
        self.add_uniform("nclusters", vartype="int", ndim=1, data=nclusters)
        self.add_vertex_main(VERTEX_SHADER)
class CorrelogramsTicksVisual(PlotVisual):
    """Vertical time ticks drawn inside every correlogram cell.

    One tick per bin edge; the central (t = 0) tick is drawn brighter and at
    full height, the others are short (5% of the cell height).
    """
    def initialize(self, ncorrbins=None, corrbin=None, ncorrelograms=None,
                   clusters=None, nclusters=None):
        if ncorrbins is None:
            ncorrbins = 0
        if corrbin is None:
            corrbin = 0
        # One more tick than bins: ticks sit on the bin edges.
        ncorrbins += 1
        self.position_attribute_name = "transformed_position"
        nticks = ncorrbins * ncorrelograms
        # Two vertices per tick: bottom at y=0, top at y=0.05.
        position = np.zeros((2 * ncorrbins, 2))
        position[:, 0] = np.repeat(np.linspace(-1., 1., ncorrbins), 2)
        position[1::2, 1] = 0.05
        position = np.array(position, dtype=np.float32)
        clusters = np.repeat(clusters, 2 * ncorrbins, axis=0)
        color = .25 * np.ones((ncorrbins, 4))
        # Highlight the central tick(s): brighter alpha, full height.
        if ncorrbins % 2 == 1:
            color[ncorrbins // 2, 3] = .85
            position[ncorrbins, 1] = 1
        else:
            color[ncorrbins // 2, 3] = .85
            color[ncorrbins // 2 - 1, 3] = .85
            # BUG FIX: the original assigned position[ncorrbins, 1] = 1 twice
            # here; the duplicate was a no-op and has been removed.
            # NOTE(review): with an even tick count there are two central
            # ticks — presumably the second assignment was meant to raise the
            # other one to full height as well; confirm against rendering.
            position[ncorrbins, 1] = 1
        color = np.repeat(color, 2, axis=0)
        # Replicate the single-cell tick geometry for every correlogram.
        color = np.tile(color, (ncorrelograms, 1))
        position = np.tile(position, (ncorrelograms, 1))
        super(CorrelogramsTicksVisual, self).initialize(
            position=position,
            nprimitives=nticks,
            color=color,
            autonormalizable=False,
            )
        self.primitive_type = 'LINES'
        self.add_attribute("cluster", vartype="int", ndim=2, data=clusters)
        self.add_uniform("nclusters", vartype="int", ndim=1, data=nclusters)
        self.add_vertex_main(VERTEX_SHADER)
class CorrelogramsPaintManager(PlotPaintManager):
    # Registers the four visuals of the view (correlograms, baselines,
    # ticks, hovering cluster-pair label) and refreshes them on data change.
    def initialize(self, **kwargs):
        self.add_visual(CorrelogramsVisual,
                        nclusters=self.data_manager.nclusters,
                        ncorrelograms=self.data_manager.ncorrelograms,
                        position=self.data_manager.position,
                        color=self.data_manager.color,
                        color_array_index=self.data_manager.color_array_index,
                        clusters=self.data_manager.clusters,
                        ncorrbins=self.data_manager.ncorrbins,
                        corrbin=self.data_manager.corrbin,
                        name='correlograms')
        self.add_visual(CorrelogramsBaselineVisual,
                        baselines=self.data_manager.baselines,
                        nclusters=self.data_manager.nclusters,
                        clusters=self.data_manager.clusters0,
                        name='baselines',
                        )
        self.add_visual(CorrelogramsTicksVisual,
                        ncorrbins=self.data_manager.ncorrbins,
                        corrbin=self.data_manager.corrbin,
                        nclusters=self.data_manager.nclusters,
                        ncorrelograms=self.data_manager.ncorrelograms,
                        clusters=self.data_manager.clusters0,
                        name='ticks',
                        )
        # Text overlay showing the hovered "i / j" cluster pair; hidden until
        # the mouse moves over a cell.
        self.add_visual(TextVisual, text='0', name='clusterinfo', fontsize=16,
                        # posoffset=(50., -50.),
                        coordinates=(1., -1.),
                        posoffset=(-80., 30.),
                        is_static=True,
                        color=(1., 1., 1., 1.),
                        background_transparent=False,
                        letter_spacing=350.,
                        depth=-1,
                        visible=False)

    def update(self):
        # Re-upload all visuals from the (re-)computed data manager arrays.
        self.reinitialize_visual(
            # size=self.data_manager.position.shape[0],
            nclusters=self.data_manager.nclusters,
            ncorrelograms=self.data_manager.ncorrelograms,
            position=self.data_manager.position,
            color=self.data_manager.color,
            color_array_index=self.data_manager.color_array_index,
            clusters=self.data_manager.clusters,
            ncorrbins=self.data_manager.ncorrbins,
            corrbin=self.data_manager.corrbin,
            visual='correlograms')
        self.reinitialize_visual(
            # size=2 * self.data_manager.baselines.size,
            baselines=self.data_manager.baselines,
            nclusters=self.data_manager.nclusters,
            clusters=self.data_manager.clusters0,
            visual='baselines')
        self.reinitialize_visual(
            # size=2 * self.data_manager.nticks,
            ncorrbins=self.data_manager.ncorrbins,
            corrbin=self.data_manager.corrbin,
            nclusters=self.data_manager.nclusters,
            ncorrelograms=self.data_manager.ncorrelograms,
            clusters=self.data_manager.clusters0,
            visual='ticks')
# -----------------------------------------------------------------------------
# Interaction
# -----------------------------------------------------------------------------
class CorrelogramsInfoManager(Manager):
    # Shows which cluster pair the mouse is hovering over.
    def initialize(self):
        pass

    def show_closest_cluster(self, xd, yd):
        # Invert the VERTEX_SHADER grid transform to recover the
        # (column, row) cell containing the data coordinates (xd, yd).
        margin = 0.05
        a = 1.0 / (self.data_manager.nclusters * (1 + 2 * margin))
        cx = int(((xd + 1) / (a * (1 + 2 * margin)) - 1) / 2 + .5)
        cy = int(((yd + 1) / (a * (1 + 2 * margin)) - 1) / 2 + .5)
        # Clamp to valid grid cells for positions outside the grid.
        cx_rel = np.clip(cx, 0, self.data_manager.nclusters - 1)
        cy_rel = np.clip(cy, 0, self.data_manager.nclusters - 1)
        # Color of the hovered row's cluster (currently unused by set_data).
        color1 = self.data_manager.cluster_colors_array_ordered[cy_rel]
        r, g, b = COLORMAP[color1, :]
        color1 = (r, g, b, .75)
        # Map grid cells back to actual cluster ids.
        cx = self.data_manager.clusters_unique[self.data_manager.permutation][cx_rel]
        cy = self.data_manager.clusters_unique[self.data_manager.permutation][cy_rel]
        text = "%d / %d" % (cx, cy)
        self.paint_manager.set_data(  # coordinates=(xd, yd), #color=color1,
            text=text,
            visible=True,
            visual='clusterinfo')
class CorrelogramsInteractionManager(PlotInteractionManager):
    # Routes user events: hover shows the cluster pair, 'N' key cycles
    # between the available normalization modes.
    def initialize(self):
        self.normalization_index = 0
        self.normalization_list = ['row', 'uniform']
        self.register('ShowClosestCluster', self.show_closest_cluster)
        self.register('ChangeNormalization', self.change_normalization)
        # Any other event hides the cluster-pair label.
        self.register(None, self.hide_closest_cluster)

    def hide_closest_cluster(self, parameter):
        self.paint_manager.set_data(visible=False, visual='clusterinfo')

    def show_closest_cluster(self, parameter):
        if self.data_manager.nclusters == 0:
            return
        self.cursor = None
        nav = self.get_processor('navigation')
        # window coordinates
        x, y = parameter
        # data coordinates
        xd, yd = nav.get_data_coordinates(x, y)
        self.info_manager.show_closest_cluster(xd, yd)

    def change_normalization(self, normalization=None):
        # With no explicit mode, cycle to the next one in the list.
        if normalization is None:
            self.normalization_index = np.mod(self.normalization_index + 1,
                                             len(self.normalization_list))
            normalization = self.normalization_list[self.normalization_index]
        self.data_manager.normalize(normalization)
        self.paint_manager.update()
        self.parent.updateGL()
class CorrelogramsBindings(KlustaViewaBindings):
    """Key/mouse bindings for the correlograms view."""
    def set_normalization(self):
        """Pressing 'N' cycles through the normalization modes."""
        self.set('KeyPress', 'ChangeNormalization', key='N')
    def set_clusterinfo(self):
        """Mouse moves query the cluster pair under the cursor."""
        self.set('Move', 'ShowClosestCluster',  # key_modifier='Shift',
                 param_getter=lambda p: (p['mouse_position'][0],
                                         p['mouse_position'][1]))
    def initialize(self):
        """Install the custom bindings on top of the defaults."""
        super(CorrelogramsBindings, self).initialize()
        self.set_clusterinfo()
        self.set_normalization()
# -----------------------------------------------------------------------------
# Top-level widget
# -----------------------------------------------------------------------------
class CorrelogramsView(KlustaView):
    """Top-level widget rendering pairwise correlograms."""
    def __init__(self, *args, **kwargs):
        # Request an antialiased OpenGL surface (renamed local so the
        # builtin `format` is not shadowed; the kwarg key is unchanged).
        gl_format = QtOpenGL.QGLFormat()
        gl_format.setSampleBuffers(True)
        kwargs['format'] = gl_format
        super(CorrelogramsView, self).__init__(**kwargs)
    def initialize(self):
        """Wire up bindings and companion managers."""
        self.set_bindings(CorrelogramsBindings)
        self.set_companion_classes(
            paint_manager=CorrelogramsPaintManager,
            interaction_manager=CorrelogramsInteractionManager,
            info_manager=CorrelogramsInfoManager,
            data_manager=CorrelogramsDataManager,)
    def set_data(self, *args, **kwargs):
        """Forward data to the data manager, tagging the current normalization."""
        interaction = self.interaction_manager
        kwargs['normalization'] = interaction.normalization_list[
            interaction.normalization_index]
        self.data_manager.set_data(*args, **kwargs)
        # Refresh every visual once the view has been initialized.
        if self.initialized:
            for visual in ('correlograms', 'baselines', 'ticks'):
                self.paint_manager.set_data(visible=True, visual=visual)
            self.paint_manager.update()
            self.updateGL()
    def clear(self):
        """Hide all correlogram visuals."""
        for visual in ('correlograms', 'baselines', 'ticks'):
            self.paint_manager.set_data(visible=False, visual=visual)
    def change_normalization(self, normalization=None):
        """Delegate normalization changes to the interaction manager."""
        self.interaction_manager.change_normalization(normalization)
| [
"numpy.clip",
"kwiklib.dataio.tools.get_array",
"numpy.tile",
"numpy.repeat",
"numpy.ones",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"klustaviewa.stats.cache.IndexedMatrix",
"numpy.arange",
"galry.QtOpenGL.QGLFormat"
] | [((1994, 2025), 'numpy.zeros', 'np.zeros', (['(n, 5 * nsamples + 1)'], {}), '((n, 5 * nsamples + 1))\n', (2002, 2025), True, 'import numpy as np\n'), ((2076, 2107), 'numpy.zeros', 'np.zeros', (['(n, 5 * nsamples + 1)'], {}), '((n, 5 * nsamples + 1))\n', (2084, 2107), True, 'import numpy as np\n'), ((3813, 3856), 'numpy.array', 'np.array', (['clusters_selected'], {'dtype': 'np.int32'}), '(clusters_selected, dtype=np.int32)\n', (3821, 3856), True, 'import numpy as np\n'), ((4112, 4150), 'kwiklib.dataio.tools.get_array', 'get_array', (['cluster_colors'], {'dosort': '(True)'}), '(cluster_colors, dosort=True)\n', (4121, 4150), False, 'from kwiklib.dataio.tools import get_array\n'), ((4757, 4791), 'numpy.array', 'np.array', (['clusters'], {'dtype': 'np.int32'}), '(clusters, dtype=np.int32)\n', (4765, 4791), True, 'import numpy as np\n'), ((5390, 5434), 'numpy.zeros', 'np.zeros', (['self.ncorrelograms'], {'dtype': 'np.int32'}), '(self.ncorrelograms, dtype=np.int32)\n', (5398, 5434), True, 'import numpy as np\n'), ((5482, 5537), 'numpy.array', 'np.array', (['(self.cluster_colors_array + 1)'], {'dtype': 'np.int32'}), '(self.cluster_colors_array + 1, dtype=np.int32)\n', (5490, 5537), True, 'import numpy as np\n'), ((5764, 5811), 'numpy.repeat', 'np.repeat', (['self.clusters', 'self.nsamples'], {'axis': '(0)'}), '(self.clusters, self.nsamples, axis=0)\n', (5773, 5811), True, 'import numpy as np\n'), ((5845, 5901), 'numpy.repeat', 'np.repeat', (['self.color_array_index', 'self.nsamples'], {'axis': '(0)'}), '(self.color_array_index, self.nsamples, axis=0)\n', (5854, 5901), True, 'import numpy as np\n'), ((7315, 7349), 'numpy.empty', 'np.empty', (['(n, 2)'], {'dtype': 'np.float32'}), '((n, 2), dtype=np.float32)\n', (7323, 7349), True, 'import numpy as np\n'), ((8862, 8882), 'numpy.zeros', 'np.zeros', (['(2 * n, 2)'], {}), '((2 * n, 2))\n', (8870, 8882), True, 'import numpy as np\n'), ((8963, 8986), 'numpy.repeat', 'np.repeat', (['baselines', '(2)'], {}), '(baselines, 2)\n', 
(8972, 8986), True, 'import numpy as np\n'), ((9006, 9042), 'numpy.array', 'np.array', (['position'], {'dtype': 'np.float32'}), '(position, dtype=np.float32)\n', (9014, 9042), True, 'import numpy as np\n'), ((9071, 9101), 'numpy.repeat', 'np.repeat', (['clusters', '(2)'], {'axis': '(0)'}), '(clusters, 2, axis=0)\n', (9080, 9101), True, 'import numpy as np\n'), ((10076, 10104), 'numpy.zeros', 'np.zeros', (['(2 * ncorrbins, 2)'], {}), '((2 * ncorrbins, 2))\n', (10084, 10104), True, 'import numpy as np\n'), ((10228, 10264), 'numpy.array', 'np.array', (['position'], {'dtype': 'np.float32'}), '(position, dtype=np.float32)\n', (10236, 10264), True, 'import numpy as np\n'), ((10293, 10335), 'numpy.repeat', 'np.repeat', (['clusters', '(2 * ncorrbins)'], {'axis': '(0)'}), '(clusters, 2 * ncorrbins, axis=0)\n', (10302, 10335), True, 'import numpy as np\n'), ((10715, 10742), 'numpy.repeat', 'np.repeat', (['color', '(2)'], {'axis': '(0)'}), '(color, 2, axis=0)\n', (10724, 10742), True, 'import numpy as np\n'), ((10759, 10793), 'numpy.tile', 'np.tile', (['color', '(ncorrelograms, 1)'], {}), '(color, (ncorrelograms, 1))\n', (10766, 10793), True, 'import numpy as np\n'), ((10813, 10850), 'numpy.tile', 'np.tile', (['position', '(ncorrelograms, 1)'], {}), '(position, (ncorrelograms, 1))\n', (10820, 10850), True, 'import numpy as np\n'), ((14755, 14802), 'numpy.clip', 'np.clip', (['cx', '(0)', '(self.data_manager.nclusters - 1)'], {}), '(cx, 0, self.data_manager.nclusters - 1)\n', (14762, 14802), True, 'import numpy as np\n'), ((14820, 14867), 'numpy.clip', 'np.clip', (['cy', '(0)', '(self.data_manager.nclusters - 1)'], {}), '(cy, 0, self.data_manager.nclusters - 1)\n', (14827, 14867), True, 'import numpy as np\n'), ((17600, 17620), 'galry.QtOpenGL.QGLFormat', 'QtOpenGL.QGLFormat', ([], {}), '()\n', (17618, 17620), False, 'from galry import Manager, PlotPaintManager, PlotInteractionManager, Visual, GalryWidget, QtGui, QtCore, QtOpenGL, enforce_dtype, RectanglesVisual, TextVisual, 
PlotVisual, AxesVisual\n'), ((1854, 1868), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (1862, 1868), True, 'import numpy as np\n'), ((1870, 1884), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (1878, 1884), True, 'import numpy as np\n'), ((1961, 1980), 'numpy.arange', 'np.arange', (['nsamples'], {}), '(nsamples)\n', (1970, 1980), True, 'import numpy as np\n'), ((2790, 2820), 'klustaviewa.stats.cache.IndexedMatrix', 'IndexedMatrix', ([], {'shape': '(0, 0, 0)'}), '(shape=(0, 0, 0))\n', (2803, 2820), False, 'from klustaviewa.stats.cache import IndexedMatrix\n'), ((2845, 2856), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2853, 2856), True, 'import numpy as np\n'), ((2886, 2897), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2894, 2897), True, 'import numpy as np\n'), ((4214, 4243), 'numpy.argsort', 'np.argsort', (['clusters_selected'], {}), '(clusters_selected)\n', (4224, 4243), True, 'import numpy as np\n'), ((4289, 4314), 'numpy.arange', 'np.arange', (['self.nclusters'], {}), '(self.nclusters)\n', (4298, 4314), True, 'import numpy as np\n'), ((8727, 8759), 'numpy.zeros', 'np.zeros', (['(nclusters, nclusters)'], {}), '((nclusters, nclusters))\n', (8735, 8759), True, 'import numpy as np\n'), ((8915, 8936), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (8923, 8936), True, 'import numpy as np\n'), ((10140, 10173), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', 'ncorrbins'], {}), '(-1.0, 1.0, ncorrbins)\n', (10151, 10173), True, 'import numpy as np\n'), ((10367, 10390), 'numpy.ones', 'np.ones', (['(ncorrbins, 4)'], {}), '((ncorrbins, 4))\n', (10374, 10390), True, 'import numpy as np\n'), ((5652, 5667), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (5659, 5667), True, 'import numpy as np\n')] |
import logging
import numpy as np
from pandas.api.types import is_numeric_dtype
from scipy.interpolate import griddata
from scipy.stats import multivariate_normal
# Some useful functions
def RMS(x):
    """Root mean square of a matrix: mean of the squares over axis 0,
    summed across columns, then square-rooted."""
    column_means = np.mean(np.square(x), axis=0)
    return np.sqrt(np.sum(column_means))
def log_multivariate_normal_likelihood(x, mean, var):
    """Average Gaussian log-likelihood of the rows of x.

    Each row x[i] is scored with multivariate_normal.logpdf using mean[i]
    and covariance var[i]; the per-row log-likelihoods are then averaged.
    x and mean must have the same shape.
    """
    assert x.shape == mean.shape, 'Data and mean do not have the same shape'
    nb_points = x.shape[0]
    per_point = np.zeros((nb_points, 1))
    for idx in range(nb_points):
        covar = reshape_pt1_tonormal(var[idx])
        # A one-element covariance is passed as a plain float.
        if covar.shape[0] == 1:
            covar = float(covar)
        # Guard against a zero covariance entry (`.all()` falsy) with jitter.
        if np.array(covar).all() == 0:
            covar = 1e-8 * np.ones_like(covar)
        per_point[idx] = reshape_pt1(
            multivariate_normal.logpdf(x[idx], mean=mean[idx], cov=covar))
    return float(np.mean(per_point, axis=0))
def reshape_dim1(x, verbose=False):
    """Reshape a scalar or (length,) vector to a column of shape (length, 1).

    Scalars become (1, 1); 1-D arrays of length n become (n, 1); anything
    already 2-D (or higher) is returned as an ndarray unchanged.
    """
    if np.isscalar(x) or np.array(x).ndim == 0:
        result = np.reshape(x, (1, 1))
    else:
        result = np.array(x)
        if result.ndim == 1:
            result = np.reshape(result, (result.shape[0], 1))
    if verbose:
        print(result.shape)
    return result
def reshape_pt1(x, verbose=False):
    """Reshape a scalar or (length,) vector to a single point of shape (1, length).

    Scalars become (1, 1); 1-D arrays of length n become (1, n); anything
    already 2-D (or higher) is returned as an ndarray unchanged.
    """
    if np.isscalar(x) or np.array(x).ndim == 0:
        result = np.reshape(x, (1, 1))
    else:
        result = np.array(x)
        if result.ndim == 1:
            result = result[np.newaxis, :]
    if verbose:
        print(result.shape)
    return result
def reshape_pt1_tonormal(x, verbose=False):
    """Flatten a single point of shape (1, length) back to (length,).

    Scalars become (1,); 1-D arrays keep their shape; 2-D arrays with more
    than one row pass through unchanged.
    """
    if np.isscalar(x) or np.array(x).ndim == 0:
        out = np.reshape(x, (1,))
    elif x.ndim == 1:
        out = np.reshape(x, (x.shape[0],))
    elif x.shape[0] == 1:
        out = np.reshape(x, (x.shape[1],))
    else:
        out = x
    if verbose:
        print(out.shape)
    return out
def reshape_dim1_tonormal(x, verbose=False):
    """Flatten a column vector of shape (length, 1) back to (length,).

    Scalars become (1,); 1-D arrays keep their shape; 2-D arrays with more
    than one column pass through unchanged.
    """
    if np.isscalar(x) or np.array(x).ndim == 0:
        out = np.reshape(x, (1,))
    elif x.ndim == 1:
        out = np.reshape(x, (x.shape[0],))
    elif x.shape[1] == 1:
        out = np.reshape(x, (x.shape[0],))
    else:
        out = x
    if verbose:
        print(out.shape)
    return out
# Functions returning the value of the information criterion to optimize at a
# certain point, given a trained GP model
def posterior_variance(x, model):
    """Posterior predictive variance of the trained GP `model` at point x."""
    query = reshape_pt1(x, verbose=False)
    _, variance = model.predict(query)
    return variance
def entropy(x, model):
    """Differential entropy of the GP posterior at point x.

    For a Gaussian the differential entropy is 0.5 * log(2 * pi * e * sigma^2).
    The previous code used np.exp(0) (== 1) in place of e, which only offsets
    the criterion by a constant 0.5 and does not change its maximizer; the
    standard constant np.e is used here.
    NOTE(review): `var ** 2` is kept unchanged — confirm whether model.predict
    returns a variance (then plain `var` would match the formula) or a std dev.
    """
    x = reshape_pt1(x, verbose=False)
    (mean, var) = model.predict(x)
    return 1 / 2 * np.log(2 * np.pi * np.e * var ** 2)
# Remove outliers from a pandas dataframe
def remove_outlier(df):
    """Boolean mask of rows whose numeric values all lie inside the
    [0.1%, 99.9%] quantile range of their respective column.

    Adapted from:
    https://gist.github.com/ariffyasri/70f1e9139da770cb8514998124560281

    Bug fix: the previous implementation overwrote `mask` on every numeric
    column, so only the LAST numeric column was actually filtered; the
    per-column masks are now combined with a logical AND. `numeric_only=True`
    keeps `quantile` working when non-numeric columns are present.
    """
    low = .001
    high = .999
    quant_df = df.quantile([low, high], numeric_only=True)
    mask = None
    for name in list(df.columns):
        if is_numeric_dtype(df[name]):
            col_mask = (df[name] >= quant_df.loc[low, name]) & (
                df[name] <= quant_df.loc[high, name])
            mask = col_mask if mask is None else mask & col_mask
    if mask is None:
        # No numeric column: keep every row (original fallback value).
        mask = [True]
    return mask
# Vector x = (t, x) of time steps t at which x is known is interpolated at given
# time t, imposing initial value, and interpolating along each output dimension
# independently if there are more than one
def interpolate(t, x, t0, init_value, method='cubic'):
    """Interpolate time-stamped samples at the query times t.

    Args:
        t: scalar or 1-d array of query times.
        x: array of shape (N, 1 + d); column 0 holds the sample times, the
            remaining columns hold the corresponding values.
        t0: initial time; if the first query time equals t0, the value there
            is forced to init_value.
        init_value: value imposed at t0 (one sample, shape-compatible).
        method: interpolation method forwarded to scipy.interpolate.griddata.

    Returns:
        The interpolated values, passed through reshape_pt1 (so a single
        query time yields a (1, d) array).
    """
    x = reshape_pt1(x)
    # Normalize t to a flat 1-d array of query times.
    if np.isscalar(t):
        t = np.array([t])
    else:
        t = reshape_dim1_tonormal(t)
    tf = x[-1, 0]  # last time at which a sample is available
    if len(x) == 1:
        # If only one value of x available, assume constant
        interpolate_x = np.tile(reshape_pt1(x[0, 1:]), (len(t), 1))
    else:
        # Interpolate data t_x at array of times wanted; if several output
        # dims, interpolate all input dims for each output dim
        interpolate_x = griddata(x[:, 0], x[:, 1:], t, method=method)
    if t[0] == t0:
        # Impose initial value
        interpolate_x[0] = reshape_pt1(init_value)
    # Interpolation slightly outside of range: tolerate queries up to `tol`
    # beyond the sampled interval (tol = 100 x the last sampling step).
    if len(x) >= 2:
        tol = 100 * (tf - x[-2, 0])
        if tf < t[-1] <= tf + tol:
            # If t[-1] less than tol over last available t, return x[-1]
            interpolate_x[-1] = reshape_pt1(x[-1, 1:])
        elif t0 > t[0] >= t0 - tol:
            # If t[0] less than tol before first available t, return x[0]
            interpolate_x[0] = reshape_pt1(init_value)
    # griddata fills queries outside the sampled range with NaN; flag it.
    if np.isnan(np.min(interpolate_x)):
        print(t, x)
        logging.error('NaNs in interpolation: values need to be interpolated '
                      'outside of range, should not happen!')
    return reshape_pt1(interpolate_x)
| [
"numpy.mean",
"numpy.ones_like",
"numpy.reshape",
"numpy.isscalar",
"pandas.api.types.is_numeric_dtype",
"scipy.interpolate.griddata",
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.min",
"scipy.stats.multivariate_normal.logpdf",
"logging.error"
] | [((522, 547), 'numpy.zeros', 'np.zeros', (['(x.shape[0], 1)'], {}), '((x.shape[0], 1))\n', (530, 547), True, 'import numpy as np\n'), ((3673, 3687), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (3684, 3687), True, 'import numpy as np\n'), ((994, 1031), 'numpy.mean', 'np.mean', (['log_likelihood_array'], {'axis': '(0)'}), '(log_likelihood_array, axis=0)\n', (1001, 1031), True, 'import numpy as np\n'), ((1207, 1221), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1218, 1221), True, 'import numpy as np\n'), ((1260, 1281), 'numpy.reshape', 'np.reshape', (['x', '(1, 1)'], {}), '(x, (1, 1))\n', (1270, 1281), True, 'import numpy as np\n'), ((1304, 1315), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1312, 1315), True, 'import numpy as np\n'), ((1354, 1384), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], 1)'], {}), '(x, (x.shape[0], 1))\n', (1364, 1384), True, 'import numpy as np\n'), ((1575, 1589), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1586, 1589), True, 'import numpy as np\n'), ((1628, 1649), 'numpy.reshape', 'np.reshape', (['x', '(1, 1)'], {}), '(x, (1, 1))\n', (1638, 1649), True, 'import numpy as np\n'), ((1672, 1683), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1680, 1683), True, 'import numpy as np\n'), ((1722, 1752), 'numpy.reshape', 'np.reshape', (['x', '(1, x.shape[0])'], {}), '(x, (1, x.shape[0]))\n', (1732, 1752), True, 'import numpy as np\n'), ((1911, 1925), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1922, 1925), True, 'import numpy as np\n'), ((1964, 1983), 'numpy.reshape', 'np.reshape', (['x', '(1,)'], {}), '(x, (1,))\n', (1974, 1983), True, 'import numpy as np\n'), ((2280, 2294), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (2291, 2294), True, 'import numpy as np\n'), ((2333, 2352), 'numpy.reshape', 'np.reshape', (['x', '(1,)'], {}), '(x, (1,))\n', (2343, 2352), True, 'import numpy as np\n'), ((3219, 3245), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['df[name]'], 
{}), '(df[name])\n', (3235, 3245), False, 'from pandas.api.types import is_numeric_dtype\n'), ((3701, 3714), 'numpy.array', 'np.array', (['[t]'], {}), '([t])\n', (3709, 3714), True, 'import numpy as np\n'), ((4101, 4146), 'scipy.interpolate.griddata', 'griddata', (['x[:, 0]', 'x[:, 1:]', 't'], {'method': 'method'}), '(x[:, 0], x[:, 1:], t, method=method)\n', (4109, 4146), False, 'from scipy.interpolate import griddata\n'), ((4697, 4718), 'numpy.min', 'np.min', (['interpolate_x'], {}), '(interpolate_x)\n', (4703, 4718), True, 'import numpy as np\n'), ((4749, 4866), 'logging.error', 'logging.error', (['"""NaNs in interpolation: values need to be interpolated outside of range, should not happen!"""'], {}), "(\n 'NaNs in interpolation: values need to be interpolated outside of range, should not happen!'\n )\n", (4762, 4866), False, 'import logging\n'), ((908, 965), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['xi'], {'mean': 'mean[idx]', 'cov': 'covar'}), '(xi, mean=mean[idx], cov=covar)\n', (934, 965), False, 'from scipy.stats import multivariate_normal\n'), ((2024, 2052), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0],)'], {}), '(x, (x.shape[0],))\n', (2034, 2052), True, 'import numpy as np\n'), ((2393, 2421), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0],)'], {}), '(x, (x.shape[0],))\n', (2403, 2421), True, 'import numpy as np\n'), ((269, 281), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (278, 281), True, 'import numpy as np\n'), ((827, 846), 'numpy.ones_like', 'np.ones_like', (['covar'], {}), '(covar)\n', (839, 846), True, 'import numpy as np\n'), ((1225, 1236), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1233, 1236), True, 'import numpy as np\n'), ((1593, 1604), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1601, 1604), True, 'import numpy as np\n'), ((1929, 1940), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1937, 1940), True, 'import numpy as np\n'), ((2091, 2119), 'numpy.reshape', 'np.reshape', (['x', 
'(x.shape[1],)'], {}), '(x, (x.shape[1],))\n', (2101, 2119), True, 'import numpy as np\n'), ((2298, 2309), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2306, 2309), True, 'import numpy as np\n'), ((2460, 2488), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0],)'], {}), '(x, (x.shape[0],))\n', (2470, 2488), True, 'import numpy as np\n'), ((772, 787), 'numpy.array', 'np.array', (['covar'], {}), '(covar)\n', (780, 787), True, 'import numpy as np\n'), ((2921, 2930), 'numpy.exp', 'np.exp', (['(0)'], {}), '(0)\n', (2927, 2930), True, 'import numpy as np\n')] |
from globals import backt_t, backt_z, max_time
from utils import desired_state
import numpy as np
def time_traj_land(t):
    """Desired state for the landing phase at time t.

    The altitude decreases linearly from backt_z at t = backt_t down to 0 at
    t = max_time (clamped at 0); every other state component and the
    feed-forward controls are zero.
    """
    # Linear descent, clamped so z never goes below ground level.
    z = (backt_z*(max_time-t))/(max_time-backt_t)
    if z < 0:
        z = 0
    des_state = desired_state()
    des_state.pos = np.array([0, 0, z])
    des_state.vel = np.array([0, 0, 0])
    des_state.acc = np.array([0, 0, 0])
    des_state.rot = np.array([0, 0, 0])
    des_state.omega = np.array([0, 0, 0])
    des_state.control = np.array([0, 0])
return des_state | [
"utils.desired_state",
"numpy.array"
] | [((589, 604), 'utils.desired_state', 'desired_state', ([], {}), '()\n', (602, 604), False, 'from utils import desired_state\n'), ((625, 644), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (633, 644), True, 'import numpy as np\n'), ((665, 693), 'numpy.array', 'np.array', (['[xdot, ydot, zdot]'], {}), '([xdot, ydot, zdot])\n', (673, 693), True, 'import numpy as np\n'), ((714, 745), 'numpy.array', 'np.array', (['[xddot, yddot, zddot]'], {}), '([xddot, yddot, zddot])\n', (722, 745), True, 'import numpy as np\n'), ((766, 796), 'numpy.array', 'np.array', (['[phid, thetad, psid]'], {}), '([phid, thetad, psid])\n', (774, 796), True, 'import numpy as np\n'), ((819, 867), 'numpy.array', 'np.array', (['[phidot_des, thetadot_des, psidot_des]'], {}), '([phidot_des, thetadot_des, psidot_des])\n', (827, 867), True, 'import numpy as np\n'), ((892, 914), 'numpy.array', 'np.array', (['[Tfwd, Mfwd]'], {}), '([Tfwd, Mfwd])\n', (900, 914), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy.io import loadmat
from echopype import open_raw
import pytest
@pytest.fixture
def ek60_path(test_path):
    """Return the EK60 test-data directory from the shared test_path fixture."""
    return test_path["EK60"]
# raw_paths = ['./echopype/test_data/ek60/set1/' + file
# for file in os.listdir('./echopype/test_data/ek60/set1')] # 2 range lengths
# raw_path = ['./echopype/test_data/ek60/set2/' + file
# for file in os.listdir('./echopype/test_data/ek60/set2')] # 3 range lengths
# Other data files
# raw_filename = 'data_zplsc/OceanStarr_2017-D20170725-T004612.raw' # OceanStarr 2 channel EK60
# raw_filename = '../data/DY1801_EK60-D20180211-T164025.raw' # Dyson 5 channel EK60
# raw_filename = 'data_zplsc/D20180206-T000625.raw # EK80
def test_convert_ek60_matlab_raw(ek60_path):
    """Compare parsed Beam group data with Matlab outputs."""
    ek60_raw_path = str(
        ek60_path.joinpath('DY1801_EK60-D20180211-T164025.raw')
    )
    ek60_matlab_path = str(
        ek60_path.joinpath(
            'from_matlab/DY1801_EK60-D20180211-T164025_rawData.mat'
        )
    )
    # Convert the raw file with echopype.
    echodata = open_raw(raw_file=ek60_raw_path, sonar_model='EK60')
    # Load the reference data exported from Matlab.
    ds_matlab = loadmat(ek60_matlab_path)
    pings = ds_matlab['rawData'][0]['pings'][0]
    # Backscatter power must match within a small absolute tolerance.
    matlab_power = [pings['power'][0][fidx] for fidx in range(5)]
    assert np.allclose(
        matlab_power,
        echodata.beam.backscatter_r.transpose(
            'frequency', 'range_bin', 'ping_time'
        ),
        rtol=0,
        atol=1.6e-5,
    )
    # Alongship and athwartship angles must match exactly.
    for angle in ['alongship', 'athwartship']:
        matlab_angle = [pings[angle][0][fidx] for fidx in range(5)]
        assert np.array_equal(
            matlab_angle,
            echodata.beam['angle_' + angle].transpose(
                'frequency', 'range_bin', 'ping_time'
            ),
        )
def test_convert_ek60_echoview_raw(ek60_path):
    """Compare parsed power data (count) with csv exported by EchoView."""
    ek60_raw_path = str(
        ek60_path.joinpath('DY1801_EK60-D20180211-T164025.raw')
    )
    ek60_csv_path = [
        ek60_path.joinpath(
            'from_echoview/DY1801_EK60-D20180211-T164025-Power%d.csv' % freq
        )
        for freq in [18, 38, 70, 120, 200]
    ]
    # Read the per-frequency matrices exported by EchoView (first row is
    # skipped; the first 13 columns are dropped).
    test_power = np.stack([
        pd.read_csv(file, header=None, skiprows=[0]).iloc[:, 13:]
        for file in ek60_csv_path
    ])
    # Convert to netCDF and check
    echodata = open_raw(raw_file=ek60_raw_path, sonar_model='EK60')
    tolerances = [1e-5, 1.1e-5, 1.1e-5, 1e-5, 1e-5]
    for fidx, rtol in enumerate(tolerances):
        assert np.allclose(
            test_power[fidx, :, :],
            echodata.beam.backscatter_r.isel(
                frequency=fidx,
                ping_time=slice(None, 10),
                range_bin=slice(1, None),
            ),
            atol=9e-6,
            rtol=rtol,
        )
def test_convert_ek60_duplicate_ping_times(ek60_path):
    """Convert a file with duplicate ping times"""
    raw_path = (
        ek60_path
        / "ooi"
        / "CE02SHBP-MJ01C-07-ZPLSCB101_OOI-D20191201-T000000.raw"
    )
    ed = open_raw(raw_path, "EK60")
    # The converter should record in the provenance group that duplicate
    # ping times were encountered and the originals preserved.
    assert "duplicate_ping_times" in ed.provenance.attrs
    assert "old_ping_time" in ed.provenance
| [
"echopype.open_raw",
"scipy.io.loadmat",
"pandas.read_csv",
"numpy.stack"
] | [((1118, 1170), 'echopype.open_raw', 'open_raw', ([], {'raw_file': 'ek60_raw_path', 'sonar_model': '"""EK60"""'}), "(raw_file=ek60_raw_path, sonar_model='EK60')\n", (1126, 1170), False, 'from echopype import open_raw\n'), ((1222, 1247), 'scipy.io.loadmat', 'loadmat', (['ek60_matlab_path'], {}), '(ek60_matlab_path)\n', (1229, 1247), False, 'from scipy.io import loadmat\n'), ((2560, 2578), 'numpy.stack', 'np.stack', (['channels'], {}), '(channels)\n', (2568, 2578), True, 'import numpy as np\n'), ((2629, 2681), 'echopype.open_raw', 'open_raw', ([], {'raw_file': 'ek60_raw_path', 'sonar_model': '"""EK60"""'}), "(raw_file=ek60_raw_path, sonar_model='EK60')\n", (2637, 2681), False, 'from echopype import open_raw\n'), ((3294, 3320), 'echopype.open_raw', 'open_raw', (['raw_path', '"""EK60"""'], {}), "(raw_path, 'EK60')\n", (3302, 3320), False, 'from echopype import open_raw\n'), ((2475, 2519), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'skiprows': '[0]'}), '(file, header=None, skiprows=[0])\n', (2486, 2519), True, 'import pandas as pd\n')] |
import argparse
import os
import numpy as np
from coremltools import ImageType
from coremltools.models.utils import save_spec
from yolov4.tf import YOLOv4
import coremltools as ct
import json
# Command-line interface: Darknet names/cfg/weights in, Core ML package out.
parser = argparse.ArgumentParser(description='Yolo4 To CoreML Converter.')
parser.add_argument('-n', '--names_path', help='Path to names file.')
parser.add_argument('-c', '--config_path', help='Path to Darknet cfg file.')
parser.add_argument('-w', '--weights_path', help='Path to Darknet weights file.')
parser.add_argument('-m', '--mlpackage_path', help='Path to output CoreML mlpackage file.')
# Module-level YOLOv4 instance, configured and converted in _main().
yolo = YOLOv4()
def _main(args):
    """Convert a Darknet YOLOv4 model (names/cfg/weights) to a Core ML model.

    Validates the file extensions, loads the Darknet weights into the
    module-level `yolo` instance, converts it with coremltools, attaches the
    anchors and class names as user metadata, and saves the result.
    """
    names_path = os.path.expanduser(args.names_path)
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    mlpackage_path = os.path.expanduser(args.mlpackage_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)
    # NOTE: `|` is a bitwise OR of the two endswith() booleans (works, but
    # `or` would be the idiomatic spelling).
    assert mlpackage_path.endswith(
        '.mlpackage') | mlpackage_path.endswith(
        '.mlmodel'), 'output path {} is not a .mlpackage or .mlmodel file'.format(mlpackage_path)
    print('names: ', names_path)
    print('config: ', config_path)
    print('weights: ', weights_path)
    print('mlpackage: ', mlpackage_path)
    yolo.config.parse_names(names_path)
    # Serialize the class-name mapping to a JSON string for the metadata.
    names = json.encoder.JSONEncoder().encode(yolo.config.names)
    print('names: ', names)
    yolo.config.parse_cfg(config_path)
    yolo.make_model()
    yolo.load_weights(weights_path, weights_type="yolo")
    yolo.summary()
    # Convert to Core ML
    model = ct.convert(yolo.model,
                       inputs=[ImageType(name='input_1', scale=1/255., color_layout="BGR",
                                         channel_first=False)],
                       minimum_deployment_target=ct.target.iOS15,
                       compute_precision=ct.precision.FLOAT16,
                       compute_units=ct.ComputeUnit.ALL,
                       skip_model_load=False,
                       debug=False
                       )
    # Embed anchors and names so the consuming app can decode predictions.
    model.user_defined_metadata['yolo.anchors'] = np.array2string(yolo.config.anchors, separator=',')
    model.user_defined_metadata['yolo.names'] = names
    model.save(mlpackage_path)
    print('model.is_package', model.is_package)
    if (model.is_package) :
        save_spec(model.get_spec(), mlpackage_path)
if __name__ == '__main__':
_main(parser.parse_args()) | [
"argparse.ArgumentParser",
"numpy.array2string",
"coremltools.ImageType",
"yolov4.tf.YOLOv4",
"os.path.expanduser",
"json.encoder.JSONEncoder"
] | [((202, 267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Yolo4 To CoreML Converter."""'}), "(description='Yolo4 To CoreML Converter.')\n", (225, 267), False, 'import argparse\n'), ((597, 605), 'yolov4.tf.YOLOv4', 'YOLOv4', ([], {}), '()\n', (603, 605), False, 'from yolov4.tf import YOLOv4\n'), ((641, 676), 'os.path.expanduser', 'os.path.expanduser', (['args.names_path'], {}), '(args.names_path)\n', (659, 676), False, 'import os\n'), ((695, 731), 'os.path.expanduser', 'os.path.expanduser', (['args.config_path'], {}), '(args.config_path)\n', (713, 731), False, 'import os\n'), ((751, 788), 'os.path.expanduser', 'os.path.expanduser', (['args.weights_path'], {}), '(args.weights_path)\n', (769, 788), False, 'import os\n'), ((810, 849), 'os.path.expanduser', 'os.path.expanduser', (['args.mlpackage_path'], {}), '(args.mlpackage_path)\n', (828, 849), False, 'import os\n'), ((2215, 2266), 'numpy.array2string', 'np.array2string', (['yolo.config.anchors'], {'separator': '""","""'}), "(yolo.config.anchors, separator=',')\n", (2230, 2266), True, 'import numpy as np\n'), ((1435, 1461), 'json.encoder.JSONEncoder', 'json.encoder.JSONEncoder', ([], {}), '()\n', (1459, 1461), False, 'import json\n'), ((1748, 1835), 'coremltools.ImageType', 'ImageType', ([], {'name': '"""input_1"""', 'scale': '(1 / 255.0)', 'color_layout': '"""BGR"""', 'channel_first': '(False)'}), "(name='input_1', scale=1 / 255.0, color_layout='BGR',\n channel_first=False)\n", (1757, 1835), False, 'from coremltools import ImageType\n')] |
# xvfb-run -s "-screen 0 1400x900x24" python 01_generate_data.py car_racing --total_episodes 4000 --time_steps 300
import numpy as np
import random
import config
#import matplotlib.pyplot as plt
from env import make_env
import argparse
# Output directory for the recorded rollout episodes (.npz files).
DIR_NAME = './data/rollout/'
def main(args):
    """Roll out random-policy episodes and save each one as a compressed .npz.

    For every selected environment, plays `total_episodes` episodes of up to
    `time_steps` steps each, recording observations, actions, rewards and
    done flags, and writes them under DIR_NAME with a random episode id.
    """
    env_name = args.env_name
    total_episodes = args.total_episodes
    time_steps = args.time_steps
    render = args.render
    run_all_envs = args.run_all_envs
    action_refresh_rate = args.action_refresh_rate
    if run_all_envs:
        envs_to_generate = config.train_envs
    else:
        envs_to_generate = [env_name]
    for current_env_name in envs_to_generate:
        print("Generating data for env {}".format(current_env_name))
        env = make_env(current_env_name)  # <1>
        s = 0
        while s < total_episodes:
            # Random id keeps parallel workers from overwriting each other.
            episode_id = random.randint(0, 2**31 - 1)
            filename = DIR_NAME + str(episode_id) + ".npz"
            observation = env.reset()
            env.render()
            t = 0
            obs_sequence = []
            action_sequence = []
            reward_sequence = []
            done_sequence = []
            reward = -0.1
            done = False
            while t < time_steps:  # and not done:
                # Hold each random action for `action_refresh_rate` frames.
                if t % action_refresh_rate == 0:
                    action = config.generate_data_action(t, env)  # <2>
                observation = config.adjust_obs(observation)  # <3>
                obs_sequence.append(observation)
                action_sequence.append(action)
                reward_sequence.append(reward)
                done_sequence.append(done)
                observation, reward, done, info = env.step(action)  # <4>
                t = t + 1
                if render:
                    env.render()
            print("Episode {} finished after {} timesteps".format(s, t))
            np.savez_compressed(filename, obs=obs_sequence, action=action_sequence,
                                reward=reward_sequence, done=done_sequence)
            s = s + 1
        env.close()
if __name__ == "__main__":
    # Parse the command-line options and run the data-generation loop.
    parser = argparse.ArgumentParser(description=('Create new training data'))
    parser.add_argument('env_name', type=str, help='name of environment')
    parser.add_argument('--total_episodes', type=int, default=200,
                        help='total number of episodes to generate per worker')
    parser.add_argument('--time_steps', type=int, default=300,
                        help='how many timesteps at start of episode?')
    parser.add_argument('--render', default=0, type=int,
                        help='render the env as data is generated')
    parser.add_argument('--action_refresh_rate', default=20, type=int,
                        help='how often to change the random action, in frames')
    parser.add_argument('--run_all_envs', action='store_true',
                        help='if true, will ignore env_name and loop over all envs in train_envs variables in config.py')
    args = parser.parse_args()
    main(args)
| [
"argparse.ArgumentParser",
"config.adjust_obs",
"config.generate_data_action",
"env.make_env",
"numpy.savez_compressed",
"random.randint"
] | [((2130, 2193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create new training data"""'}), "(description='Create new training data')\n", (2153, 2193), False, 'import argparse\n'), ((750, 776), 'env.make_env', 'make_env', (['current_env_name'], {}), '(current_env_name)\n', (758, 776), False, 'from env import make_env\n'), ((859, 889), 'random.randint', 'random.randint', (['(0)', '(2 ** 31 - 1)'], {}), '(0, 2 ** 31 - 1)\n', (873, 889), False, 'import random\n'), ((1890, 2009), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'obs': 'obs_sequence', 'action': 'action_sequence', 'reward': 'reward_sequence', 'done': 'done_sequence'}), '(filename, obs=obs_sequence, action=action_sequence,\n reward=reward_sequence, done=done_sequence)\n', (1909, 2009), True, 'import numpy as np\n'), ((1415, 1445), 'config.adjust_obs', 'config.adjust_obs', (['observation'], {}), '(observation)\n', (1432, 1445), False, 'import config\n'), ((1341, 1376), 'config.generate_data_action', 'config.generate_data_action', (['t', 'env'], {}), '(t, env)\n', (1368, 1376), False, 'import config\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# <NAME>
# 2015.11.19
# test the python in Spark without pyspark
from pyspark import SparkContext, SparkConf
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.tree import DecisionTree
import numpy as np
def linear_regression():
    """Train and evaluate regression models on the bike-sharing dataset.

    Reads ./data/hour_noheader.csv, builds one-hot encoded feature vectors for
    a linear regression model (SGD) and raw feature vectors for decision-tree
    regressors, then prints MSE and RMSLE metrics for each model.
    """
    conf = SparkConf().setAppName('RF')
    sc = SparkContext(conf=conf)
    raw_data = sc.textFile("./data/hour_noheader.csv")
    # raw_data = spark.read.format("csv").option("header", "true").csv("./data/hour.csv")
    num_data = raw_data.count()
    records = raw_data.map(lambda x: x.split(","))
    first = records.first()
    print(first)
    print(num_data)

    def get_mapping(rdd, idx):
        # Map each distinct category value in column `idx` to a dense index.
        return rdd.map(lambda fields: fields[idx]).distinct().zipWithIndex().collectAsMap()

    print("maping first categorical feature conlumn: %s" % get_mapping(records, 2))
    mappings = [get_mapping(records, i) for i in range(2, 10)]
    print("mappings is:" + str(mappings))
    cat_len = sum(map(len, mappings))
    num_len = len(records.first()[10:14])
    total_len = num_len + cat_len
    print("Feature vector length for categorical features: %d" % cat_len)
    print("Feature vector length for numerical features: %d" % num_len)
    print("Total feature vector length: %d" % total_len)

    # Feature extraction for the linear model: one-hot encode the categorical
    # columns and append the raw numerical columns.
    def extract_features(record):
        cat_vec = np.zeros(cat_len)
        i = 0
        step = 0
        # BUG FIX: `mappings` covers columns 2..9 (8 columns) but the original
        # iterated record[2:9] (only 7 fields), silently dropping the last
        # categorical feature.
        for field in record[2:10]:
            m = mappings[i]
            idx = m[field]
            cat_vec[idx + step] = 1
            i = i + 1
            step = step + len(m)
        num_vec = np.array([float(field) for field in record[10:14]])
        return np.concatenate((cat_vec, num_vec))

    # Label extraction: the target is the last column.
    def extract_label(record):
        return float(record[-1])

    data = records.map(lambda r: LabeledPoint(extract_label(r), extract_features(r)))
    first_point = data.first()
    print("Raw data: " + str(first[2:]))
    print("Label: " + str(first_point.label))
    print("Linear Model feature vector:\n" + str(first_point.features))
    print("Linear Model feature vector length: " + str(len(first_point.features)))

    # Feature extraction for the decision tree: raw (non-encoded) columns 2..13.
    def extract_features_dt(record):
        # BUG FIX: np.array(map(...)) yields a useless 0-d object array on
        # Python 3; materialize the values with a list comprehension instead.
        return np.array([float(field) for field in record[2:14]])

    data_dt = records.map(lambda r: LabeledPoint(extract_label(r), extract_features_dt(r)))
    first_point_dt = data_dt.first()
    print("Decision Tree feature vector: " + str(first_point_dt.features))
    print("Decision Tree feature vector length: " + str(len(first_point_dt.features)))

    # Train the linear model and inspect its predictions.
    linear_model = LinearRegressionWithSGD.train(data, iterations=10000, step=0.1, intercept=False)
    true_vs_predicted = data.map(lambda p: (p.label, linear_model.predict(p.features)))
    print("Linear Model predictions: " + str(true_vs_predicted.take(5)))

    # Train the decision-tree model and inspect its predictions.
    dt_model = DecisionTree.trainRegressor(data_dt, {})
    preds = dt_model.predict(data_dt.map(lambda p: p.features))
    actual = data.map(lambda p: p.label)
    true_vs_predicted_dt = actual.zip(preds)
    print("Decision Tree predictions: " + str(true_vs_predicted_dt.take(5)))
    print("Decision Tree depth: " + str(dt_model.depth()))
    print("Decision Tree number of nodes: " + str(dt_model.numNodes()))

    # Regression evaluation metrics used below:
    #   Mean Squared Error (MSE), Root Mean Squared Error (RMSE),
    #   Mean Absolute Error (MAE), R-squared coefficient,
    #   Root Mean Squared Log Error (RMSLE).

    # MSE / RMSE helper.
    def squared_error(actual, pred):
        return (pred - actual) ** 2

    # BUG FIX: RDD.map passes one (label, prediction) tuple per element, so the
    # original two-argument lambdas (`lambda t, p: ...`) raised TypeError.
    mse = true_vs_predicted.map(lambda tp: squared_error(tp[0], tp[1])).mean()
    mse_dt = true_vs_predicted_dt.map(lambda tp: squared_error(tp[0], tp[1])).mean()
    cat_features = dict([(i - 2, len(get_mapping(records, i)) + 1) for i in range(2, 10)])
    # Train the decision tree again, now declaring the categorical features.
    dt_model_2 = DecisionTree.trainRegressor(data_dt, categoricalFeaturesInfo=cat_features)
    preds_2 = dt_model_2.predict(data_dt.map(lambda p: p.features))
    actual_2 = data.map(lambda p: p.label)
    true_vs_predicted_dt_2 = actual_2.zip(preds_2)
    # Compute performance metrics for the categorical-aware decision tree.
    mse_dt_2 = true_vs_predicted_dt_2.map(lambda tp: squared_error(tp[0], tp[1])).mean()
    print("Linear Model - Mean Squared Error: %2.4f" % mse)
    print("Decision Tree - Mean Squared Error: %2.4f" % mse_dt)
    print("Categorical feature size mapping %s" % cat_features)
    print("Decision Tree [Categorical feature]- Mean Squared Error: %2.4f" % mse_dt_2)

    # Root mean squared log error (symmetric under argument swap because the
    # difference of logs is squared).
    def squared_log_error(pred, actual):
        return (np.log(pred + 1) - np.log(actual + 1)) ** 2

    rmsle = np.sqrt(true_vs_predicted.map(lambda tp: squared_log_error(tp[0], tp[1])).mean())
    rmsle_dt = np.sqrt(true_vs_predicted_dt.map(lambda tp: squared_log_error(tp[0], tp[1])).mean())
    rmsle_dt_2 = np.sqrt(true_vs_predicted_dt_2.map(lambda tp: squared_log_error(tp[0], tp[1])).mean())
    print("Linear Model - Root Mean Squared Log Error: %2.4f" % rmsle)
    print("Decision Tree - Root Mean Squared Log Error: %2.4f" % rmsle_dt)
    print("Decision Tree [Categorical feature]- Root Mean Squared Log Error: %2.4f" % rmsle_dt_2)
    # Improvement and tuning ideas (kept for reference):
    # targets = records.map(lambda r: float(r[-1])).collect()
    # hist(targets, bins=40, color='lightblue', normed=True)
    # fig = matplotlib.pyplot.gcf()
    # fig.set_size_inches(16, 10)
def random_forest():
    """Train, evaluate, save, and reload a RandomForest classifier.

    Loads LibSVM-formatted sample data, trains a 3-tree random forest on a
    70/30 split, prints the test error and the learned model, then saves the
    model and loads it back.
    """
    conf = SparkConf().setAppName('RF')
    sc = SparkContext(conf=conf)
    # print("\npyspark version:" + str(sc.version) + "\n")
    data = MLUtils.loadLibSVMFile(sc, './data/sample_libsvm_data.txt')
    (trainingData, testData) = data.randomSplit([0.7, 0.3])
    model = RandomForest.trainClassifier(trainingData, numClasses=2,
                                         categoricalFeaturesInfo={}, numTrees=3,
                                         featureSubsetStrategy="auto", impurity='gini',
                                         maxDepth=4, maxBins=32)
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    # BUG FIX: RDD.filter passes a single (label, prediction) tuple per element;
    # the original two-argument lambda (`lambda v, p: ...`) raised TypeError.
    testErr = labelsAndPredictions.filter(lambda vp: vp[0] != vp[1]).count() / float(testData.count())
    print('Test Error = ' + str(testErr))
    print('Learned classification forest model:')
    print(model.toDebugString())
    # Save and load model.
    # BUG FIX: save under the same path the model is loaded from
    # ("./model/...", not ".model/...").
    model.save(sc, "./model/myRandomForestClassificationModel")
    sameModel = RandomForestModel.load(sc, "./model/myRandomForestClassificationModel")
# Script entry point: run the random-forest example when executed directly.
if __name__ == '__main__':
    random_forest()
| [
"numpy.log",
"pyspark.SparkConf",
"pyspark.mllib.tree.DecisionTree.trainRegressor",
"numpy.zeros",
"pyspark.mllib.util.MLUtils.loadLibSVMFile",
"pyspark.mllib.tree.RandomForestModel.load",
"numpy.concatenate",
"pyspark.SparkContext",
"pyspark.mllib.tree.RandomForest.trainClassifier",
"pyspark.mlli... | [((493, 516), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (505, 516), False, 'from pyspark import SparkContext, SparkConf\n'), ((2809, 2894), 'pyspark.mllib.regression.LinearRegressionWithSGD.train', 'LinearRegressionWithSGD.train', (['data'], {'iterations': '(10000)', 'step': '(0.1)', 'intercept': '(False)'}), '(data, iterations=10000, step=0.1, intercept=False\n )\n', (2838, 2894), False, 'from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD\n'), ((3092, 3132), 'pyspark.mllib.tree.DecisionTree.trainRegressor', 'DecisionTree.trainRegressor', (['data_dt', '{}'], {}), '(data_dt, {})\n', (3119, 3132), False, 'from pyspark.mllib.tree import DecisionTree\n'), ((4121, 4195), 'pyspark.mllib.tree.DecisionTree.trainRegressor', 'DecisionTree.trainRegressor', (['data_dt'], {'categoricalFeaturesInfo': 'cat_features'}), '(data_dt, categoricalFeaturesInfo=cat_features)\n', (4148, 4195), False, 'from pyspark.mllib.tree import DecisionTree\n'), ((5731, 5754), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (5743, 5754), False, 'from pyspark import SparkContext, SparkConf\n'), ((5829, 5888), 'pyspark.mllib.util.MLUtils.loadLibSVMFile', 'MLUtils.loadLibSVMFile', (['sc', '"""./data/sample_libsvm_data.txt"""'], {}), "(sc, './data/sample_libsvm_data.txt')\n", (5851, 5888), False, 'from pyspark.mllib.util import MLUtils\n'), ((5965, 6140), 'pyspark.mllib.tree.RandomForest.trainClassifier', 'RandomForest.trainClassifier', (['trainingData'], {'numClasses': '(2)', 'categoricalFeaturesInfo': '{}', 'numTrees': '(3)', 'featureSubsetStrategy': '"""auto"""', 'impurity': '"""gini"""', 'maxDepth': '(4)', 'maxBins': '(32)'}), "(trainingData, numClasses=2,\n categoricalFeaturesInfo={}, numTrees=3, featureSubsetStrategy='auto',\n impurity='gini', maxDepth=4, maxBins=32)\n", (5993, 6140), False, 'from pyspark.mllib.tree import RandomForest, RandomForestModel\n'), ((6743, 6814), 
'pyspark.mllib.tree.RandomForestModel.load', 'RandomForestModel.load', (['sc', '"""./model/myRandomForestClassificationModel"""'], {}), "(sc, './model/myRandomForestClassificationModel')\n", (6765, 6814), False, 'from pyspark.mllib.tree import RandomForest, RandomForestModel\n'), ((1550, 1567), 'numpy.zeros', 'np.zeros', (['cat_len'], {}), '(cat_len)\n', (1558, 1567), True, 'import numpy as np\n'), ((1874, 1908), 'numpy.concatenate', 'np.concatenate', (['(cat_vec, num_vec)'], {}), '((cat_vec, num_vec))\n', (1888, 1908), True, 'import numpy as np\n'), ((454, 465), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (463, 465), False, 'from pyspark import SparkContext, SparkConf\n'), ((5692, 5703), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (5701, 5703), False, 'from pyspark import SparkContext, SparkConf\n'), ((4864, 4880), 'numpy.log', 'np.log', (['(pred + 1)'], {}), '(pred + 1)\n', (4870, 4880), True, 'import numpy as np\n'), ((4883, 4901), 'numpy.log', 'np.log', (['(actual + 1)'], {}), '(actual + 1)\n', (4889, 4901), True, 'import numpy as np\n')] |
from __future__ import print_function
from six.moves import cPickle as pickle
import numpy as np
import os
import platform
import matplotlib.pyplot as plt
def load_pickle(f):
    """Unpickle from file object *f*, selecting the loader by Python version.

    Python 3 needs ``encoding='latin1'`` to read pickles written by Python 2.
    """
    major = platform.python_version_tuple()[0]
    if major == '3':
        return pickle.load(f, encoding='latin1')
    if major == '2':
        return pickle.load(f)
    raise ValueError("invalid python version: {}".format(platform.python_version_tuple()))
def load_CIFAR_batch(filename, n_class):
    """Load one CIFAR batch file and return (images, one-hot labels).

    Args:
        filename: path to a pickled CIFAR batch with 'data' and 'labels' keys.
        n_class: number of classes for the one-hot label encoding.

    Returns:
        XX: float array of shape (batch, 32, 32, 3) scaled to [0, 1].
        YY: one-hot label array of shape (batch, n_class).
    """
    with open(filename, 'rb') as f:
        datadict = load_pickle(f)
        X = datadict['data']
        Y = datadict['labels']
        # GENERALIZED: infer the batch size from the data instead of assuming
        # exactly 10000 rows per batch file (the original hard-coded 10000
        # would crash on any other batch size).
        XX = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float") / 255.0
        Y = np.array(Y)
        YY = np.zeros([Y.shape[0], n_class])
        # One-hot encode: set column Y[i] of row i to 1.
        YY[np.arange(Y.shape[0]), Y] = 1.0
        return XX, YY
def Load_CIFAR10(path):
    """Load the full CIFAR-10 dataset from *path*.

    Concatenates the five training batches and loads the test batch, returning
    two dicts with 'input' (images) and 'output' (one-hot labels) keys.
    """
    train_inputs = []
    train_labels = []
    for batch_id in range(1, 6):
        batch_path = os.path.join(path, 'data_batch_%d' % (batch_id,))
        images, labels = load_CIFAR_batch(batch_path, 10)
        train_inputs.append(images)
        train_labels.append(labels)
    data_train = dict()
    data_train['input'] = np.concatenate(train_inputs)
    data_train['output'] = np.concatenate(train_labels)
    test_images, test_labels = load_CIFAR_batch(os.path.join(path, 'test_batch'), 10)
    data_test = dict()
    data_test['input'] = test_images
    data_test['output'] = test_labels
    return data_train, data_test
| [
"platform.python_version_tuple",
"six.moves.cPickle.load",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.arange"
] | [((191, 222), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (220, 222), False, 'import platform\n'), ((1086, 1104), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (1100, 1104), True, 'import numpy as np\n'), ((1115, 1133), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (1129, 1133), True, 'import numpy as np\n'), ((264, 278), 'six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (275, 278), True, 'from six.moves import cPickle as pickle\n'), ((695, 706), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (703, 706), True, 'import numpy as np\n'), ((720, 751), 'numpy.zeros', 'np.zeros', (['[Y.shape[0], n_class]'], {}), '([Y.shape[0], n_class])\n', (728, 751), True, 'import numpy as np\n'), ((952, 994), 'os.path.join', 'os.path.join', (['path', "('data_batch_%d' % (b,))"], {}), "(path, 'data_batch_%d' % (b,))\n", (964, 994), False, 'import os\n'), ((1179, 1211), 'os.path.join', 'os.path.join', (['path', '"""test_batch"""'], {}), "(path, 'test_batch')\n", (1191, 1211), False, 'import os\n'), ((322, 355), 'six.moves.cPickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (333, 355), True, 'from six.moves import cPickle as pickle\n'), ((763, 784), 'numpy.arange', 'np.arange', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (772, 784), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def sample_visualization(M_C, y_test, x_test, test_pred_class):
    """Plot a 10x10 grid of per-class sample predictions.

    For each of the 10 classes (one column per class), the first 5 rows show
    up to 5 correctly predicted test samples and the last 5 rows show up to 5
    incorrectly predicted ones.

    Args:
        M_C: True for MNIST (28x28 grayscale), False for CIFAR-10 (32x32 RGB).
        y_test: ground-truth class labels of the test set.
        x_test: flattened test images.
        test_pred_class: predicted class labels of the test set.
    """
    # correct_index[c][j] / wrong_index[c][j]: up to 5 sample indices per class c.
    correct_index = [[None for i in range(5)] for j in range(10)]
    wrong_index = [[None for i in range(5)] for j in range(10)]
    for i in range(10):
        # Indices whose true label is i, and indices predicted as i.
        label = np.where(y_test == i)[0]
        pred = np.where(test_pred_class == i)[0]
        # print(*label)
        # print("\n")
        # print(*pred)
        # print("\n")
        # print("\n")
        # Split the predictions for class i into hits and misses.
        correct = []
        wrong = []
        for k in pred:
            if (k in label):
                correct.append(k)
            else:
                wrong.append(k)
        # print(*correct)
        # print("\n")
        # print(*wrong)
        # print("\n")
        # print("\n")
        # print(len(correct))
        # print(len(wrong))
        # Keep at most the first 5 indices of each kind.
        for j in range(len(correct)):
            if j == 5:
                break
            else:
                correct_index[i][j] = correct[j]
        # print(*correct_index)
        for k in range(len(wrong)):
            if k == 5:
                break
            else:
                wrong_index[i][k] = wrong[k]
    # print(*wrong_index)
    fig, axes = plt.subplots(10, 10, figsize=(10, 10))
    fig.subplots_adjust(hspace=0.1, wspace=0.1)
    for i in range(10):
        # Rows 0-4: correctly classified samples of class i.
        for j in range(5):
            if correct_index[i][j] != None:
                indx1 = correct_index[i][j]
                if M_C == True: # MNIST dataset
                    axes[j, i].imshow((x_test[indx1].reshape(28,28)), cmap='gray')
                else: # CIFAR-10 dataset
                    #axes[j, i].imshow(x_test[indx1].reshape(32,32,3)) # used instead of following lines when using tensorflow
                    # Reassemble the planar R/G/B channel layout into an HxWx3 image.
                    img = x_test[indx1].reshape(32, 32, 3)
                    img = img.reshape(32 * 32 * 3)
                    R = img[0:1024].reshape(32, 32)
                    G = img[1024:2048].reshape(32, 32)
                    B = img[2048:].reshape(32, 32)
                    img = np.dstack((R, G, B))
                    axes[j, i].imshow(img, interpolation='bicubic')
        # Rows 5-9: misclassified samples predicted as class i.
        for k in range(5):
            if wrong_index[i][k] != None:
                indx2 = wrong_index[i][k]
                if M_C == True: # MNIST dataset
                    axes[k + 5, i].imshow((x_test[indx2].reshape(28,28)), cmap='gray')
                else: # CIFAR-10 dataset
                    #axes[k + 5, i].imshow(x_test[indx2].reshape(32,32,3)) # used instead of following lines when using tensorflow
                    img = x_test[indx2].reshape(32,32,3)
                    img=img.reshape(32*32*3)
                    R = img[0:1024].reshape(32, 32)
                    G = img[1024:2048].reshape(32, 32)
                    B = img[2048:].reshape(32, 32)
                    img = np.dstack((R, G, B))
                    axes[k + 5, i].imshow(img, interpolation='bicubic')
    if M_C == True: # MNIST dataset
        fig.suptitle(
            'MNIST dataset \n \n1st 5 rows of correct predicted samples & 2nd 5 rows of wrong predicted samples',
            size=16)
    else: # CIFAR-10 dataset
        fig.suptitle(
            'CIFAR-10 dataset \n \n1st 5 rows of correct predicted samples & 2nd 5 rows of wrong predicted samples',
            size=16)
    plt.show(block=True)
''''#test function for mnist and cifar10 data on fully connected model
import DL
def test(M_C):
if M_C==True:
Label_Train, Features_Train, Label_Test, Features_Test = DL.ReadFile("F:\\eural\\project2\\Deep-Learning-framework-main\\MNISTcsv")
# %% training
batch_size = 64
num_epochs = 1
num_classes = 10
hidden_units = 300
input_dimensions = (28, 28, 1)
# change each label from scaler value to vector( 2 ---> [0, 0, 1, 0, 0, ...] ) (hot one)
Label_Train_hotone = DL.hot_one(Label_Train, num_classes)
model = DL.model()
model.input_dims(input_dimensions)
model.add('flatten')
model.add('Relu', hidden_units)
model.add('Linear', num_classes)
optim = DL.optimizer(0.5, 0.5)
loss_fn = DL.loss_Function('SoftmaxCrossEntropy')
loss_fn.setLambda(0)
model.fit(Features_Train, Label_Train_hotone,
batch_size, num_epochs, optim, loss_fn)
predicted_labels = np.argmax(model.predict(Features_Test), axis=0)
else:
Label_Train, Features_Train, Label_Test, Features_Test = DL.ReadFile(
"F:\\eural\\project2\\Deep-Learning-framework-main\\cifar-10-batches-py")
# %% training
batch_size = 128
num_epochs = 3
num_classes = 10
hidden_units = 100
input_dimensions = (32, 32, 3)
# change each label from scaler value to vector( 2 ---> [0, 0, 1, 0, 0, ...] ) (hot one)
Label_Train_hotone = DL.hot_one(Label_Train, num_classes)
model = DL.model()
model.input_dims(input_dimensions)
model.add('flatten')
model.add('Relu', hidden_units)
model.add('Linear', num_classes)
optim = DL.optimizer(0.001)
loss_fn = DL.loss_Function('SoftmaxCrossEntropy')
loss_fn.setLambda(0)
model.fit(Features_Train, Label_Train_hotone,
batch_size, num_epochs, optim, loss_fn)
z = model.predict(Features_Test)
# print ("predicted labels dimensions",z.shape)
predicted_labels = np.argmax(z, axis=0)
return Label_Test, Features_Test, predicted_labels'''
'''#calling test
M_C = True
Label_Test, Features_Test, predicted_labels=test(M_C)
sample_visualization(M_C, Label_Test, Features_Test, predicted_labels)
M_C = False
Label_Test, Features_Test, predicted_labels=test(M_C)
sample_visualization(M_C, Label_Test, Features_Test, predicted_labels)'''
| [
"numpy.where",
"numpy.dstack",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1362, 1400), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(10)', '(10)'], {'figsize': '(10, 10)'}), '(10, 10, figsize=(10, 10))\n', (1374, 1400), True, 'import matplotlib.pyplot as plt\n'), ((3619, 3639), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (3627, 3639), True, 'import matplotlib.pyplot as plt\n'), ((303, 324), 'numpy.where', 'np.where', (['(y_test == i)'], {}), '(y_test == i)\n', (311, 324), True, 'import numpy as np\n'), ((347, 377), 'numpy.where', 'np.where', (['(test_pred_class == i)'], {}), '(test_pred_class == i)\n', (355, 377), True, 'import numpy as np\n'), ((2245, 2265), 'numpy.dstack', 'np.dstack', (['(R, G, B)'], {}), '((R, G, B))\n', (2254, 2265), True, 'import numpy as np\n'), ((3097, 3117), 'numpy.dstack', 'np.dstack', (['(R, G, B)'], {}), '((R, G, B))\n', (3106, 3117), True, 'import numpy as np\n')] |
# $Id$
#
# Copyright (C) 2006-2011 <NAME>
# All Rights Reserved
#
import os
from chembl_beaker.beaker.draw.molDrawing import MolDrawing, DrawingOptions
def CoordsAreAllZero(m, confId=-1):
  """ Returns True when every atom position in the given conformer is (0, 0, 0),
  i.e. the molecule has no usable 2D/3D coordinates yet.
  """
  conf = m.GetConformer(confId)
  origin = [0.0, 0.0, 0.0]
  return all(list(conf.GetAtomPosition(idx)) == origin
             for idx in range(m.GetNumAtoms()))
def _getCanvas():
  """ Selects the drawing backend; only the cairo canvas is used here.
  Returns (useAGG, useCairo, Canvas).
  """
  use_agg = False
  use_cairo = True
  from chembl_beaker.beaker.draw.cairoCanvas import Canvas
  return use_agg, use_cairo, Canvas
def _createCanvas(size):
  """ Creates a transparent RGBA PIL image of the given size together with a
  canvas that draws onto it. Returns (image, canvas).
  """
  _, _, Canvas = _getCanvas()
  from PIL import Image
  image = Image.new("RGBA", size, (0, 0, 0, 0))
  return image, Canvas(image)
return img,canvas
def MolToImage(mol, size=(300,300), kekulize=True, wedgeBonds=True,
               fitImage=False, options=None, canvas=None, **kwargs):
  """ returns a PIL image containing a drawing of the molecule
    Keyword arguments:
    kekulize -- run kekulization routine on input `mol` (default True)
    size -- final image size, in pixel (default (300,300))
    wedgeBonds -- draw wedge (stereo) bonds (default True)
    highlightAtoms -- list of atoms to highlight (default [])
    highlightMap -- dictionary of (atom, color) pairs (default None)
    highlightBonds -- list of bonds to highlight (default [])
  """
  if not mol:
    raise ValueError('Null molecule provided')
  # When no canvas is supplied, create a fresh image+canvas pair; otherwise
  # draw on the caller's canvas and return no image.
  if canvas is None:
    img,canvas=_createCanvas(size)
  else:
    img=None
  # Scale the atom-label font with the image size; tiny images get size 1.
  fontSize = int(min(size) / 20)
  if min(size) < 200:
    fontSize = 1
  if options is None:
    options = DrawingOptions()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  options.atomLabelFontSize = fontSize
  drawer = MolDrawing(canvas=canvas,drawingOptions=options)
  # NOTE(review): Chem.Kekulize mutates `mol` in place here (no copy is made,
  # unlike MolToFile below).
  if kekulize:
    from rdkit import Chem
    Chem.Kekulize(mol)
  # Generate 2D coordinates when the molecule has none, or only all-zero ones.
  if not mol.GetNumConformers() or CoordsAreAllZero(mol):
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  # Pop the legend out of kwargs before forwarding the rest to AddMol.
  if 'legend' in kwargs:
    legend = kwargs['legend']
    del kwargs['legend']
  else:
    legend=''
  drawer.AddMol(mol,**kwargs)
  # Draw the legend text near the bottom centre of the image.
  if legend:
    from chembl_beaker.beaker.draw.molDrawing import Font
    bbox = drawer.boundingBoxes[mol]
    pos = size[0]/2,int(.94*size[1]),0 # the 0.94 is extremely empirical
    # canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
    #                         color=(1,0,0),fill=False,stroke=True)
    # canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0) ),
    #                         color=(0,0,1),fill=False,stroke=True)
    font=Font(face='sans',size=12)
    canvas.addCanvasText(legend,pos,font)
  # Optionally hand the raw canvas back to the caller instead of flushing.
  if kwargs.get('returnCanvas',False):
    return img,canvas,drawer
  else:
    canvas.flush()
    return img
def MolToFile(mol,fileName,size=(300,300),kekulize=True, wedgeBonds=True,
              imageType=None, fitImage=False, options=None, **kwargs):
  """ Generates a drawing of a molecule and writes it to a file
  """
  # original contribution from <NAME>
  if not fileName:
    raise ValueError('no fileName provided')
  if not mol:
    raise ValueError('Null molecule provided')
  # Infer the image type from the file extension when not given explicitly.
  if imageType is None:
    imageType=os.path.splitext(fileName)[1][1:]
  if options is None:
    options = DrawingOptions()
  useAGG,useCairo,Canvas = _getCanvas()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  # Cairo/AGG canvases write to fileName on flush(); other backends use save().
  if useCairo or useAGG:
    canvas = Canvas(size=size,imageType=imageType,
                    fileName=fileName)
  else:
    options.radicalSymbol = '.' #<- the sping canvas doesn't support unicode well
    canvas = Canvas(size=size,name=fileName,imageType=imageType)
  drawer = MolDrawing(canvas=canvas,drawingOptions=options)
  # Kekulize a binary round-trip copy so the caller's molecule is untouched.
  if kekulize:
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  # Generate 2D coordinates if the molecule has none.
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol,**kwargs)
  if useCairo or useAGG:
    canvas.flush()
  else:
    canvas.save()
def MolToImageFile(mol,filename,size=(300,300),kekulize=True, wedgeBonds=True,
                   **kwargs):
  """ DEPRECATED: please use MolToFile instead
  """
  # Render to a PIL image, then persist it under *filename*.
  rendered = MolToImage(mol, size=size, kekulize=kekulize,
                        wedgeBonds=wedgeBonds, **kwargs)
  rendered.save(filename)
# Module-level Tkinter handles reused across ShowMol() calls so repeated calls
# update a single window instead of opening a new one each time.
tkRoot=None
tkLabel=None
tkPI=None
def ShowMol(mol,size=(300,300),kekulize=True,wedgeBonds=True,
            title='RDKit Molecule',**kwargs):
  """ Generates a picture of a molecule and displays it in a Tkinter window
  """
  global tkRoot,tkLabel,tkPI
  # NOTE(review): `import Tkinter` is the Python 2 module name; this function
  # will not import on Python 3 (where it is `tkinter`).
  import Tkinter
  try:
    import ImageTk
  except ImportError:
    from PIL import ImageTk
  img = MolToImage(mol,size,kekulize,wedgeBonds,**kwargs)
  # First call: build the window and the image label; later calls just paste
  # the new image into the existing PhotoImage.
  if not tkRoot:
    tkRoot = Tkinter.Tk()
    tkRoot.title(title)
    tkPI = ImageTk.PhotoImage(img)
    tkLabel = Tkinter.Label(tkRoot,image=tkPI)
    tkLabel.place(x=0,y=0,width=img.size[0],height=img.size[1])
  else:
    tkPI.paste(img)
  # Resize the window to fit the rendered image.
  tkRoot.geometry('%dx%d'%(img.size))
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
  """ Computes a weighted sum of 2D Gaussians centred on the atom positions
  stored in mol._atomPs, evaluated on a [0,1)x[0,1) grid.

  Arguments:
    mol -- molecule whose `_atomPs` holds per-atom (x, y) positions
    a -- standard deviation of each Gaussian (both axes)
    step -- grid spacing
    weights -- optional per-atom weights (default: all 1.0)
  Returns (X, Y, Z) meshgrid arrays.
  """
  import numpy

  def _bivariate_normal(X, Y, sigmax, sigmay, mux, muy):
    # Uncorrelated 2D Gaussian PDF. Drop-in replacement for
    # matplotlib.mlab.bivariate_normal (with sigmaxy=0), which was deprecated
    # in matplotlib 2.2 and removed in 3.0.
    z = (X - mux) ** 2 / sigmax ** 2 + (Y - muy) ** 2 / sigmay ** 2
    return numpy.exp(-z / 2.0) / (2.0 * numpy.pi * sigmax * sigmay)

  x = numpy.arange(0, 1, step)
  y = numpy.arange(0, 1, step)
  X, Y = numpy.meshgrid(x, y)
  if weights is None:
    weights = [1.] * mol.GetNumAtoms()
  Z = _bivariate_normal(X, Y, a, a, mol._atomPs[0][0], mol._atomPs[0][1]) * weights[0]
  for i in range(1, mol.GetNumAtoms()):
    Zp = _bivariate_normal(X, Y, a, a, mol._atomPs[i][0], mol._atomPs[i][1])
    Z += Zp * weights[i]
  return X, Y, Z
def MolsToImage(mols, subImgSize=(200,200),legends=None,**kwargs):
  """ Renders every molecule in *mols* side by side into one RGBA image.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [None] * len(mols)
  width, height = subImgSize
  res = Image.new("RGBA", (width * len(mols), height))
  for idx, mol in enumerate(mols):
    tile = MolToImage(mol, subImgSize, legend=legends[idx], **kwargs)
    res.paste(tile, (idx * width, 0))
  return res
def MolsToGridImage(mols,molsPerRow=3,subImgSize=(200,200),legends=None,
                    highlightAtomLists=None,**kwargs):
  """ Renders *mols* as a grid of molsPerRow columns and returns the combined
  RGBA image. Optional per-molecule legends and atom-highlight lists are
  forwarded to MolToImage.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [None] * len(mols)
  nRows = -(-len(mols) // molsPerRow)  # ceiling division
  res = Image.new("RGBA", (molsPerRow * subImgSize[0], nRows * subImgSize[1]),
                  (255, 255, 255, 0))
  for idx, mol in enumerate(mols):
    row, col = divmod(idx, molsPerRow)
    highlights = None
    if highlightAtomLists and highlightAtomLists[idx]:
      highlights = highlightAtomLists[idx]
    tile = MolToImage(mol, subImgSize, legend=legends[idx],
                      highlightAtoms=highlights, fitImage=True, **kwargs)
    res.paste(tile, (col * subImgSize[0], row * subImgSize[1]))
  return res
def ReactionToImage(rxn, subImgSize=(200,200),**kwargs):
  """ Renders a reaction as a row of images: reactants, a reaction arrow,
  then products.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  # Collect the templates to draw; the None entry marks where the reaction
  # arrow goes between reactants and products.
  mols = []
  for i in range(rxn.GetNumReactantTemplates()):
    tmpl=rxn.GetReactantTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  mols.append(None)
  for i in range(rxn.GetNumProductTemplates()):
    tmpl = rxn.GetProductTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  res = Image.new("RGBA",(subImgSize[0]*len(mols),subImgSize[1]),(255,255,255,0))
  for i,mol in enumerate(mols):
    if mol is not None:
      nimg = MolToImage(mol,subImgSize,kekulize=False,**kwargs)
    else:
      # Draw the reaction arrow: a horizontal line plus two head strokes.
      nimg,canvas = _createCanvas(subImgSize)
      p0 = (10,subImgSize[1]//2)
      p1 = (subImgSize[0]-10,subImgSize[1]//2)
      p3 = (subImgSize[0]-20,subImgSize[1]//2-10)
      p4 = (subImgSize[0]-20,subImgSize[1]//2+10)
      canvas.addCanvasLine(p0,p1,lineWidth=2,color=(0,0,0))
      canvas.addCanvasLine(p3,p1,lineWidth=2,color=(0,0,0))
      canvas.addCanvasLine(p4,p1,lineWidth=2,color=(0,0,0))
      if hasattr(canvas,'flush'):
        canvas.flush()
      else:
        canvas.save()
    # Paste each tile at its slot in the strip.
    res.paste(nimg,(i*subImgSize[0],0))
  return res
| [
"Tkinter.Label",
"chembl_beaker.beaker.draw.molDrawing.Font",
"rdkit.Chem.Kekulize",
"PIL.ImageTk.PhotoImage",
"matplotlib.mlab.bivariate_normal",
"PIL.Image.new",
"chembl_beaker.beaker.draw.molDrawing.DrawingOptions",
"chembl_beaker.beaker.draw.cairoCanvas.Canvas",
"rdkit.Chem.AllChem.Compute2DCoor... | [((596, 633), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'size', '(0, 0, 0, 0)'], {}), "('RGBA', size, (0, 0, 0, 0))\n", (605, 633), False, 'from PIL import Image\n'), ((640, 651), 'chembl_beaker.beaker.draw.cairoCanvas.Canvas', 'Canvas', (['img'], {}), '(img)\n', (646, 651), False, 'from chembl_beaker.beaker.draw.cairoCanvas import Canvas\n'), ((1711, 1760), 'chembl_beaker.beaker.draw.molDrawing.MolDrawing', 'MolDrawing', ([], {'canvas': 'canvas', 'drawingOptions': 'options'}), '(canvas=canvas, drawingOptions=options)\n', (1721, 1760), False, 'from chembl_beaker.beaker.draw.molDrawing import MolDrawing, DrawingOptions\n'), ((3732, 3781), 'chembl_beaker.beaker.draw.molDrawing.MolDrawing', 'MolDrawing', ([], {'canvas': 'canvas', 'drawingOptions': 'options'}), '(canvas=canvas, drawingOptions=options)\n', (3742, 3781), False, 'from chembl_beaker.beaker.draw.molDrawing import MolDrawing, DrawingOptions\n'), ((5159, 5183), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', 'step'], {}), '(0, 1, step)\n', (5171, 5183), False, 'import numpy\n'), ((5188, 5212), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', 'step'], {}), '(0, 1, step)\n', (5200, 5212), False, 'import numpy\n'), ((5219, 5239), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5233, 5239), False, 'import numpy\n'), ((6287, 6381), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(molsPerRow * subImgSize[0], nRows * subImgSize[1])', '(255, 255, 255, 0)'], {}), "('RGBA', (molsPerRow * subImgSize[0], nRows * subImgSize[1]), (255,\n 255, 255, 0))\n", (6296, 6381), False, 'from PIL import Image\n'), ((1537, 1553), 'chembl_beaker.beaker.draw.molDrawing.DrawingOptions', 'DrawingOptions', ([], {}), '()\n', (1551, 1553), False, 'from chembl_beaker.beaker.draw.molDrawing import MolDrawing, DrawingOptions\n'), ((1807, 1825), 'rdkit.Chem.Kekulize', 'Chem.Kekulize', (['mol'], {}), '(mol)\n', (1820, 1825), False, 'from rdkit import Chem\n'), ((1928, 1956), 
'rdkit.Chem.AllChem.Compute2DCoords', 'AllChem.Compute2DCoords', (['mol'], {}), '(mol)\n', (1951, 1956), False, 'from rdkit.Chem import AllChem\n'), ((2609, 2635), 'chembl_beaker.beaker.draw.molDrawing.Font', 'Font', ([], {'face': '"""sans"""', 'size': '(12)'}), "(face='sans', size=12)\n", (2613, 2635), False, 'from chembl_beaker.beaker.draw.molDrawing import Font\n'), ((3277, 3293), 'chembl_beaker.beaker.draw.molDrawing.DrawingOptions', 'DrawingOptions', ([], {}), '()\n', (3291, 3293), False, 'from chembl_beaker.beaker.draw.molDrawing import MolDrawing, DrawingOptions\n'), ((3479, 3536), 'chembl_beaker.beaker.draw.cairoCanvas.Canvas', 'Canvas', ([], {'size': 'size', 'imageType': 'imageType', 'fileName': 'fileName'}), '(size=size, imageType=imageType, fileName=fileName)\n', (3485, 3536), False, 'from chembl_beaker.beaker.draw.cairoCanvas import Canvas\n'), ((3669, 3722), 'chembl_beaker.beaker.draw.cairoCanvas.Canvas', 'Canvas', ([], {'size': 'size', 'name': 'fileName', 'imageType': 'imageType'}), '(size=size, name=fileName, imageType=imageType)\n', (3675, 3722), False, 'from chembl_beaker.beaker.draw.cairoCanvas import Canvas\n'), ((3862, 3880), 'rdkit.Chem.Kekulize', 'Chem.Kekulize', (['mol'], {}), '(mol)\n', (3875, 3880), False, 'from rdkit import Chem\n'), ((3958, 3986), 'rdkit.Chem.AllChem.Compute2DCoords', 'AllChem.Compute2DCoords', (['mol'], {}), '(mol)\n', (3981, 3986), False, 'from rdkit.Chem import AllChem\n'), ((4800, 4812), 'Tkinter.Tk', 'Tkinter.Tk', ([], {}), '()\n', (4810, 4812), False, 'import Tkinter\n'), ((4848, 4871), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (4866, 4871), False, 'from PIL import ImageTk\n'), ((4886, 4919), 'Tkinter.Label', 'Tkinter.Label', (['tkRoot'], {'image': 'tkPI'}), '(tkRoot, image=tkPI)\n', (4899, 4919), False, 'import Tkinter\n'), ((5302, 5373), 'matplotlib.mlab.bivariate_normal', 'mlab.bivariate_normal', (['X', 'Y', 'a', 'a', 'mol._atomPs[0][0]', 'mol._atomPs[0][1]'], {}), '(X, Y, a, a, 
mol._atomPs[0][0], mol._atomPs[0][1])\n', (5323, 5373), False, 'from matplotlib import mlab\n'), ((5429, 5500), 'matplotlib.mlab.bivariate_normal', 'mlab.bivariate_normal', (['X', 'Y', 'a', 'a', 'mol._atomPs[i][0]', 'mol._atomPs[i][1]'], {}), '(X, Y, a, a, mol._atomPs[i][0], mol._atomPs[i][1])\n', (5450, 5500), False, 'from matplotlib import mlab\n'), ((3206, 3232), 'os.path.splitext', 'os.path.splitext', (['fileName'], {}), '(fileName)\n', (3222, 3232), False, 'import os\n')] |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List of checks for the network."""
import os
import makani
from makani.analysis.checks import base_check
from makani.analysis.checks import log_util
from makani.avionics.common import tether_message_types as tether_message
from makani.avionics.network import network_config
import numpy
_FRAME_MISSING_LIMIT = 16
class BaseMessageCheck(base_check.BaseCheckItem):
  """Common base for checks that inspect one message type from given sources."""

  @base_check.RegisterSpecs
  def __init__(self, for_log, message_type, sources,
               normal_ranges, warning_ranges):
    """Remember the message type/sources and register with the base class."""
    self._message_type = message_type
    self._sources = sources
    # The check name encodes the class, message type, and source list.
    check_name = '%s.%s.%s' % (self.__class__.__name__, message_type, sources)
    super(BaseMessageCheck, self).__init__(
        for_log, normal_ranges=normal_ranges, warning_ranges=warning_ranges,
        name=check_name, sort_by_sequence=False)
class MessageIntervalCheck(BaseMessageCheck):
  """Base class for message interval checks."""

  @base_check.RegisterSpecs
  def __init__(self, for_log, message_type, sources):
    super(MessageIntervalCheck, self).__init__(
        for_log, message_type, sources, [[0.0, 1.0]], [[0.0, 1.0]])

  def _RegisterInputs(self):
    # One 'timestamp' field per source, in source order.
    return [self._Arg(self._message_type, source, 'timestamp')
            for source in self._sources]

  def _Check(self, *timestamps):
    assert self._for_log
    assert len(timestamps) == len(self._sources) and len(timestamps)
    series_list = [s for s in timestamps if s is not None]
    if len(self._sources) > 1:
      # Merge timestamps from all sources into one ordered sequence.
      merged = numpy.concatenate(series_list)
      merged.sort()
    else:
      merged = series_list[0]
    # The check runs on the gaps between consecutive timestamps.
    self._CheckByRange(self._name, numpy.diff(merged),
                       self._normal_ranges, self._warning_ranges)
class BaseMissingFrameCheck(BaseMessageCheck):
  """Base class for missing frame checks."""

  @base_check.RegisterSpecs
  def __init__(self, for_log, message_type, sources, field,
               normal_ranges, warning_ranges, frame_wraparound):
    self._field = field
    self._frame_wraparound = frame_wraparound
    super(BaseMissingFrameCheck, self).__init__(
        for_log, message_type, sources, normal_ranges, warning_ranges)

  def _RegisterInputs(self):
    # Frame-index fields first, then the matching timestamps, in source order.
    frame_args = [self._Arg(self._message_type, src, self._field)
                  for src in self._sources]
    time_args = [self._Arg(self._message_type, src, 'timestamp')
                 for src in self._sources]
    return frame_args + time_args

  def _Check(self, *data):
    assert self._for_log
    assert len(data) == len(self._sources) * 2
    num_sources = len(self._sources)
    indices = [s for s in data[:num_sources] if s is not None]
    stamps = [s for s in data[num_sources:] if s is not None]
    if num_sources > 1:
      # Merge all sources and order frame indices by arrival time.
      stamps = numpy.concatenate(stamps)
      indices = numpy.concatenate(indices)
      indices = indices[stamps.argsort()]
    else:
      indices = indices[0]
    indices = indices.astype(int)
    # Remove index rollovers so consecutive diffs are meaningful.
    indices = log_util.UnwrapSequence(indices, self._frame_wraparound)
    indices.sort()
    self._CheckByRange(self._name, numpy.diff(indices),
                       self._normal_ranges, self._warning_ranges)
class MissingFrameCheck(BaseMissingFrameCheck):
  """Checks 'frame_index' gaps; jumps above _FRAME_MISSING_LIMIT are abnormal."""

  @base_check.RegisterSpecs
  def __init__(self, for_log, message_type, sources,
               frame_wraparound=tether_message.TETHER_FRAME_INDEX_ROLLOVER):
    super(MissingFrameCheck, self).__init__(
        for_log, message_type, sources, 'frame_index',
        [[0, _FRAME_MISSING_LIMIT]], [[0, None]], frame_wraparound)
class MissingReceivedFrameCheck(BaseMissingFrameCheck):
  """Like MissingFrameCheck, but on the 'received_frame_index' field."""

  @base_check.RegisterSpecs
  def __init__(self, for_log, message_type, sources,
               frame_wraparound=tether_message.TETHER_FRAME_INDEX_ROLLOVER):
    super(MissingReceivedFrameCheck, self).__init__(
        for_log, message_type, sources, 'received_frame_index',
        [[0, _FRAME_MISSING_LIMIT]], [[0, None]], frame_wraparound)
class DuplicateFrameCheck(BaseMissingFrameCheck):
    """Warns when the same 'frame_index' value is observed more than once."""

    @base_check.RegisterSpecs
    def __init__(self, for_log, message_type, sources,
                 frame_wraparound=tether_message.TETHER_FRAME_INDEX_ROLLOVER):
        # A diff below 1 means a repeated (duplicate) frame index.
        ranges = [[1, None]]
        super(DuplicateFrameCheck, self).__init__(
            for_log, message_type, sources, 'frame_index',
            ranges, ranges, frame_wraparound)
class DuplicateReceivedFrameCheck(BaseMissingFrameCheck):
    """Warns when the same 'received_frame_index' is observed more than once."""

    @base_check.RegisterSpecs
    def __init__(self, for_log, message_type, sources,
                 frame_wraparound=tether_message.TETHER_FRAME_INDEX_ROLLOVER):
        # A diff below 1 means a repeated (duplicate) received frame index.
        ranges = [[1, None]]
        super(DuplicateReceivedFrameCheck, self).__init__(
            for_log, message_type, sources, 'received_frame_index',
            ranges, ranges, frame_wraparound)
class FrameUpdateIntervalCheck(BaseMessageCheck):
    """Check for the frame update rate."""

    @base_check.RegisterSpecs
    def __init__(self, for_log, message_type, source,
                 frame_index_increment, target_frequency,
                 target_interval_tolerance=0.1,
                 frame_wraparound=tether_message.TETHER_FRAME_INDEX_ROLLOVER):
        """Check the time interval between frame updates.

        Args:
            for_log: True if this check is to analyze logs, not realtime AIO.
            message_type: The short name of the message type.
            source: The short name of the AIO node sending the message.
            frame_index_increment: The increment of frame updates.
            target_frequency: The expected frequency that frames got updated.
            target_interval_tolerance: The factor by which frame update intervals
                can deviate from the expected.
            frame_wraparound: The bound of the frame index beyond which the index
                wraps around from 0.
        """
        self._frame_wraparound = frame_wraparound
        # Normal interval band is target +/- tolerance * target seconds.
        target_interval = 1.0 / target_frequency
        acceptable_deviation = target_interval * target_interval_tolerance
        interval_limit = [[target_interval - acceptable_deviation,
                           target_interval + acceptable_deviation]]
        self._frame_index_increment = frame_index_increment
        super(FrameUpdateIntervalCheck, self).__init__(
            for_log, message_type, [source], interval_limit, [[None, None]])

    def _RegisterInputs(self):
        # Request the frame index and timestamp series for every source.
        inputs = []
        for source in self._sources:
            inputs.append(self._Arg(self._message_type, source, 'frame_index'))
        for source in self._sources:
            inputs.append(self._Arg(self._message_type, source, 'timestamp'))
        return inputs

    @base_check.SkipIfAnyInputIsNone
    def _Check(self, frame_index, timestamps):
        """Verify intervals between clean frame increments stay near target."""
        assert self._for_log
        frame_index = frame_index.astype(int)
        # Remove rollovers so the diffs below reflect true increments.
        frame_index = log_util.UnwrapSequence(frame_index, self._frame_wraparound)
        frame_index_diff = numpy.diff(frame_index)
        # Timestamps at which the frame index actually advanced.
        frame_update_selection = (frame_index_diff > 0)
        update_timestamps = timestamps[1:][frame_update_selection]
        # First make sure the frame ever updated at all.
        self._CheckByRange('Frame Ever Updated (%s)' % self._name,
                           numpy.array([update_timestamps.size]), [[1, None]],
                           [[1, None]])
        if update_timestamps.size > 0:
            frame_increment_selection = (
                frame_index_diff == self._frame_index_increment)
            # A bit mask for all the frame updates, where 1 means frame increments
            # without losing any in between.
            frame_increment_indices = (
                frame_increment_selection[frame_update_selection])
            update_timestamp_diff = numpy.diff(update_timestamps)
            # Only judge intervals that end at a clean single-increment update.
            update_timestamp_diff = update_timestamp_diff[frame_increment_indices[1:]]
            self._CheckByRange(self._name,
                               update_timestamp_diff, self._normal_ranges,
                               self._warning_ranges)
class TetherUpChecks(base_check.ListOfChecks):
    """Checklist covering TetherUp traffic from both ground stations."""

    def __init__(self, for_log):
        self._items_to_check = []
        # One group per ground-station core switch sender.
        for source in ['CsGsA', 'CsGsB']:
            sources = [source]
            self._items_to_check += [
                MessageIntervalCheck(for_log, 'TetherUp', sources),
                MissingFrameCheck(for_log, 'TetherUp', sources),
                DuplicateFrameCheck(for_log, 'TetherUp', sources),
            ]
class TetherDownChecks(base_check.ListOfChecks):
    """Checklist covering TetherDown traffic from each core switch sender."""

    def __init__(self, for_log):
        self._items_to_check = []
        message = 'TetherDown'
        for source in ['CsA', 'CsGsA', 'CsB']:
            sources = [source]
            self._items_to_check += [
                MessageIntervalCheck(for_log, message, sources),
                MissingFrameCheck(for_log, message, sources),
                DuplicateFrameCheck(for_log, message, sources),
            ]
            if message == 'TetherDown':  # Always true here; kept from original.
                self._items_to_check.append(
                    MissingReceivedFrameCheck(for_log, message, sources))
        # Received-signal-strength checks apply to the radio link via CsGsA.
        normal_ranges = [[-92, None]]
        warning_ranges = [[-112, None]]
        rssi_sources = ['CsGsA']
        self._items_to_check += [
            base_check.FieldRangeCheck(
                for_log, message, rssi_sources,
                'received_signal_strength', normal_ranges, warning_ranges),
            base_check.FieldRangeCheck(
                for_log, message, rssi_sources,
                'comms_status.received_signal_strength',
                normal_ranges, warning_ranges),
        ]
class AggregatedLinkChecks(base_check.ListOfChecks):
    """Checklist for message streams aggregated across all their senders."""

    def __init__(self, for_log):
        aggregated = [
            ('TetherDown', ['CsA', 'CsGsA', 'CsB']),
            ('TetherUp', ['CsA', 'CsGsA', 'CsGsB']),
        ]
        self._items_to_check = []
        for message, sources in aggregated:
            self._items_to_check.append(
                MessageIntervalCheck(for_log, message, sources))
            self._items_to_check.append(
                MissingFrameCheck(for_log, message, sources))
class FrameUpdateRateChecks(base_check.ListOfChecks):
    """Per-sender frame update rate checks for TetherDown and TetherUp."""

    def __init__(self, for_log):
        self._items_to_check = []
        config = network_config.NetworkConfig(
            os.path.join(makani.HOME, 'avionics/network/network.yaml'))

        def add_check(message, sender_name, decimated):
            # Senders on the radio hop only update frames once per decimation
            # period; everyone else updates at the full message rate.
            if decimated:
                self._items_to_check.append(
                    FrameUpdateIntervalCheck(
                        for_log, message.name, sender_name,
                        tether_message.TETHER_RADIO_DECIMATION,
                        message.frequency_hz /
                        tether_message.TETHER_RADIO_DECIMATION))
            else:
                self._items_to_check.append(
                    FrameUpdateIntervalCheck(
                        for_log, message.name, sender_name, 1,
                        message.frequency_hz))

        for m in config.all_messages:
            if m.name not in ('TetherDown', 'TetherUp'):
                continue
            for sender in m.all_senders:
                sender_name = sender.camel_name
                is_gs_sender = sender_name.startswith('CsGs')
                # TetherDown crosses the radio from the ground-station senders;
                # TetherUp crosses it from the non-ground-station senders.
                decimated = is_gs_sender if m.name == 'TetherDown' else not is_gs_sender
                add_check(m, sender_name, decimated)
| [
"makani.analysis.checks.base_check.FieldRangeCheck",
"os.path.join",
"numpy.diff",
"numpy.array",
"numpy.concatenate",
"makani.analysis.checks.log_util.UnwrapSequence"
] | [((2256, 2278), 'numpy.diff', 'numpy.diff', (['timestamps'], {}), '(timestamps)\n', (2266, 2278), False, 'import numpy\n'), ((3747, 3807), 'makani.analysis.checks.log_util.UnwrapSequence', 'log_util.UnwrapSequence', (['frame_index', 'self._frame_wraparound'], {}), '(frame_index, self._frame_wraparound)\n', (3770, 3807), False, 'from makani.analysis.checks import log_util\n'), ((3854, 3877), 'numpy.diff', 'numpy.diff', (['frame_index'], {}), '(frame_index)\n', (3864, 3877), False, 'import numpy\n'), ((7421, 7481), 'makani.analysis.checks.log_util.UnwrapSequence', 'log_util.UnwrapSequence', (['frame_index', 'self._frame_wraparound'], {}), '(frame_index, self._frame_wraparound)\n', (7444, 7481), False, 'from makani.analysis.checks import log_util\n'), ((7506, 7529), 'numpy.diff', 'numpy.diff', (['frame_index'], {}), '(frame_index)\n', (7516, 7529), False, 'import numpy\n'), ((2144, 2173), 'numpy.concatenate', 'numpy.concatenate', (['timestamps'], {}), '(timestamps)\n', (2161, 2173), False, 'import numpy\n'), ((3506, 3535), 'numpy.concatenate', 'numpy.concatenate', (['timestamps'], {}), '(timestamps)\n', (3523, 3535), False, 'import numpy\n'), ((3556, 3586), 'numpy.concatenate', 'numpy.concatenate', (['frame_index'], {}), '(frame_index)\n', (3573, 3586), False, 'import numpy\n'), ((7731, 7768), 'numpy.array', 'numpy.array', (['[update_timestamps.size]'], {}), '([update_timestamps.size])\n', (7742, 7768), False, 'import numpy\n'), ((8193, 8222), 'numpy.diff', 'numpy.diff', (['update_timestamps'], {}), '(update_timestamps)\n', (8203, 8222), False, 'import numpy\n'), ((11378, 11436), 'os.path.join', 'os.path.join', (['makani.HOME', '"""avionics/network/network.yaml"""'], {}), "(makani.HOME, 'avionics/network/network.yaml')\n", (11390, 11436), False, 'import os\n'), ((10247, 10381), 'makani.analysis.checks.base_check.FieldRangeCheck', 'base_check.FieldRangeCheck', (['for_log', "group['message']", "group['sources']", '"""received_signal_strength"""', 'normal_ranges', 
'warning_ranges'], {}), "(for_log, group['message'], group['sources'],\n 'received_signal_strength', normal_ranges, warning_ranges)\n", (10273, 10381), False, 'from makani.analysis.checks import base_check\n'), ((10418, 10565), 'makani.analysis.checks.base_check.FieldRangeCheck', 'base_check.FieldRangeCheck', (['for_log', "group['message']", "group['sources']", '"""comms_status.received_signal_strength"""', 'normal_ranges', 'warning_ranges'], {}), "(for_log, group['message'], group['sources'],\n 'comms_status.received_signal_strength', normal_ranges, warning_ranges)\n", (10444, 10565), False, 'from makani.analysis.checks import base_check\n')] |
import glob
import pickle
import numpy as np
import cv2
class CameraCalibration:
    """Camera calibration from chessboard images (OpenCV).

    Computes (or loads from a pickle) the camera matrix and distortion
    coefficients, and undistorts images with them.
    """

    def __init__(self, nCols=9, nRows=6, calibrationFiles=None):
        """Creates a camera calibration object.

        nCols - Number of inner chessboard corners per row.
        nRows - Number of inner chessboard corners per column.
        calibrationFiles - Glob pattern of calibration images. If None, camera
            parameters will be loaded from a pickle binary instead.
        """
        self._nRows = nRows
        self._nCols = nCols
        self._pickleFilename = 'camera_parameters.p'
        if calibrationFiles is not None:  # identity check, not `!= None`
            self._cameraMatrix, self._distCoeffs = self._calibrate(calibrationFiles)
        else:
            # Context manager guarantees the pickle file handle is closed.
            with open(self._pickleFilename, 'rb') as pickleFile:
                cameraParams = pickle.load(pickleFile)
            self._cameraMatrix = cameraParams['cameraMatrix']
            self._distCoeffs = cameraParams['distCoeffs']

    def _calibrate(self, path):
        """Calibrates the camera.

        path - Glob pattern of calibration images.
        Returns camera matrix and distortion coefficients.
        """
        # Object points: (col, row, 0) grid coordinates of the inner corners.
        coordinates = np.zeros((self._nRows * self._nCols, 3), np.float32)
        coordinates[:, :2] = np.mgrid[0:self._nCols, 0:self._nRows].T.reshape(
            self._nCols * self._nRows, 2)

        objectPoints = []
        imagePoints = []
        # Fallback image size if no calibration image is readable; it is
        # replaced by the real image size below.
        imageSize = (1280, 720)
        calibrationFiles = glob.glob(path)
        for filename in calibrationFiles:
            colorImage = cv2.imread(filename)  # in BGR
            grayscaleImage = cv2.cvtColor(colorImage, cv2.COLOR_BGR2GRAY)
            # Use the actual image size instead of assuming 1280x720.
            imageSize = grayscaleImage.shape[::-1]
            retVal, corners = cv2.findChessboardCorners(
                grayscaleImage, (self._nCols, self._nRows))
            if retVal:  # not every image yields all chessboard corners
                objectPoints.append(coordinates)
                imagePoints.append(corners)

        _, cameraMatrix, distCoeffs, _, _ = cv2.calibrateCamera(
            objectPoints, imagePoints, imageSize, None, None)
        # Persist the parameters so later runs can skip calibration.
        with open(self._pickleFilename, 'wb') as pickleFile:
            pickle.dump({'cameraMatrix': cameraMatrix,
                         'distCoeffs': distCoeffs}, pickleFile)
        return cameraMatrix, distCoeffs

    def undistort(self, image):
        """Undistort an image.

        image - Image that will be undistorted.
        Returns the undistorted image.
        """
        return cv2.undistort(image, self._cameraMatrix, self._distCoeffs)
| [
"cv2.undistort",
"numpy.zeros",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.imread",
"glob.glob"
] | [((1054, 1106), 'numpy.zeros', 'np.zeros', (['(self._nRows * self._nCols, 3)', 'np.float32'], {}), '((self._nRows * self._nCols, 3), np.float32)\n', (1062, 1106), True, 'import numpy as np\n'), ((1297, 1312), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (1306, 1312), False, 'import glob\n'), ((1823, 1894), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objectPoints', 'imagePoints', '(1280, 720)', 'None', 'None'], {}), '(objectPoints, imagePoints, (1280, 720), None, None)\n', (1842, 1894), False, 'import cv2\n'), ((2232, 2290), 'cv2.undistort', 'cv2.undistort', (['image', 'self._cameraMatrix', 'self._distCoeffs'], {}), '(image, self._cameraMatrix, self._distCoeffs)\n', (2245, 2290), False, 'import cv2\n'), ((1396, 1416), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1406, 1416), False, 'import cv2\n'), ((1455, 1499), 'cv2.cvtColor', 'cv2.cvtColor', (['colorImage', 'cv2.COLOR_BGR2GRAY'], {}), '(colorImage, cv2.COLOR_BGR2GRAY)\n', (1467, 1499), False, 'import cv2\n'), ((1530, 1599), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['grayscaleImage', '(self._nCols, self._nRows)'], {}), '(grayscaleImage, (self._nCols, self._nRows))\n', (1555, 1599), False, 'import cv2\n')] |
import pandas as pd
from sklearn.cluster import MeanShift
from sklearn.metrics import homogeneity_completeness_v_measure, fowlkes_mallows_score, adjusted_mutual_info_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
from collections import Counter
from easydl import prepare_path
# Keys of the metrics dict returned by evaluate_clustering_with_labels.
KEY_CLUSTERS = 'clusters'
KEY_CLASSES = 'classes'
KEY_AMI_SCORE = 'adjusted_mutual_info_score'
KEY_FM_SCORE = 'fowlkes_mallows_score'
KEY_PRECISION = 'precision'
KEY_PURERATE = 'pure_rate'
# NOTE: the value intentionally keeps the historical 'raio' typo so that
# previously written CSVs keep the same column name.
KEY_CLASS_TO_CLUSTER_RATIO = 'class_to_cluster_raio'
KEY_PURE_RATE_TIMES_CCRATIO = 'pure_times_class_cluster_ratio'
def evaluate_clustering_with_labels(ytrue, ycluster):
    """Score a clustering against ground-truth class labels.

    Args:
        ytrue: ground-truth class labels, aligned with ycluster.
        ycluster: predicted cluster assignments.

    Returns:
        Dict keyed by the module-level KEY_* constants (the constants were
        previously defined but unused; the key strings are unchanged):
        cluster/class counts, AMI and Fowlkes-Mallows scores, majority-vote
        precision, the fraction of samples in "pure" clusters (clusters whose
        members all share one class), and ratio-derived metrics.
    """
    ytrue_int = LabelEncoder().fit_transform(ytrue)
    ycluster_int = LabelEncoder().fit_transform(ycluster)

    result = {
        KEY_CLUSTERS: len(set(ycluster_int)),
        KEY_CLASSES: len(set(ytrue_int)),
        KEY_AMI_SCORE: adjusted_mutual_info_score(ytrue_int, ycluster_int),
        KEY_FM_SCORE: fowlkes_mallows_score(ytrue_int, ycluster_int),
    }

    # Histogram of true labels within each cluster.
    composition = {}
    for cluster, label in zip(ycluster_int, ytrue_int):
        if cluster not in composition:
            composition[cluster] = Counter()
        composition[cluster].update([label])

    num_correct = 0
    pure_count = 0
    for counts in composition.values():
        top_count = counts.most_common(1)[0][1]
        # Majority vote: the dominant class of a cluster counts as correct.
        num_correct += top_count
        if sum(counts.values()) == top_count:
            # Every sample in this cluster shares a single class.
            pure_count += top_count

    num_samples = len(ytrue_int)
    result[KEY_PRECISION] = num_correct / num_samples
    result[KEY_PURERATE] = pure_count / num_samples
    result[KEY_CLASS_TO_CLUSTER_RATIO] = result[KEY_CLASSES] / result[KEY_CLUSTERS]
    result[KEY_PURE_RATE_TIMES_CCRATIO] = (
        result[KEY_PURERATE] * result[KEY_CLASS_TO_CLUSTER_RATIO])
    return result
def tune_mean_shift(x, y, bandwidth_range=None, save_path='meanshift-tune.csv', disable_tqdm=False):
    """Sweep MeanShift bandwidths on the testing set and save metrics as CSV.

    x, y are the testing set: feature matrix and ground-truth labels.
    """
    from tqdm import tqdm

    if bandwidth_range is None:
        bandwidth_range = np.arange(0.05, 1, 0.05)

    records = []
    for bw in tqdm(bandwidth_range, disable=disable_tqdm):
        predicted = MeanShift(bandwidth=bw).fit_predict(x)
        record = {'bandwidth': bw}
        record.update(evaluate_clustering_with_labels(y, predicted))
        records.append(record)

    frame = pd.DataFrame(records)
    prepare_path(save_path)
    frame.to_csv(save_path, index=False)
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.adjusted_mutual_info_score",
"tqdm.tqdm",
"collections.Counter",
"sklearn.metrics.fowlkes_mallows_score",
"pandas.DataFrame",
"sklearn.cluster.MeanShift",
"numpy.arange",
"easydl.prepare_path"
] | [((689, 703), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (701, 703), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((732, 746), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (744, 746), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1021, 1072), 'sklearn.metrics.adjusted_mutual_info_score', 'adjusted_mutual_info_score', (['ytrue_int', 'ycluster_int'], {}), '(ytrue_int, ycluster_int)\n', (1047, 1072), False, 'from sklearn.metrics import homogeneity_completeness_v_measure, fowlkes_mallows_score, adjusted_mutual_info_score\n'), ((1111, 1157), 'sklearn.metrics.fowlkes_mallows_score', 'fowlkes_mallows_score', (['ytrue_int', 'ycluster_int'], {}), '(ytrue_int, ycluster_int)\n', (1132, 1157), False, 'from sklearn.metrics import homogeneity_completeness_v_measure, fowlkes_mallows_score, adjusted_mutual_info_score\n'), ((2149, 2192), 'tqdm.tqdm', 'tqdm', (['bandwidth_range'], {'disable': 'disable_tqdm'}), '(bandwidth_range, disable=disable_tqdm)\n', (2153, 2192), False, 'from tqdm import tqdm\n'), ((2442, 2460), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (2454, 2460), True, 'import pandas as pd\n'), ((2465, 2488), 'easydl.prepare_path', 'prepare_path', (['save_path'], {}), '(save_path)\n', (2477, 2488), False, 'from easydl import prepare_path\n'), ((2102, 2126), 'numpy.arange', 'np.arange', (['(0.05)', '(1)', '(0.05)'], {}), '(0.05, 1, 0.05)\n', (2111, 2126), True, 'import numpy as np\n'), ((2208, 2238), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth'}), '(bandwidth=bandwidth)\n', (2217, 2238), False, 'from sklearn.cluster import MeanShift\n'), ((1324, 1333), 'collections.Counter', 'Counter', ([], {}), '()\n', (1331, 1333), False, 'from collections import Counter\n')] |
import numba
import torch
import numpy as np
from .common import check_numpy_to_torch
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap `val` into the half-open range [-offset * period, (1 - offset) * period).

    Accepts a numpy array or torch tensor; the return type matches the input
    (numpy in, numpy out).
    """
    val, is_numpy = check_numpy_to_torch(val)
    ans = val - torch.floor(val / period + offset) * period
    return ans.numpy() if is_numpy else ans
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        Rotated points with the same shape; numpy in gives numpy out.
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # One 3x3 z-axis rotation matrix per batch element.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    # Rotate only the xyz columns; extra feature channels pass through.
    points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
def camera_to_lidar(points, r_rect, velo2cam):
    """Convert camera-frame points to lidar frame (in place).

    Applies the inverse of the combined transform ``r_rect @ velo2cam`` to the
    xyz columns; any extra feature columns are left untouched.

    Args:
        points: (N, 3 + C) array; columns 0..2 are xyz in camera coordinates.
        r_rect: (4, 4) rectification matrix.
        velo2cam: (4, 4) lidar-to-camera extrinsic matrix.

    Returns:
        The same `points` array with xyz rewritten in lidar coordinates.
    """
    num_points = points.shape[0]
    xyz_homogeneous = np.concatenate(
        [points[:, :3], np.ones([num_points, 1])], axis=1)
    cam_to_velo = np.linalg.inv((r_rect @ velo2cam).T)
    points[:, :3] = (xyz_homogeneous @ cam_to_velo)[:, :3]
    return points
def lidar_to_camera(points, r_rect, velo2cam):
    """Convert lidar-frame points to camera frame (in place).

    Applies ``r_rect @ velo2cam`` to the xyz columns; extra feature columns
    are left untouched.

    Args:
        points: (N, 3 + C) array; columns 0..2 are xyz in lidar coordinates.
        r_rect: (4, 4) rectification matrix.
        velo2cam: (4, 4) lidar-to-camera extrinsic matrix.

    Returns:
        The same `points` array with xyz rewritten in camera coordinates.
    """
    num_points = points.shape[0]
    xyz_homogeneous = np.concatenate(
        [points[:, :3], np.ones([num_points, 1])], axis=1)
    points[:, :3] = (xyz_homogeneous @ (r_rect @ velo2cam).T)[:, :3]
    return points
def box_camera_to_lidar(data, r_rect, velo2cam):
    """Convert (N, 7) camera-frame boxes [x, y, z, l, h, w, r] to lidar-frame
    boxes [x, y, z, w, l, h, r]."""
    xyz_cam = data[:, 0:3]
    length = data[:, 3:4]
    height = data[:, 4:5]
    width = data[:, 5:6]
    yaw = data[:, 6:7]
    xyz = camera_to_lidar(xyz_cam, r_rect, velo2cam)
    # Lidar convention stores dimensions as (w, l, h).
    return np.concatenate([xyz, width, length, height, yaw], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
    """Convert (N, 7) lidar-frame boxes [x, y, z, w, l, h, r] to camera-frame
    boxes [x, y, z, l, h, w, r]."""
    xyz_lidar = data[:, 0:3]
    width = data[:, 3:4]
    length = data[:, 4:5]
    height = data[:, 5:6]
    yaw = data[:, 6:7]
    xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
    # Camera convention stores dimensions as (l, h, w).
    return np.concatenate([xyz, length, height, width, yaw], axis=1)
def projection_matrix_to_CRT_kitti(P):
    """Split a KITTI projection matrix into intrinsics and extrinsics.

    Factors ``P = C @ [R | T]`` where C is the upper-triangular camera
    (intrinsic) matrix, R a rotation and T a translation. Because C is upper
    triangular, QR-decomposing ``inv(C @ R)`` recovers both factors in a
    numerically stable way for all KITTI camera projection matrices.

    Args:
        P: (3, 4) projection matrix.

    Returns:
        Tuple (C, R, T): (3, 3) intrinsics, (3, 3) rotation, (3,) translation.
    """
    CR = P[0:3, 0:3]
    CT = P[0:3, 3]
    # inv(CR) = inv(R) @ inv(C): orthogonal times upper-triangular, i.e. a QR pair.
    Rinv, Cinv = np.linalg.qr(np.linalg.inv(CR))
    C = np.linalg.inv(Cinv)
    R = np.linalg.inv(Rinv)
    T = Cinv @ CT
    return C, R, T
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
    """Return the 8 camera-frame corners of the view frustum of an image bbox.

    Args:
        bbox_image: [xmin, ymin, xmax, ymax] in pixels.
        C: (3, 3+) camera intrinsic matrix (focal lengths and principal point).
        near_clip: depth of the near plane.
        far_clip: depth of the far plane.

    Returns:
        (8, 3) array: 4 near-plane corners followed by 4 far-plane corners,
        each row being (x, y, z) in camera coordinates.
    """
    fku = C[0, 0]
    fkv = -C[1, 1]
    u0v0 = C[0:2, 2]
    x1, y1, x2, y2 = bbox_image
    # Pixel-space corners in counter-clockwise order.
    box_corners = np.array(
        [[x1, y1], [x1, y2], [x2, y2], [x2, y1]], dtype=C.dtype)
    corner_blocks = []
    for clip in (near_clip, far_clip):
        # Back-project the pixel corners onto the plane at depth `clip`.
        scale = np.array([fku / clip, -fkv / clip], dtype=C.dtype)
        xy = (box_corners - u0v0) / scale
        z = np.full((4, 1), clip, dtype=C.dtype)
        corner_blocks.append(np.concatenate([xy, z], axis=1))
    return np.concatenate(corner_blocks, axis=0)
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
    """convert 3d box corners from corner function above
    to surfaces that normal vectors all direct to internal.

    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners: [N, 8, 3], must from corner functions in this module
    num_boxes = corners.shape[0]
    surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
    # Each row of 4 indices picks the corners of one of the 6 box faces; the
    # winding order is chosen so every face normal points into the box.
    corner_idxes = np.array(
        [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
    ).reshape(6, 4)
    # Explicit loops instead of fancy indexing keep this nopython-compatible.
    for i in range(num_boxes):
        for j in range(6):
            for k in range(4):
                surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
    return surfaces
def points_in_convex_polygon_3d_jit(points, polygon_surfaces, num_surfaces=None):
    """check points is in 3d convex polygons.

    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3]
            array. all surfaces' normal vector must direct to internal.
            max_num_points_of_surface must at least 3.
        num_surfaces: [num_polygon] array. indicate how many surfaces
            a polygon contain
    Returns:
        [num_points, num_polygon] bool array.
    """
    # NOTE(review): despite the '_jit' suffix this wrapper is plain Python;
    # the heavy lifting happens in the numba-compiled helpers below.
    max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
    num_points = points.shape[0]
    num_polygons = polygon_surfaces.shape[0]
    if num_surfaces is None:
        # Sentinel "no limit": every surface slot of every polygon is used.
        num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
    # Plane equation per surface: normal_vec . x + d = 0.
    normal_vec, d = surface_equ_3d_jitv2(polygon_surfaces[:, :, :3, :])
    # normal_vec: [num_polygon, max_num_surfaces, 3]
    # d: [num_polygon, max_num_surfaces]
    return _points_in_convex_polygon_3d_jit(
        points, polygon_surfaces, normal_vec, d, num_surfaces
    )
@numba.njit
def surface_equ_3d_jitv2(surfaces):
    """Compute plane equations (normal_vec, d) for each polygon surface.

    The normal is the cross product of the surface's first two edges and d is
    chosen so that ``normal_vec . x + d == 0`` for points on the plane.
    """
    # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]
    num_polygon = surfaces.shape[0]
    max_num_surfaces = surfaces.shape[1]
    normal_vec = np.zeros((num_polygon, max_num_surfaces, 3), dtype=surfaces.dtype)
    d = np.zeros((num_polygon, max_num_surfaces), dtype=surfaces.dtype)
    # Scratch edge vectors; the initial values only allocate correctly typed
    # buffers and are fully overwritten inside the loop.
    sv0 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
    sv1 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
    for i in range(num_polygon):
        for j in range(max_num_surfaces):
            # Edge vectors between the first three vertices of the surface.
            sv0[0] = surfaces[i, j, 0, 0] - surfaces[i, j, 1, 0]
            sv0[1] = surfaces[i, j, 0, 1] - surfaces[i, j, 1, 1]
            sv0[2] = surfaces[i, j, 0, 2] - surfaces[i, j, 1, 2]
            sv1[0] = surfaces[i, j, 1, 0] - surfaces[i, j, 2, 0]
            sv1[1] = surfaces[i, j, 1, 1] - surfaces[i, j, 2, 1]
            sv1[2] = surfaces[i, j, 1, 2] - surfaces[i, j, 2, 2]
            # Cross product sv0 x sv1 gives the surface normal.
            normal_vec[i, j, 0] = sv0[1] * sv1[2] - sv0[2] * sv1[1]
            normal_vec[i, j, 1] = sv0[2] * sv1[0] - sv0[0] * sv1[2]
            normal_vec[i, j, 2] = sv0[0] * sv1[1] - sv0[1] * sv1[0]
            # Plane offset from the first vertex of the surface.
            d[i, j] = (
                -surfaces[i, j, 0, 0] * normal_vec[i, j, 0]
                - surfaces[i, j, 0, 1] * normal_vec[i, j, 1]
                - surfaces[i, j, 0, 2] * normal_vec[i, j, 2]
            )
    return normal_vec, d
@numba.njit
def _points_in_convex_polygon_3d_jit(
    points, polygon_surfaces, normal_vec, d, num_surfaces=None
):
    """check points is in 3d convex polygons.

    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3]
            array. all surfaces' normal vector must direct to internal.
            max_num_points_of_surface must at least 3.
        num_surfaces: [num_polygon] array. indicate how many surfaces
            a polygon contain
    Returns:
        [num_points, num_polygon] bool array.
    """
    max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
    num_points = points.shape[0]
    num_polygons = polygon_surfaces.shape[0]
    ret = np.ones((num_points, num_polygons), dtype=np.bool_)
    sign = 0.0
    for i in range(num_points):
        for j in range(num_polygons):
            for k in range(max_num_surfaces):
                # NOTE(review): this only stops once k EXCEEDS num_surfaces[j];
                # if num_surfaces counts surfaces (rather than the last valid
                # index) a `>=` may be intended — confirm against callers.
                if k > num_surfaces[j]:
                    break
                # Signed distance of the point from surface k's plane; normals
                # point inward, so a non-negative sign means "outside".
                sign = (
                    points[i, 0] * normal_vec[j, k, 0]
                    + points[i, 1] * normal_vec[j, k, 1]
                    + points[i, 2] * normal_vec[j, k, 2]
                    + d[j, k]
                )
                if sign >= 0:
                    ret[i, j] = False
                    break
    return ret
def mask_points_by_range(points, limit_range):
    """Boolean mask of points inside the x/y extent of `limit_range`.

    Only x (limit_range[0]/[3]) and y (limit_range[1]/[4]) are tested; the z
    bounds of the range are ignored.
    """
    xs = points[:, 0]
    ys = points[:, 1]
    in_x = (xs >= limit_range[0]) & (xs <= limit_range[3])
    in_y = (ys >= limit_range[1]) & (ys <= limit_range[4])
    return in_x & in_y
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
    """Keep only the lidar points that project inside the camera image.

    Builds the camera view frustum from the image bounds and the intrinsics
    recovered from P2, transforms the frustum into lidar coordinates, and
    drops every point outside it.

    Args:
        points: (N, 3 + C) lidar points.
        rect: (4, 4) rectification matrix.
        Trv2c: (4, 4) lidar-to-camera extrinsic matrix.
        P2: (3, 4) camera projection matrix.
        image_shape: (height, width) of the camera image.

    Returns:
        The subset of `points` inside the camera frustum.
    """
    # 5x faster than remove_outside_points_v1(2ms vs 10ms)
    C, R, T = projection_matrix_to_CRT_kitti(P2)
    image_bbox = [0, 0, image_shape[1], image_shape[0]]
    frustum = get_frustum(image_bbox, C)
    # Undo the camera extrinsics (translation, then rotation), then map the
    # frustum corners into lidar coordinates.
    frustum -= T
    frustum = np.linalg.inv(R) @ frustum.T
    frustum = camera_to_lidar(frustum.T, rect, Trv2c)
    frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
    indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
    points = points[indices.reshape([-1])]
    return points
| [
"numpy.linalg.qr",
"numpy.ones",
"torch.floor",
"torch.sin",
"torch.stack",
"numpy.array",
"torch.cos",
"numba.jit",
"torch.matmul",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.zeros",
"numpy.full",
"torch.cat"
] | [((3339, 3363), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3348, 3363), False, 'import numba\n'), ((570, 586), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (579, 586), False, 'import torch\n'), ((598, 614), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (607, 614), False, 'import torch\n'), ((871, 914), 'torch.matmul', 'torch.matmul', (['points[:, :, 0:3]', 'rot_matrix'], {}), '(points[:, :, 0:3], rot_matrix)\n', (883, 914), False, 'import torch\n'), ((932, 981), 'torch.cat', 'torch.cat', (['(points_rot, points[:, :, 3:])'], {'dim': '(-1)'}), '((points_rot, points[:, :, 3:]), dim=-1)\n', (941, 981), False, 'import torch\n'), ((1722, 1769), 'numpy.concatenate', 'np.concatenate', (['[xyz_lidar, w, l, h, r]'], {'axis': '(1)'}), '([xyz_lidar, w, l, h, r], axis=1)\n', (1736, 1769), True, 'import numpy as np\n'), ((1992, 2033), 'numpy.concatenate', 'np.concatenate', (['[xyz, l, h, w, r]'], {'axis': '(1)'}), '([xyz, l, h, w, r], axis=1)\n', (2006, 2033), True, 'import numpy as np\n'), ((2410, 2427), 'numpy.linalg.inv', 'np.linalg.inv', (['CR'], {}), '(CR)\n', (2423, 2427), True, 'import numpy as np\n'), ((2445, 2467), 'numpy.linalg.qr', 'np.linalg.qr', (['RinvCinv'], {}), '(RinvCinv)\n', (2457, 2467), True, 'import numpy as np\n'), ((2476, 2495), 'numpy.linalg.inv', 'np.linalg.inv', (['Cinv'], {}), '(Cinv)\n', (2489, 2495), True, 'import numpy as np\n'), ((2504, 2523), 'numpy.linalg.inv', 'np.linalg.inv', (['Rinv'], {}), '(Rinv)\n', (2517, 2523), True, 'import numpy as np\n'), ((2833, 2919), 'numpy.array', 'np.array', (['[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]]'], {'dtype': 'C.dtype'}), '([[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.\n dtype)\n', (2841, 2919), True, 'import numpy as np\n'), ((3181, 3240), 'numpy.concatenate', 'np.concatenate', (['[near_box_corners, far_box_corners]'], {'axis': '(0)'}), '([near_box_corners, far_box_corners], axis=0)\n', (3195, 3240), True, 
'import numpy as np\n'), ((3274, 3316), 'numpy.concatenate', 'np.concatenate', (['[ret_xy, z_points]'], {'axis': '(1)'}), '([ret_xy, z_points], axis=1)\n', (3288, 3316), True, 'import numpy as np\n'), ((3777, 3828), 'numpy.zeros', 'np.zeros', (['(num_boxes, 6, 4, 3)'], {'dtype': 'corners.dtype'}), '((num_boxes, 6, 4, 3), dtype=corners.dtype)\n', (3785, 3828), True, 'import numpy as np\n'), ((5467, 5533), 'numpy.zeros', 'np.zeros', (['(num_polygon, max_num_surfaces, 3)'], {'dtype': 'surfaces.dtype'}), '((num_polygon, max_num_surfaces, 3), dtype=surfaces.dtype)\n', (5475, 5533), True, 'import numpy as np\n'), ((5542, 5605), 'numpy.zeros', 'np.zeros', (['(num_polygon, max_num_surfaces)'], {'dtype': 'surfaces.dtype'}), '((num_polygon, max_num_surfaces), dtype=surfaces.dtype)\n', (5550, 5605), True, 'import numpy as np\n'), ((7389, 7440), 'numpy.ones', 'np.ones', (['(num_points, num_polygons)'], {'dtype': 'np.bool_'}), '((num_points, num_polygons), dtype=np.bool_)\n', (7396, 7440), True, 'import numpy as np\n'), ((1194, 1230), 'numpy.linalg.inv', 'np.linalg.inv', (['(r_rect @ velo2cam).T'], {}), '((r_rect @ velo2cam).T)\n', (1207, 1230), True, 'import numpy as np\n'), ((2699, 2756), 'numpy.array', 'np.array', (['([near_clip] * 4 + [far_clip] * 4)'], {'dtype': 'C.dtype'}), '([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)\n', (2707, 2756), True, 'import numpy as np\n'), ((2975, 3035), 'numpy.array', 'np.array', (['[fku / near_clip, -fkv / near_clip]'], {'dtype': 'C.dtype'}), '([fku / near_clip, -fkv / near_clip], dtype=C.dtype)\n', (2983, 3035), True, 'import numpy as np\n'), ((3095, 3153), 'numpy.array', 'np.array', (['[fku / far_clip, -fkv / far_clip]'], {'dtype': 'C.dtype'}), '([fku / far_clip, -fkv / far_clip], dtype=C.dtype)\n', (3103, 3153), True, 'import numpy as np\n'), ((4916, 4965), 'numpy.full', 'np.full', (['(num_polygons,)', '(9999999)'], {'dtype': 'np.int64'}), '((num_polygons,), 9999999, dtype=np.int64)\n', (4923, 4965), True, 'import numpy as np\n'), 
((8517, 8533), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (8530, 8533), True, 'import numpy as np\n'), ((198, 232), 'torch.floor', 'torch.floor', (['(val / period + offset)'], {}), '(val / period + offset)\n', (209, 232), False, 'import torch\n'), ((1138, 1167), 'numpy.ones', 'np.ones', (['[points.shape[0], 1]'], {}), '([points.shape[0], 1])\n', (1145, 1167), True, 'import numpy as np\n'), ((1379, 1408), 'numpy.ones', 'np.ones', (['[points.shape[0], 1]'], {}), '([points.shape[0], 1])\n', (1386, 1408), True, 'import numpy as np\n'), ((3848, 3934), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]'], {}), '([0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2,\n 6, 7])\n', (3856, 3934), True, 'import numpy as np\n'), ((720, 799), 'torch.stack', 'torch.stack', (['(cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones)'], {'dim': '(1)'}), '((cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones), dim=1)\n', (731, 799), False, 'import torch\n')] |
from typing import List
import yolo3_one_file_to_detect_them_all as yolo
from tensorflow.keras.models import load_model
import numpy as np
from numpy import expand_dims
from PIL import Image
from keras.preprocessing.image import img_to_array
from matplotlib import pyplot
from video_frame import VideoFrame
from bounding_box import BoundingBox
class PeopleDetector:
    """People detection with YOLOv3.

    Attributes
    ----------
    model : keras model
        Pre-trained YOLOv3 network loaded from disk.
    """

    # Default anchor boxes for the three YOLOv3 output scales.
    DEFAULT_ANCHORS = ((116, 90, 156, 198, 373, 326),
                       (30, 61, 62, 45, 59, 119),
                       (10, 13, 16, 30, 33, 23))

    def __init__(self, model_path='model.h5'):
        # model_path keeps the previously hard-coded default for compatibility.
        self.model = load_model(model_path)

    def load_image_pixels(self, image, shape=(512, 512)):
        """Preprocess an image for the model.

        Returns the resized, normalized image batch plus the original
        width and height.
        """
        height, width, _ = image.shape
        image = Image.fromarray(image)
        image = image.resize(shape)
        image = img_to_array(image)
        image = image.astype('float32')
        image /= 255.0  # scale pixel values to [0, 1]
        image = expand_dims(image, 0)  # add the batch dimension
        return image, width, height

    def get_boxes(self, boxes, labels, thresh):
        """Collect (box, label, score) triples above the confidence threshold.

        Every box is tested against every label, so one box may be kept
        multiple times with different labels.
        """
        v_boxes, v_labels, v_scores = [], [], []
        for box in boxes:
            for i in range(len(labels)):
                if box.classes[i] > thresh:
                    v_boxes.append(box)
                    v_labels.append(labels[i])
                    v_scores.append(box.classes[i] * 100)
        return v_boxes, v_labels, v_scores

    def correct_yolo_boxes(self, boxes, image_h, image_w, net_h, net_w):
        """Scale normalized box coordinates back to the original image size."""
        for box in boxes:
            box.xmin = int(box.xmin * image_w)
            box.xmax = int(box.xmax * image_w)
            box.ymin = int(box.ymin * image_h)
            box.ymax = int(box.ymax * image_h)

    def detect(self, frame: VideoFrame, input_w=256, input_h=256,
               class_threshold=0.6, labels=None, anchors=None):
        """Run YOLO on a frame and return people bounding boxes with scores.

        Parameters:
            input_w / input_h: model input size.
            class_threshold: keep candidate classes above this confidence.
            labels: class names to detect (defaults to ['person']).
            anchors: YOLO anchor boxes, one group per output scale.

        Processing steps:
            1. Load and preprocess the image.
            2. Run the model.
            3. Decode the raw output into bounding boxes.
            4. Rescale boxes to the original image size.
            5. Non-max suppression of overlapping boxes.
            6. Filter and label the remaining boxes.

        Returns:
            (boxes, scores): BoundingBox(x, y, w, h) objects and their
            confidence scores.
        """
        # Bind defaults per call instead of using mutable default arguments.
        if labels is None:
            labels = ['person']
        if anchors is None:
            anchors = [list(group) for group in self.DEFAULT_ANCHORS]

        image, image_w, image_h = self.load_image_pixels(frame, (input_w, input_h))
        yhat = self.model.predict(image)
        boxes = []
        for i in range(len(yhat)):
            boxes += yolo.decode_netout(
                yhat[i][0], anchors[i], class_threshold, input_h, input_w)
        self.correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
        yolo.do_nms(boxes, 0.5)
        boxes, labels, scores = self.get_boxes(boxes, labels, class_threshold)

        ret_boxes = []
        for box in boxes:
            y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
            ret_boxes.append(BoundingBox(x1, y1, x2 - x1, y2 - y1))
        return ret_boxes, scores
| [
"keras.preprocessing.image.img_to_array",
"PIL.Image.fromarray",
"yolo3_one_file_to_detect_them_all.decode_netout",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"bounding_box.BoundingBox",
"yolo3_one_file_to_detect_them_all.do_nms"
] | [((560, 582), 'tensorflow.keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (570, 582), False, 'from tensorflow.keras.models import load_model\n'), ((793, 815), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (808, 815), False, 'from PIL import Image\n'), ((870, 889), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (882, 889), False, 'from keras.preprocessing.image import img_to_array\n'), ((972, 993), 'numpy.expand_dims', 'expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (983, 993), False, 'from numpy import expand_dims\n'), ((3398, 3421), 'yolo3_one_file_to_detect_them_all.do_nms', 'yolo.do_nms', (['boxes', '(0.5)'], {}), '(boxes, 0.5)\n', (3409, 3421), True, 'import yolo3_one_file_to_detect_them_all as yolo\n'), ((3219, 3296), 'yolo3_one_file_to_detect_them_all.decode_netout', 'yolo.decode_netout', (['yhat[i][0]', 'anchors[i]', 'class_threshold', 'input_h', 'input_w'], {}), '(yhat[i][0], anchors[i], class_threshold, input_h, input_w)\n', (3237, 3296), True, 'import yolo3_one_file_to_detect_them_all as yolo\n'), ((3710, 3744), 'bounding_box.BoundingBox', 'BoundingBox', (['x1', 'y1', 'width', 'height'], {}), '(x1, y1, width, height)\n', (3721, 3744), False, 'from bounding_box import BoundingBox\n')] |
import re
from logging import getLogger
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from youtube_stat.config import Config
from youtube_stat.data.processor import DataProcessor
from youtube_stat.lib.datetime_util import parse_date_str
from youtube_stat.lib.file_util import load_json_from_file
logger = getLogger(__name__)
class Analyser:
def __init__(self, config: Config):
self.config = config
def start(self):
dp = DataProcessor(self.config)
df = dp.load_training_data()
words = load_json_from_file(dp.word_index_path)
for w, i in list(words.items()):
words[f"w{i:03d}"] = w
self.plot_distribution(df)
self.plot_group_distribution(dp)
self.analyze("log_view", df, np.log(df.view), words)
self.analyze("like_rate", df, df.like / df.view, words)
def plot_distribution(self, df):
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 6))
sns.distplot(df.view, kde=False, ax=axes[0, 0])
axes[0, 0].set_title("View Count Distribution")
sns.distplot(df.like, kde=False, ax=axes[0, 1])
axes[0, 1].set_title("Like Count Distribution")
sns.distplot(df.dislike, kde=False, ax=axes[1, 0])
axes[1, 0].set_title("Dislike Count Distribution")
sns.distplot(df.comment, kde=False, ax=axes[1, 1])
axes[1, 1].set_title("Comment Count Distribution")
plt.subplots_adjust(hspace=0.4)
fig.savefig(f"{self.config.resource.working_dir}/{self.config.resource.summary_dist_graph_name}")
###
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 3))
sns.distplot(np.log(df.view), kde=False, ax=axes[0])
axes[0].set_title("Log(View Count) Distribution")
sns.distplot(df.like / df.view, kde=False, ax=axes[1])
axes[1].set_title("Like/View Rate Distribution")
fig.savefig(f"{self.config.resource.working_dir}/{self.config.resource.target_dist_graph_name}")
def plot_group_distribution(self, dp: DataProcessor):
bdf = dp.load_basic_data()
bdf['month'] = bdf.apply(lambda r: parse_date_str(r.date).strftime("%Y-%m"), axis=1)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
sns.boxplot(bdf.month, bdf.view, order=sorted(bdf.month.unique()), ax=ax)
ax.set_title("view by month")
fig.savefig(f"{self.config.resource.working_dir}/view_by_month.png")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
sns.boxplot(bdf.wday, bdf.view, order=sorted(bdf.wday.unique()), ax=ax)
ax.set_title("view by weekday(0=Mon ~ 6=Sun")
fig.savefig(f"{self.config.resource.working_dir}/view_by_weekday.png")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
sns.boxplot(bdf.month, bdf.like / bdf.view, order=sorted(bdf.month.unique()), ax=ax)
ax.set_title("like rate by month")
fig.savefig(f"{self.config.resource.working_dir}/like_rate_by_month.png")
def analyze(self, name, df, df_y, words):
template = open(self.config.resource.resource_dir / self.config.resource.summary_template_name, "rt").read()
params = {"name": name}
x_cols = [x for x in df.columns if re.search(r"^(201|Mon|Tue|Wed|Thr|Fri|Sat|Sun|w)", x)]
df_x = df.loc[:, x_cols]
df_x = sm.add_constant(df_x, prepend=False)
model = smf.OLS(df_y, df_x, hasconst=True)
result = model.fit()
summary = result.summary2()
params["stat1"] = summary.tables[0].to_html(header=False, index=False)
params["stat2"] = summary.tables[2].to_html(header=False, index=False)
coef_df = summary.tables[1]
cf = coef_df[coef_df['P>|t|'] < 0.1].loc[:, "Coef."]
wdf = {}
ddf = {}
for k, v in dict(cf).items():
if k in words:
wdf[words[k]] = v
else:
ddf[k] = v
import_vars = pd.DataFrame(list(sorted([[k, v] for k, v in wdf.items()], key=lambda x: x[1])))
import_vars.columns = ["word", "Coef"]
coef_df.index = [words.get(x, x) for x in coef_df.index]
params['coef_table'] = coef_df.round(3).to_html()
params['important_table'] = import_vars.round(3).to_html(header=True, index=False)
with open(f"{self.config.resource.working_dir}/{name}_summary.html", "wt") as f:
f.write(template % params)
| [
"logging.getLogger",
"seaborn.distplot",
"youtube_stat.lib.file_util.load_json_from_file",
"youtube_stat.data.processor.DataProcessor",
"numpy.log",
"statsmodels.formula.api.OLS",
"statsmodels.api.add_constant",
"youtube_stat.lib.datetime_util.parse_date_str",
"matplotlib.pyplot.subplots",
"matplo... | [((422, 441), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (431, 441), False, 'from logging import getLogger\n'), ((564, 590), 'youtube_stat.data.processor.DataProcessor', 'DataProcessor', (['self.config'], {}), '(self.config)\n', (577, 590), False, 'from youtube_stat.data.processor import DataProcessor\n'), ((645, 684), 'youtube_stat.lib.file_util.load_json_from_file', 'load_json_from_file', (['dp.word_index_path'], {}), '(dp.word_index_path)\n', (664, 684), False, 'from youtube_stat.lib.file_util import load_json_from_file\n'), ((1021, 1068), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(12, 6)'}), '(nrows=2, ncols=2, figsize=(12, 6))\n', (1033, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1125), 'seaborn.distplot', 'sns.distplot', (['df.view'], {'kde': '(False)', 'ax': 'axes[0, 0]'}), '(df.view, kde=False, ax=axes[0, 0])\n', (1090, 1125), True, 'import seaborn as sns\n'), ((1191, 1238), 'seaborn.distplot', 'sns.distplot', (['df.like'], {'kde': '(False)', 'ax': 'axes[0, 1]'}), '(df.like, kde=False, ax=axes[0, 1])\n', (1203, 1238), True, 'import seaborn as sns\n'), ((1304, 1354), 'seaborn.distplot', 'sns.distplot', (['df.dislike'], {'kde': '(False)', 'ax': 'axes[1, 0]'}), '(df.dislike, kde=False, ax=axes[1, 0])\n', (1316, 1354), True, 'import seaborn as sns\n'), ((1423, 1473), 'seaborn.distplot', 'sns.distplot', (['df.comment'], {'kde': '(False)', 'ax': 'axes[1, 1]'}), '(df.comment, kde=False, ax=axes[1, 1])\n', (1435, 1473), True, 'import seaborn as sns\n'), ((1542, 1573), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.4)'}), '(hspace=0.4)\n', (1561, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1761), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 3)'}), '(nrows=1, ncols=2, figsize=(12, 3))\n', (1726, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1891, 
1945), 'seaborn.distplot', 'sns.distplot', (['(df.like / df.view)'], {'kde': '(False)', 'ax': 'axes[1]'}), '(df.like / df.view, kde=False, ax=axes[1])\n', (1903, 1945), True, 'import seaborn as sns\n'), ((2314, 2361), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(15, 5)'}), '(nrows=1, ncols=1, figsize=(15, 5))\n', (2326, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2625), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(15, 5)'}), '(nrows=1, ncols=1, figsize=(15, 5))\n', (2590, 2625), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2905), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(15, 5)'}), '(nrows=1, ncols=1, figsize=(15, 5))\n', (2870, 2905), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3504), 'statsmodels.api.add_constant', 'sm.add_constant', (['df_x'], {'prepend': '(False)'}), '(df_x, prepend=False)\n', (3483, 3504), True, 'import statsmodels.api as sm\n'), ((3521, 3555), 'statsmodels.formula.api.OLS', 'smf.OLS', (['df_y', 'df_x'], {'hasconst': '(True)'}), '(df_y, df_x, hasconst=True)\n', (3528, 3555), True, 'import statsmodels.formula.api as smf\n'), ((875, 890), 'numpy.log', 'np.log', (['df.view'], {}), '(df.view)\n', (881, 890), True, 'import numpy as np\n'), ((1784, 1799), 'numpy.log', 'np.log', (['df.view'], {}), '(df.view)\n', (1790, 1799), True, 'import numpy as np\n'), ((3364, 3416), 're.search', 're.search', (['"""^(201|Mon|Tue|Wed|Thr|Fri|Sat|Sun|w)"""', 'x'], {}), "('^(201|Mon|Tue|Wed|Thr|Fri|Sat|Sun|w)', x)\n", (3373, 3416), False, 'import re\n'), ((2245, 2267), 'youtube_stat.lib.datetime_util.parse_date_str', 'parse_date_str', (['r.date'], {}), '(r.date)\n', (2259, 2267), False, 'from youtube_stat.lib.datetime_util import parse_date_str\n')] |
import os
import sys
import numpy as np
from skimage import io
from skimage import transform as transf
import tensorflow as tf
import time
VGG_MEAN = [103.939, 116.779, 123.68]
class Zoomout_Vgg16:
def __init__(self, vgg16_npy_path=None,zlayers=["conv1_1","conv1_2","conv2_1","conv2_2","conv3_1","conv3_2","conv3_3","conv4_1","conv4_2","conv4_3","conv5_1","conv5_2","conv5_3","relu6","relu7"],downsample=4,weight=224,height=224,deconv_layer="pool5"):
self.zlayers = zlayers
self.zlayers_num = len(self.zlayers)
self.net = {}
self.strides={"conv1_1":1,"conv1_2":1,"pool1":2,
"conv2_1":2,"conv2_2":2,"pool2":4,
"conv3_1":4,"conv3_2":4,"conv3_3":4,"pool3":8,
"conv4_1":8,"conv4_2":8,"conv4_3":8,"pool4":16,
"conv5_1":16,"conv5_2":16,"conv5_3":16,"pool5":32,
}
self.channels={"conv1_1":64,"conv1_2":64,"pool1":64,
"conv2_1":128,"conv2_2":128,"pool2":128,
"conv3_1":256,"conv3_2":256,"conv3_3":256,"pool3":256,
"conv4_1":512,"conv4_2":512,"conv4_3":512,"pool4":512,
"conv5_1":512,"conv5_2":512,"conv5_3":512,"pool5":512,
}
self.downsample = downsample
self.w = weight
self.h = height
self.w_d = int(weight / downsample)
self.h_d = int(height / downsample)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
print("npy file loaded")
self.build()
self.net["input_deconv"] = tf.placeholder(shape=[1,int(224/self.strides[deconv_layer]),int(224/self.strides[deconv_layer]),self.channels[deconv_layer]],dtype=tf.float32)
self.net["output_deconv"] = self.build_deconv(this_layer=deconv_layer,feature_maps=self.net["input_deconv"])
def build(self):
self.net["input"] = tf.placeholder(shape=[1,self.w,self.h,3],dtype=tf.float32)
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=self.net["input"])
assert red.get_shape().as_list()[1:] == [self.w, self.h, 1]
assert green.get_shape().as_list()[1:] == [self.w, self.h, 1]
assert blue.get_shape().as_list()[1:] == [self.w, self.h, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [self.w, self.h, 3]
self.net["conv1_1"] = self.conv_layer(bgr, "conv1_1")
self.net["conv1_2"] = self.conv_layer(self.net["conv1_1"], "conv1_2")
self.net["pool1"] = self.max_pool(self.net["conv1_2"], 'pool1')
tmp = tf.tile(self.net["pool1"],[1,1,2,2])
tmp = tf.reshape(tmp,self.net["conv1_2"].shape)
self.net["pool1_mask"] = tf.cast(tf.greater_equal(self.net["conv1_2"],tmp),dtype=tf.float32)
self.net["conv2_1"] = self.conv_layer(self.net["pool1"], "conv2_1")
self.net["conv2_2"] = self.conv_layer(self.net["conv2_1"], "conv2_2")
self.net["pool2"] = self.max_pool(self.net["conv2_2"], 'pool2')
tmp = tf.tile(self.net["pool2"],[1,1,2,2])
tmp = tf.reshape(tmp,self.net["conv2_2"].shape)
self.net["pool2_mask"] = tf.cast(tf.greater_equal(self.net["conv2_2"],tmp),dtype=tf.float32)
self.net["conv3_1"] = self.conv_layer(self.net["pool2"], "conv3_1")
self.net["conv3_2"] = self.conv_layer(self.net["conv3_1"], "conv3_2")
self.net["conv3_3"] = self.conv_layer(self.net["conv3_2"], "conv3_3")
self.net["pool3"] = self.max_pool(self.net["conv3_3"], 'pool3')
tmp = tf.tile(self.net["pool3"],[1,1,2,2])
tmp = tf.reshape(tmp,self.net["conv3_3"].shape)
self.net["pool3_mask"] = tf.cast(tf.greater_equal(self.net["conv3_3"],tmp),dtype=tf.float32)
self.net["conv4_1"] = self.conv_layer(self.net["pool3"], "conv4_1")
self.net["conv4_2"] = self.conv_layer(self.net["conv4_1"], "conv4_2")
self.net["conv4_3"] = self.conv_layer(self.net["conv4_2"], "conv4_3")
self.net["pool4"] = self.max_pool(self.net["conv4_3"], 'pool4')
tmp = tf.tile(self.net["pool4"],[1,1,2,2])
tmp = tf.reshape(tmp,self.net["conv4_3"].shape)
self.net["pool4_mask"] = tf.cast(tf.greater_equal(self.net["conv4_3"],tmp),dtype=tf.float32)
self.net["conv5_1"] = self.conv_layer(self.net["pool4"], "conv5_1")
self.net["conv5_2"] = self.conv_layer(self.net["conv5_1"], "conv5_2")
self.net["conv5_3"] = self.conv_layer(self.net["conv5_2"], "conv5_3")
self.net["pool5"] = self.max_pool(self.net["conv5_3"], 'pool5')
tmp = tf.tile(self.net["pool5"],[1,1,2,2])
tmp = tf.reshape(tmp,self.net["conv5_3"].shape)
self.net["pool5_mask"] = tf.cast(tf.greater_equal(self.net["conv5_3"],tmp),dtype=tf.float32)
self.net["fc6"] = self.fc_layer(self.net["pool5"], "fc6")
assert self.net["fc6"].get_shape().as_list()[1:] == [4096]
self.net["relu6"] = tf.nn.relu(self.net["fc6"])
self.net["fc7"] = self.fc_layer(self.net["relu6"], "fc7")
self.net["relu7"] = tf.nn.relu(self.net["fc7"])
self.net["fc8"] = self.fc_layer(self.net["relu7"], "fc8")
self.net["output"] = tf.nn.softmax(self.net["fc8"], name="prob")
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
def get_conv_filter(self, name):
return tf.constant(self.data_dict[name][0], name="filter")
def get_bias(self, name):
return tf.constant(self.data_dict[name][1], name="biases")
def get_fc_weight(self, name):
return tf.constant(self.data_dict[name][0], name="weights")
def build_deconv(self,this_layer="pool5",feature_maps=None):
layer_index = int(this_layer[4])
if this_layer.startswith("pool"):
if layer_index <=2: last_layer = "conv%d_2" % layer_index
else: last_layer = "conv%d_3" % layer_index
tmp = tf.tile(feature_maps,[1,1,2,2])
tmp = tf.reshape(tmp, self.net["%s_mask" % this_layer].shape)
last_layer_feature_maps = tmp*self.net["%s_mask" % this_layer]
print("last_layer:%s" % last_layer)
return self.build_deconv(last_layer,feature_maps=last_layer_feature_maps)
if this_layer.startswith("conv"):
num_of_conv_layers = layer_index <= 2 and 2 or 3
for k in range(num_of_conv_layers,0,-1):
last_layer = "conv%d_%d" % (layer_index,k)
print("last_layer:%s" % last_layer)
relu = tf.nn.relu(feature_maps)
bias = tf.nn.bias_add(relu,-1*self.get_bias(last_layer))
output_shape = [1,int(224/self.strides[last_layer]),int(224/self.strides[last_layer]),len(self.data_dict[last_layer][0][0][0])]
print("output_shape:%s" % str(output_shape))
last_layer_feature_maps = tf.nn.conv2d_transpose(relu,self.data_dict[last_layer][0],output_shape,strides=[1,1,1,1],padding="SAME")
feature_maps = last_layer_feature_maps
if layer_index == 1:
return last_layer_feature_maps
return self.build_deconv("pool%d" % (layer_index-1), feature_maps=last_layer_feature_maps)
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[0]
if __name__ == "__main__":
deconv_layer = "pool5"
zoomout = Zoomout_Vgg16("vgg16.npy",deconv_layer=deconv_layer)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
img = io.imread("input/test.jpg")
img = transf.resize(img,(224,224))
f_ = sess.run(zoomout.net[deconv_layer],feed_dict={zoomout.net["input"]:[img]})
for i in range(zoomout.channels[deconv_layer]):
f = np.zeros([1,int(224/zoomout.strides[deconv_layer]),int(224/zoomout.strides[deconv_layer]),zoomout.channels[deconv_layer]])
#f[:,:,:,i] = f_[:,:,:,i]
max_9th_value = np.sort(f_[:,:,:,i]).flatten()[-9]
max_9th_mask = np.greater_equal(f_[:,:,:,i],max_9th_value).astype("int8")
f[:,:,:,i] = max_9th_mask * f_[:,:,:,i]
img_v = sess.run(zoomout.net["output_deconv"],feed_dict={zoomout.net["input"]:[img],zoomout.net["input_deconv"]:f})
mean = np.ones([224,224,3])
mean[:,:,0] *= VGG_MEAN[2]
mean[:,:,1] *= VGG_MEAN[1]
mean[:,:,2] *= VGG_MEAN[0]
img_v = np.reshape(img_v,[224,224,3])
img_v += mean
img_v = img_v.astype("int8")
io.imsave("output/%s_%d.png" % (deconv_layer,i),img_v)
| [
"tensorflow.tile",
"tensorflow.split",
"tensorflow.nn.softmax",
"tensorflow.nn.conv2d_transpose",
"numpy.greater_equal",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.sort",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"numpy.ones",
"tensorflo... | [((8673, 8685), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8683, 8685), True, 'import tensorflow as tf\n'), ((8744, 8771), 'skimage.io.imread', 'io.imread', (['"""input/test.jpg"""'], {}), "('input/test.jpg')\n", (8753, 8771), False, 'from skimage import io\n'), ((8782, 8812), 'skimage.transform.resize', 'transf.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (8795, 8812), True, 'from skimage import transform as transf\n'), ((1921, 1983), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, self.w, self.h, 3]', 'dtype': 'tf.float32'}), '(shape=[1, self.w, self.h, 3], dtype=tf.float32)\n', (1935, 1983), True, 'import tensorflow as tf\n'), ((2037, 2100), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(3)', 'value': "self.net['input']"}), "(axis=3, num_or_size_splits=3, value=self.net['input'])\n", (2045, 2100), True, 'import tensorflow as tf\n'), ((2322, 2412), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[blue - VGG_MEAN[0], green - VGG_MEAN[1], red - VGG_MEAN[2]]'}), '(axis=3, values=[blue - VGG_MEAN[0], green - VGG_MEAN[1], red -\n VGG_MEAN[2]])\n', (2331, 2412), True, 'import tensorflow as tf\n'), ((2751, 2791), 'tensorflow.tile', 'tf.tile', (["self.net['pool1']", '[1, 1, 2, 2]'], {}), "(self.net['pool1'], [1, 1, 2, 2])\n", (2758, 2791), True, 'import tensorflow as tf\n'), ((2802, 2844), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['conv1_2'].shape"], {}), "(tmp, self.net['conv1_2'].shape)\n", (2812, 2844), True, 'import tensorflow as tf\n'), ((3186, 3226), 'tensorflow.tile', 'tf.tile', (["self.net['pool2']", '[1, 1, 2, 2]'], {}), "(self.net['pool2'], [1, 1, 2, 2])\n", (3193, 3226), True, 'import tensorflow as tf\n'), ((3237, 3279), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['conv2_2'].shape"], {}), "(tmp, self.net['conv2_2'].shape)\n", (3247, 3279), True, 'import tensorflow as tf\n'), ((3699, 3739), 'tensorflow.tile', 'tf.tile', 
(["self.net['pool3']", '[1, 1, 2, 2]'], {}), "(self.net['pool3'], [1, 1, 2, 2])\n", (3706, 3739), True, 'import tensorflow as tf\n'), ((3750, 3792), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['conv3_3'].shape"], {}), "(tmp, self.net['conv3_3'].shape)\n", (3760, 3792), True, 'import tensorflow as tf\n'), ((4212, 4252), 'tensorflow.tile', 'tf.tile', (["self.net['pool4']", '[1, 1, 2, 2]'], {}), "(self.net['pool4'], [1, 1, 2, 2])\n", (4219, 4252), True, 'import tensorflow as tf\n'), ((4263, 4305), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['conv4_3'].shape"], {}), "(tmp, self.net['conv4_3'].shape)\n", (4273, 4305), True, 'import tensorflow as tf\n'), ((4725, 4765), 'tensorflow.tile', 'tf.tile', (["self.net['pool5']", '[1, 1, 2, 2]'], {}), "(self.net['pool5'], [1, 1, 2, 2])\n", (4732, 4765), True, 'import tensorflow as tf\n'), ((4776, 4818), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['conv5_3'].shape"], {}), "(tmp, self.net['conv5_3'].shape)\n", (4786, 4818), True, 'import tensorflow as tf\n'), ((5081, 5108), 'tensorflow.nn.relu', 'tf.nn.relu', (["self.net['fc6']"], {}), "(self.net['fc6'])\n", (5091, 5108), True, 'import tensorflow as tf\n'), ((5204, 5231), 'tensorflow.nn.relu', 'tf.nn.relu', (["self.net['fc7']"], {}), "(self.net['fc7'])\n", (5214, 5231), True, 'import tensorflow as tf\n'), ((5329, 5372), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["self.net['fc8']"], {'name': '"""prob"""'}), "(self.net['fc8'], name='prob')\n", (5342, 5372), True, 'import tensorflow as tf\n'), ((5427, 5523), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['bottom'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': 'name'}), "(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name=name)\n", (5441, 5523), True, 'import tensorflow as tf\n'), ((5573, 5669), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['bottom'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': 
'name'}), "(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name=name)\n", (5587, 5669), True, 'import tensorflow as tf\n'), ((6622, 6673), 'tensorflow.constant', 'tf.constant', (['self.data_dict[name][0]'], {'name': '"""filter"""'}), "(self.data_dict[name][0], name='filter')\n", (6633, 6673), True, 'import tensorflow as tf\n'), ((6720, 6771), 'tensorflow.constant', 'tf.constant', (['self.data_dict[name][1]'], {'name': '"""biases"""'}), "(self.data_dict[name][1], name='biases')\n", (6731, 6771), True, 'import tensorflow as tf\n'), ((6823, 6875), 'tensorflow.constant', 'tf.constant', (['self.data_dict[name][0]'], {'name': '"""weights"""'}), "(self.data_dict[name][0], name='weights')\n", (6834, 6875), True, 'import tensorflow as tf\n'), ((8699, 8732), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8730, 8732), True, 'import tensorflow as tf\n'), ((9444, 9466), 'numpy.ones', 'np.ones', (['[224, 224, 3]'], {}), '([224, 224, 3])\n', (9451, 9466), True, 'import numpy as np\n'), ((9586, 9618), 'numpy.reshape', 'np.reshape', (['img_v', '[224, 224, 3]'], {}), '(img_v, [224, 224, 3])\n', (9596, 9618), True, 'import numpy as np\n'), ((9683, 9739), 'skimage.io.imsave', 'io.imsave', (["('output/%s_%d.png' % (deconv_layer, i))", 'img_v'], {}), "('output/%s_%d.png' % (deconv_layer, i), img_v)\n", (9692, 9739), False, 'from skimage import io\n'), ((2885, 2927), 'tensorflow.greater_equal', 'tf.greater_equal', (["self.net['conv1_2']", 'tmp'], {}), "(self.net['conv1_2'], tmp)\n", (2901, 2927), True, 'import tensorflow as tf\n'), ((3320, 3362), 'tensorflow.greater_equal', 'tf.greater_equal', (["self.net['conv2_2']", 'tmp'], {}), "(self.net['conv2_2'], tmp)\n", (3336, 3362), True, 'import tensorflow as tf\n'), ((3833, 3875), 'tensorflow.greater_equal', 'tf.greater_equal', (["self.net['conv3_3']", 'tmp'], {}), "(self.net['conv3_3'], tmp)\n", (3849, 3875), True, 'import tensorflow as tf\n'), ((4346, 4388), 
'tensorflow.greater_equal', 'tf.greater_equal', (["self.net['conv4_3']", 'tmp'], {}), "(self.net['conv4_3'], tmp)\n", (4362, 4388), True, 'import tensorflow as tf\n'), ((4859, 4901), 'tensorflow.greater_equal', 'tf.greater_equal', (["self.net['conv5_3']", 'tmp'], {}), "(self.net['conv5_3'], tmp)\n", (4875, 4901), True, 'import tensorflow as tf\n'), ((5719, 5742), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (5736, 5742), True, 'import tensorflow as tf\n'), ((5810, 5866), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['bottom', 'filt', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(bottom, filt, [1, 1, 1, 1], padding='SAME')\n", (5822, 5866), True, 'import tensorflow as tf\n'), ((5933, 5966), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'conv_biases'], {}), '(conv, conv_biases)\n', (5947, 5966), True, 'import tensorflow as tf\n'), ((5987, 6003), 'tensorflow.nn.relu', 'tf.nn.relu', (['bias'], {}), '(bias)\n', (5997, 6003), True, 'import tensorflow as tf\n'), ((6080, 6103), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (6097, 6103), True, 'import tensorflow as tf\n'), ((6247, 6276), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (6257, 6276), True, 'import tensorflow as tf\n'), ((7169, 7204), 'tensorflow.tile', 'tf.tile', (['feature_maps', '[1, 1, 2, 2]'], {}), '(feature_maps, [1, 1, 2, 2])\n', (7176, 7204), True, 'import tensorflow as tf\n'), ((7219, 7274), 'tensorflow.reshape', 'tf.reshape', (['tmp', "self.net['%s_mask' % this_layer].shape"], {}), "(tmp, self.net['%s_mask' % this_layer].shape)\n", (7229, 7274), True, 'import tensorflow as tf\n'), ((1472, 1514), 'numpy.load', 'np.load', (['vgg16_npy_path'], {'encoding': '"""latin1"""'}), "(vgg16_npy_path, encoding='latin1')\n", (1479, 1514), True, 'import numpy as np\n'), ((6515, 6536), 'tensorflow.matmul', 'tf.matmul', (['x', 'weights'], {}), '(x, weights)\n', (6524, 6536), True, 'import tensorflow as 
tf\n'), ((7774, 7798), 'tensorflow.nn.relu', 'tf.nn.relu', (['feature_maps'], {}), '(feature_maps)\n', (7784, 7798), True, 'import tensorflow as tf\n'), ((8119, 8234), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['relu', 'self.data_dict[last_layer][0]', 'output_shape'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(relu, self.data_dict[last_layer][0], output_shape,\n strides=[1, 1, 1, 1], padding='SAME')\n", (8141, 8234), True, 'import tensorflow as tf\n'), ((9198, 9245), 'numpy.greater_equal', 'np.greater_equal', (['f_[:, :, :, i]', 'max_9th_value'], {}), '(f_[:, :, :, i], max_9th_value)\n', (9214, 9245), True, 'import numpy as np\n'), ((9140, 9163), 'numpy.sort', 'np.sort', (['f_[:, :, :, i]'], {}), '(f_[:, :, :, i])\n', (9147, 9163), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The polarization.core test suite.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import unittest
from os.path import dirname, join
import numpy as np
from scipy import signal
import obspy
from obspy.signal import polarization, util
def _create_test_data():
"""
Test data used for some polarization tests.
:return:
"""
x = np.arange(0, 2048 / 20.0, 1.0 / 20.0)
x *= 2. * np.pi
y = np.cos(x)
tr_z = obspy.Trace(data=y)
tr_z.stats.sampling_rate = 20.
tr_z.stats.starttime = obspy.UTCDateTime('2014-03-01T00:00')
tr_z.stats.station = 'POLT'
tr_z.stats.channel = 'HHZ'
tr_z.stats.network = 'XX'
tr_n = tr_z.copy()
tr_n.data *= 2.
tr_n.stats.channel = 'HHN'
tr_e = tr_z.copy()
tr_e.stats.channel = 'HHE'
sz = obspy.Stream()
sz.append(tr_z)
sz.append(tr_n)
sz.append(tr_e)
sz.sort(reverse=True)
return sz
class PolarizationTestCase(unittest.TestCase):
"""
Test cases for polarization analysis
"""
def setUp(self):
path = join(dirname(__file__), 'data')
# setting up sliding window data
data_z = np.loadtxt(join(path, 'MBGA_Z.ASC'))
data_e = np.loadtxt(join(path, 'MBGA_E.ASC'))
data_n = np.loadtxt(join(path, 'MBGA_N.ASC'))
n = 256
fs = 75
inc = int(0.05 * fs)
self.data_win_z, self.nwin, self.no_win = \
util.enframe(data_z, signal.hamming(n), inc)
self.data_win_e, self.nwin, self.no_win = \
util.enframe(data_e, signal.hamming(n), inc)
self.data_win_n, self.nwin, self.no_win = \
util.enframe(data_n, signal.hamming(n), inc)
# global test input
self.fk = [2, 1, 0, -1, -2]
self.norm = pow(np.max(data_z), 2)
self.res = np.loadtxt(join(path, '3cssan.hy.1.MBGA_Z'))
def tearDown(self):
pass
def test_polarization(self):
"""
windowed data
"""
pol = polarization.eigval(self.data_win_e, self.data_win_n,
self.data_win_z, self.fk, self.norm)
rms = np.sqrt(np.sum((pol[0] - self.res[:, 34]) ** 2) /
np.sum(self.res[:, 34] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[1] - self.res[:, 35]) ** 2) /
np.sum(self.res[:, 35] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[2] - self.res[:, 36]) ** 2) /
np.sum(self.res[:, 36] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[3] - self.res[:, 40]) ** 2) /
np.sum(self.res[:, 40] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[4] - self.res[:, 42]) ** 2) /
np.sum(self.res[:, 42] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[5][:, 0] - self.res[:, 37]) ** 2) /
np.sum(self.res[:, 37] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[5][:, 1] - self.res[:, 38]) ** 2) /
np.sum(self.res[:, 38] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[5][:, 2] - self.res[:, 39]) ** 2) /
np.sum(self.res[:, 39] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[6] - self.res[:, 41]) ** 2) /
np.sum(self.res[:, 41] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[7] - self.res[:, 43]) ** 2) /
np.sum(self.res[:, 43] ** 2))
self.assertEqual(rms < 1.0e-5, True)
def test_polarization_1d(self):
"""
1 dimenstional input --- regression test case for bug #919
"""
pol = polarization.eigval(self.data_win_e[100, :],
self.data_win_n[100, :],
self.data_win_z[100, :],
self.fk, self.norm)
pol_5_ref = [2.81387533e-04, 3.18409580e-04, 6.74030846e-04,
5.55067015e-01, 4.32938188e-01]
self.assertTrue(np.allclose(np.concatenate(pol[:5]), pol_5_ref))
def test_polarization_pm(self):
    """Particle-motion ("pm") method of polarization_analysis."""
    stream = _create_test_data()
    start = stream[0].stats.starttime
    end = stream[0].stats.endtime
    result = polarization.polarization_analysis(
        stream, win_len=10.0, win_frac=0.1, frqlow=1.0, frqhigh=5.0,
        verbose=False, stime=start, etime=end, method="pm",
        var_noise=0.0)
    # All windows of the stationary test data should agree, so check the
    # first window explicitly ...
    self.assertEqual(result["timestamp"][0], 1393632001.0)
    self.assertAlmostEqual(result["azimuth"][0], 26.56505117707799)
    self.assertAlmostEqual(result["incidence"][0], 65.905157447889309)
    self.assertAlmostEqual(result["azimuth_error"][0], 0.000000)
    self.assertAlmostEqual(result["incidence_error"][0], 0.000000)
    # ... and then verify that every other window matches it.
    for key in ("azimuth", "incidence"):
        values = result[key]
        self.assertTrue(np.allclose(values / values[0], np.ones_like(values),
                                    rtol=1e-4))
    for key in ("azimuth_error", "incidence_error"):
        values = result[key]
        reference = np.full_like(values, values[0])
        self.assertTrue(np.allclose(values, reference, rtol=1e-4,
                                    atol=1e-16))
    self.assertTrue(np.allclose(
        result["timestamp"] - result["timestamp"][0], np.arange(0, 92, 1)))
def test_polarization_flinn(self):
    """Flinn method of polarization_analysis on synthetic data."""
    stream = _create_test_data()
    start = stream[0].stats.starttime
    end = stream[0].stats.endtime
    result = polarization.polarization_analysis(
        stream, win_len=10.0, win_frac=0.1, frqlow=1.0, frqhigh=5.0,
        verbose=False, stime=start, etime=end,
        method="flinn", var_noise=0.0)
    # Stationary test data: pin the first window, then make sure every
    # other window produced (almost) the same value.
    first_window = {
        "azimuth": 26.56505117707799,
        "incidence": 65.905157447889309,
        "rectilinearity": 1.000000,
        "planarity": 1.000000,
    }
    self.assertEqual(result["timestamp"][0], 1393632001.0)
    for key in ("azimuth", "incidence", "rectilinearity", "planarity"):
        self.assertAlmostEqual(result[key][0], first_window[key])
        values = result[key]
        self.assertTrue(np.allclose(values / values[0], np.ones_like(values),
                                    rtol=1e-4))
    self.assertTrue(np.allclose(
        result["timestamp"] - result["timestamp"][0], np.arange(0, 92, 1)))
def test_polarization_vidale(self):
    """Vidale method of polarization_analysis on synthetic data."""
    stream = _create_test_data()
    start = stream[0].stats.starttime
    end = stream[0].stats.endtime
    result = polarization.polarization_analysis(
        stream, win_len=10.0, win_frac=0.1, frqlow=1.0, frqhigh=5.0,
        verbose=False, stime=start, etime=end,
        method="vidale", var_noise=0.0)
    # Stationary test data: validate the first window explicitly ...
    self.assertEqual(result["timestamp"][0], 1393632003.0)
    self.assertAlmostEqual(result["azimuth"][0], 26.56505117707799)
    self.assertAlmostEqual(result["incidence"][0], 65.905157447889309)
    self.assertAlmostEqual(result["rectilinearity"][0], 1.000000)
    self.assertAlmostEqual(result["planarity"][0], 1.000000)
    self.assertAlmostEqual(result["ellipticity"][0], 3.8195545129768958e-06)
    # ... then confirm every remaining window matches it.
    for key in ("azimuth", "incidence", "rectilinearity", "planarity",
                "ellipticity"):
        values = result[key]
        self.assertTrue(np.allclose(values / values[0], np.ones_like(values),
                                    rtol=1e-4))
    self.assertTrue(np.allclose(
        result["timestamp"] - result["timestamp"][0],
        np.arange(0, 97.85, 0.05), rtol=1e-5))
def suite():
    """Return the polarization test suite.

    ``unittest.makeSuite`` is deprecated since Python 3.11 and removed in
    3.13; building the suite through a ``TestLoader`` is the supported
    equivalent (the loader's default method prefix is ``'test'``, the
    same prefix the old call passed explicitly).
    """
    return unittest.TestLoader().loadTestsFromTestCase(PolarizationTestCase)
if __name__ == '__main__':
    # Run the suite defined by suite() when executed as a script.
    unittest.main(defaultTest='suite')
| [
"obspy.Stream",
"numpy.ones_like",
"numpy.allclose",
"scipy.signal.hamming",
"unittest.makeSuite",
"obspy.UTCDateTime",
"os.path.join",
"numpy.max",
"os.path.dirname",
"obspy.signal.polarization.polarization_analysis",
"numpy.sum",
"numpy.empty_like",
"numpy.cos",
"obspy.Trace",
"numpy.c... | [((497, 534), 'numpy.arange', 'np.arange', (['(0)', '(2048 / 20.0)', '(1.0 / 20.0)'], {}), '(0, 2048 / 20.0, 1.0 / 20.0)\n', (506, 534), True, 'import numpy as np\n'), ((563, 572), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (569, 572), True, 'import numpy as np\n'), ((584, 603), 'obspy.Trace', 'obspy.Trace', ([], {'data': 'y'}), '(data=y)\n', (595, 603), False, 'import obspy\n'), ((666, 703), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2014-03-01T00:00"""'], {}), "('2014-03-01T00:00')\n", (683, 703), False, 'import obspy\n'), ((936, 950), 'obspy.Stream', 'obspy.Stream', ([], {}), '()\n', (948, 950), False, 'import obspy\n'), ((8299, 8347), 'unittest.makeSuite', 'unittest.makeSuite', (['PolarizationTestCase', '"""test"""'], {}), "(PolarizationTestCase, 'test')\n", (8317, 8347), False, 'import unittest\n'), ((8381, 8415), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (8394, 8415), False, 'import unittest\n'), ((2120, 2215), 'obspy.signal.polarization.eigval', 'polarization.eigval', (['self.data_win_e', 'self.data_win_n', 'self.data_win_z', 'self.fk', 'self.norm'], {}), '(self.data_win_e, self.data_win_n, self.data_win_z, self\n .fk, self.norm)\n', (2139, 2215), False, 'from obspy.signal import polarization, util\n'), ((4015, 4134), 'obspy.signal.polarization.eigval', 'polarization.eigval', (['self.data_win_e[100, :]', 'self.data_win_n[100, :]', 'self.data_win_z[100, :]', 'self.fk', 'self.norm'], {}), '(self.data_win_e[100, :], self.data_win_n[100, :], self.\n data_win_z[100, :], self.fk, self.norm)\n', (4034, 4134), False, 'from obspy.signal import polarization, util\n'), ((4578, 4739), 'obspy.signal.polarization.polarization_analysis', 'polarization.polarization_analysis', (['st'], {'win_len': '(10.0)', 'win_frac': '(0.1)', 'frqlow': '(1.0)', 'frqhigh': '(5.0)', 'verbose': '(False)', 'stime': 't', 'etime': 'e', 'method': '"""pm"""', 'var_noise': '(0.0)'}), "(st, win_len=10.0, win_frac=0.1, 
frqlow=\n 1.0, frqhigh=5.0, verbose=False, stime=t, etime=e, method='pm',\n var_noise=0.0)\n", (4612, 4739), False, 'from obspy.signal import polarization, util\n'), ((5957, 6121), 'obspy.signal.polarization.polarization_analysis', 'polarization.polarization_analysis', (['st'], {'win_len': '(10.0)', 'win_frac': '(0.1)', 'frqlow': '(1.0)', 'frqhigh': '(5.0)', 'verbose': '(False)', 'stime': 't', 'etime': 'e', 'method': '"""flinn"""', 'var_noise': '(0.0)'}), "(st, win_len=10.0, win_frac=0.1, frqlow=\n 1.0, frqhigh=5.0, verbose=False, stime=t, etime=e, method='flinn',\n var_noise=0.0)\n", (5991, 6121), False, 'from obspy.signal import polarization, util\n'), ((7127, 7292), 'obspy.signal.polarization.polarization_analysis', 'polarization.polarization_analysis', (['st'], {'win_len': '(10.0)', 'win_frac': '(0.1)', 'frqlow': '(1.0)', 'frqhigh': '(5.0)', 'verbose': '(False)', 'stime': 't', 'etime': 'e', 'method': '"""vidale"""', 'var_noise': '(0.0)'}), "(st, win_len=10.0, win_frac=0.1, frqlow=\n 1.0, frqhigh=5.0, verbose=False, stime=t, etime=e, method='vidale',\n var_noise=0.0)\n", (7161, 7292), False, 'from obspy.signal import polarization, util\n'), ((1199, 1216), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1206, 1216), False, 'from os.path import dirname, join\n'), ((1295, 1319), 'os.path.join', 'join', (['path', '"""MBGA_Z.ASC"""'], {}), "(path, 'MBGA_Z.ASC')\n", (1299, 1319), False, 'from os.path import dirname, join\n'), ((1349, 1373), 'os.path.join', 'join', (['path', '"""MBGA_E.ASC"""'], {}), "(path, 'MBGA_E.ASC')\n", (1353, 1373), False, 'from os.path import dirname, join\n'), ((1403, 1427), 'os.path.join', 'join', (['path', '"""MBGA_N.ASC"""'], {}), "(path, 'MBGA_N.ASC')\n", (1407, 1427), False, 'from os.path import dirname, join\n'), ((1575, 1592), 'scipy.signal.hamming', 'signal.hamming', (['n'], {}), '(n)\n', (1589, 1592), False, 'from scipy import signal\n'), ((1684, 1701), 'scipy.signal.hamming', 'signal.hamming', (['n'], {}), '(n)\n', 
(1698, 1701), False, 'from scipy import signal\n'), ((1793, 1810), 'scipy.signal.hamming', 'signal.hamming', (['n'], {}), '(n)\n', (1807, 1810), False, 'from scipy import signal\n'), ((1905, 1919), 'numpy.max', 'np.max', (['data_z'], {}), '(data_z)\n', (1911, 1919), True, 'import numpy as np\n'), ((1954, 1986), 'os.path.join', 'join', (['path', '"""3cssan.hy.1.MBGA_Z"""'], {}), "(path, '3cssan.hy.1.MBGA_Z')\n", (1958, 1986), False, 'from os.path import dirname, join\n'), ((5537, 5555), 'numpy.empty_like', 'np.empty_like', (['got'], {}), '(got)\n', (5550, 5555), True, 'import numpy as np\n'), ((2267, 2306), 'numpy.sum', 'np.sum', (['((pol[0] - self.res[:, 34]) ** 2)'], {}), '((pol[0] - self.res[:, 34]) ** 2)\n', (2273, 2306), True, 'import numpy as np\n'), ((2331, 2359), 'numpy.sum', 'np.sum', (['(self.res[:, 34] ** 2)'], {}), '(self.res[:, 34] ** 2)\n', (2337, 2359), True, 'import numpy as np\n'), ((2428, 2467), 'numpy.sum', 'np.sum', (['((pol[1] - self.res[:, 35]) ** 2)'], {}), '((pol[1] - self.res[:, 35]) ** 2)\n', (2434, 2467), True, 'import numpy as np\n'), ((2492, 2520), 'numpy.sum', 'np.sum', (['(self.res[:, 35] ** 2)'], {}), '(self.res[:, 35] ** 2)\n', (2498, 2520), True, 'import numpy as np\n'), ((2589, 2628), 'numpy.sum', 'np.sum', (['((pol[2] - self.res[:, 36]) ** 2)'], {}), '((pol[2] - self.res[:, 36]) ** 2)\n', (2595, 2628), True, 'import numpy as np\n'), ((2653, 2681), 'numpy.sum', 'np.sum', (['(self.res[:, 36] ** 2)'], {}), '(self.res[:, 36] ** 2)\n', (2659, 2681), True, 'import numpy as np\n'), ((2750, 2789), 'numpy.sum', 'np.sum', (['((pol[3] - self.res[:, 40]) ** 2)'], {}), '((pol[3] - self.res[:, 40]) ** 2)\n', (2756, 2789), True, 'import numpy as np\n'), ((2814, 2842), 'numpy.sum', 'np.sum', (['(self.res[:, 40] ** 2)'], {}), '(self.res[:, 40] ** 2)\n', (2820, 2842), True, 'import numpy as np\n'), ((2911, 2950), 'numpy.sum', 'np.sum', (['((pol[4] - self.res[:, 42]) ** 2)'], {}), '((pol[4] - self.res[:, 42]) ** 2)\n', (2917, 2950), True, 'import 
numpy as np\n'), ((2975, 3003), 'numpy.sum', 'np.sum', (['(self.res[:, 42] ** 2)'], {}), '(self.res[:, 42] ** 2)\n', (2981, 3003), True, 'import numpy as np\n'), ((3072, 3117), 'numpy.sum', 'np.sum', (['((pol[5][:, 0] - self.res[:, 37]) ** 2)'], {}), '((pol[5][:, 0] - self.res[:, 37]) ** 2)\n', (3078, 3117), True, 'import numpy as np\n'), ((3142, 3170), 'numpy.sum', 'np.sum', (['(self.res[:, 37] ** 2)'], {}), '(self.res[:, 37] ** 2)\n', (3148, 3170), True, 'import numpy as np\n'), ((3239, 3284), 'numpy.sum', 'np.sum', (['((pol[5][:, 1] - self.res[:, 38]) ** 2)'], {}), '((pol[5][:, 1] - self.res[:, 38]) ** 2)\n', (3245, 3284), True, 'import numpy as np\n'), ((3309, 3337), 'numpy.sum', 'np.sum', (['(self.res[:, 38] ** 2)'], {}), '(self.res[:, 38] ** 2)\n', (3315, 3337), True, 'import numpy as np\n'), ((3406, 3451), 'numpy.sum', 'np.sum', (['((pol[5][:, 2] - self.res[:, 39]) ** 2)'], {}), '((pol[5][:, 2] - self.res[:, 39]) ** 2)\n', (3412, 3451), True, 'import numpy as np\n'), ((3476, 3504), 'numpy.sum', 'np.sum', (['(self.res[:, 39] ** 2)'], {}), '(self.res[:, 39] ** 2)\n', (3482, 3504), True, 'import numpy as np\n'), ((3573, 3612), 'numpy.sum', 'np.sum', (['((pol[6] - self.res[:, 41]) ** 2)'], {}), '((pol[6] - self.res[:, 41]) ** 2)\n', (3579, 3612), True, 'import numpy as np\n'), ((3637, 3665), 'numpy.sum', 'np.sum', (['(self.res[:, 41] ** 2)'], {}), '(self.res[:, 41] ** 2)\n', (3643, 3665), True, 'import numpy as np\n'), ((3734, 3773), 'numpy.sum', 'np.sum', (['((pol[7] - self.res[:, 43]) ** 2)'], {}), '((pol[7] - self.res[:, 43]) ** 2)\n', (3740, 3773), True, 'import numpy as np\n'), ((3798, 3826), 'numpy.sum', 'np.sum', (['(self.res[:, 43] ** 2)'], {}), '(self.res[:, 43] ** 2)\n', (3804, 3826), True, 'import numpy as np\n'), ((4390, 4413), 'numpy.concatenate', 'np.concatenate', (['pol[:5]'], {}), '(pol[:5])\n', (4404, 4413), True, 'import numpy as np\n'), ((5618, 5669), 'numpy.allclose', 'np.allclose', (['got', 'expected'], {'rtol': '(0.0001)', 'atol': 
'(1e-16)'}), '(got, expected, rtol=0.0001, atol=1e-16)\n', (5629, 5669), True, 'import numpy as np\n'), ((5781, 5800), 'numpy.arange', 'np.arange', (['(0)', '(92)', '(1)'], {}), '(0, 92, 1)\n', (5790, 5800), True, 'import numpy as np\n'), ((6950, 6969), 'numpy.arange', 'np.arange', (['(0)', '(92)', '(1)'], {}), '(0, 92, 1)\n', (6959, 6969), True, 'import numpy as np\n'), ((8234, 8259), 'numpy.arange', 'np.arange', (['(0)', '(97.85)', '(0.05)'], {}), '(0, 97.85, 0.05)\n', (8243, 8259), True, 'import numpy as np\n'), ((5359, 5376), 'numpy.ones_like', 'np.ones_like', (['got'], {}), '(got)\n', (5371, 5376), True, 'import numpy as np\n'), ((6767, 6784), 'numpy.ones_like', 'np.ones_like', (['got'], {}), '(got)\n', (6779, 6784), True, 'import numpy as np\n'), ((8051, 8068), 'numpy.ones_like', 'np.ones_like', (['got'], {}), '(got)\n', (8063, 8068), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: March, 2015
# after the matlab version
#in some cases a selection of an apropriate backend is necessary for a plot to show up (uncomment import matplotlib and one of the two backends).
#import matplotlib
#matplotlib.use('TkAgg')
#matplotlib.use('Qt4Agg')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from readsim import readsim
from xzplot import plot_dict
def arg_parser():
    """Construct the command-line parser for the hovx_vel script."""
    usage = ("usage: %(prog)s [options] <filename.nc> <z level>\n"
             "    Basic: %(prog)s Downslope 1\n"
             "    Example: %(prog)s -o plot.pdf --vci 5 Downslope 1")
    description = """
    Produces Hovmoeller plots (t, x-plots) of velocity
    See Also
    --------
    hovz_vel
    """
    parser = ArgumentParser(usage=usage, description=description,
                            formatter_class=RawDescriptionHelpFormatter)
    # positional arguments
    parser.add_argument('filename', metavar='filename', nargs=1, type=str,
                        help='File holding the data from the model')
    parser.add_argument('zlev', metavar='zlev', nargs=1, type=int,
                        help='level number')
    # optional arguments
    parser.add_argument("-o", dest="figname", default='hovx_vel.pdf',
                        metavar="FILE.pdf",
                        help="Name of the output figure")
    parser.add_argument("--vci", dest="vci", default=2, metavar="2", type=int,
                        help="set velocity contouring interval [m/s]")
    parser.add_argument("--vlim", dest="vlim", default=(0, 60), nargs=2,
                        type=float, metavar=("0", "60"),
                        help="restrict the velocity contours")
    return parser
def plot():
    """Draw the Hovmoeller (t, x) contour plot of horizontal velocity.

    Relies on the module-level objects ``var``, ``args``, ``zlev``,
    ``varname`` and ``pd`` that are set up in the ``__main__`` section.
    """
    fig, axis = plt.subplots(1)
    vel = np.squeeze(var.horizontal_velocity[:, zlev, :])
    # Snap the contour range to integer multiples of the contouring
    # interval around the background velocity u00.
    steps_below = int((var.u00 - np.nanmin(vel)) / args.vci + 0.5)
    lower = var.u00 - steps_below * args.vci
    steps_above = int((np.nanmax(vel) - var.u00) / args.vci + 0.5)
    upper = var.u00 + steps_above * args.vci
    # Contour levels, colorbar ticks and the value range for contourf.
    clev = np.arange(lower, upper + args.vci, args.vci)
    ticks = np.arange(clev[0], clev[-1] + args.vci, args.vci)
    valRange = np.arange(clev[0] - 0.5 * args.vci,
                         clev[-1] + 1.5 * args.vci,
                         args.vci)
    # Symmetric color normalisation around u00.
    span = max(clev[-1] + 1.5 * args.vci - var.u00,
               var.u00 - clev[0] - 0.5 * args.vci)
    cs = axis.contourf(var.x, var.time / 3600., vel,
                       valRange,
                       vmin=var.u00 - span,
                       vmax=var.u00 + span,
                       cmap=pd[varname]['cmap'])
    plt.colorbar(cs, ticks=ticks, spacing='uniform')
    axis.set_xlabel("x [km]")
    axis.set_ylabel("Time [h]")
    axis.xaxis.set_minor_locator(MultipleLocator(50))
    axis.set_title('Velocity at zlev = {0}'.format(zlev))
if __name__ == '__main__':
    op = arg_parser()
    # get command line arguments
    args = op.parse_args()
    # first (and only) entry of the positional 'zlev' argument
    zlev = args.zlev[0]
    varname = 'horizontal_velocity'
    # load the simulation output and the plotting configuration
    var = readsim(args.filename[0], varname)
    pd = plot_dict(args, var, varname)
    plot()
    # save the figure as PDF under the requested name, then display it
    with plt.rc_context({'savefig.format': 'pdf'}):
        plt.savefig(args.figname)
    plt.show()
| [
"matplotlib.pyplot.rc_context",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.colorbar",
"xzplot.plot_dict",
"numpy.squeeze",
"readsim.readsim",
"numpy.nanmax",
"numpy.nanmin",
"matplotlib.pyplot.subplots",
"numpy.arange",
"ma... | [((862, 964), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'usage': 'usage', 'description': 'description', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(usage=usage, description=description, formatter_class=\n RawDescriptionHelpFormatter)\n', (876, 964), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((1803, 1818), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1815, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1878), 'numpy.squeeze', 'np.squeeze', (['var.horizontal_velocity[:, zlev, :]'], {}), '(var.horizontal_velocity[:, zlev, :])\n', (1841, 1878), True, 'import numpy as np\n'), ((1961, 1976), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (1970, 1976), True, 'import numpy as np\n'), ((2089, 2104), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (2098, 2104), True, 'import numpy as np\n'), ((2247, 2295), 'numpy.arange', 'np.arange', (['vMinInt', '(vMaxInt + args.vci)', 'args.vci'], {}), '(vMinInt, vMaxInt + args.vci, args.vci)\n', (2256, 2295), True, 'import numpy as np\n'), ((2308, 2357), 'numpy.arange', 'np.arange', (['clev[0]', '(clev[-1] + args.vci)', 'args.vci'], {}), '(clev[0], clev[-1] + args.vci, args.vci)\n', (2317, 2357), True, 'import numpy as np\n'), ((2373, 2445), 'numpy.arange', 'np.arange', (['(clev[0] - 0.5 * args.vci)', '(clev[-1] + 1.5 * args.vci)', 'args.vci'], {}), '(clev[0] - 0.5 * args.vci, clev[-1] + 1.5 * args.vci, args.vci)\n', (2382, 2445), True, 'import numpy as np\n'), ((2992, 3040), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs'], {'ticks': 'ticks', 'spacing': '"""uniform"""'}), "(cs, ticks=ticks, spacing='uniform')\n", (3004, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3426), 'readsim.readsim', 'readsim', (['args.filename[0]', 'varname'], {}), '(args.filename[0], varname)\n', (3399, 3426), False, 'from readsim import readsim\n'), ((3436, 3465), 'xzplot.plot_dict', 'plot_dict', (['args', 'var', 
'varname'], {}), '(args, var, varname)\n', (3445, 3465), False, 'from xzplot import plot_dict\n'), ((3570, 3580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3578, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3150), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (3146, 3150), False, 'from matplotlib.ticker import MultipleLocator\n'), ((3488, 3529), 'matplotlib.pyplot.rc_context', 'plt.rc_context', (["{'savefig.format': 'pdf'}"], {}), "({'savefig.format': 'pdf'})\n", (3502, 3529), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3564), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.figname'], {}), '(args.figname)\n', (3550, 3564), True, 'import matplotlib.pyplot as plt\n')] |
"""Example: embed a trained Keras ANN into a Pyomo optimization model.

Loads a ReLU network trained on the 'peaks' surface, embeds its MILP
formulation into a Pyomo model via relumip's AnnModel, minimizes the
network output with GLPK, and visualizes the computed solution point on
the sampled response surface.
"""
import tensorflow as tf
import numpy as np
import pyomo.environ as pyo
from relumip import AnnModel
from relumip.utils.visualization import plot_results_2d
# Load the trained tensorflow model which will be embedded into the optimization problem.
tf_model = tf.keras.models.load_model('data/peaks_3x10.h5')
# Create a pyomo model into which the ANN will be embedded.
model = pyo.ConcreteModel()
model.construct()
# All network variables will be added to a user-defined block within the model.
model.ann = pyo.Block()
# The network input and output variables have to be defined by the user.
# For the network input, finite variable bounds have to be supplied (they can be inferred from the data used to train
# the model, for example).
model.ann.Input1 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Input2 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Output = pyo.Var(bounds=(-10000, 10000), within=pyo.Reals)
# Input and output variables are stored in lists to be passes to the AnnModel.
input_vars = [model.ann.Input1, model.ann.Input2]
output_vars = [model.ann.Output]
# A solver instance has to be defined for bound tightening. Make sure that an appropriate MIP solver is installed.
solver = pyo.SolverFactory('glpk')
# Now the AnnModel instance can be created.
ann_model = AnnModel(tf_model=tf_model, modeling_language='PYOMO')
# Input and output variables are connected to the network.
# The block dedicated for the ANN model has to be passed as well.
ann_model.connect_network_input(opt_model=model.ann, input_vars=input_vars)
ann_model.connect_network_output(opt_model=model.ann, output_vars=output_vars)
# This call generates the network formulation inside the block.
# The bound tightening strategy has to be specified, for Pyomo the options are 'MIP' or 'LP' (default).
ann_model.embed_network_formulation(bound_tightening_strategy='LP', solver=solver)
# In this example, no additional model components besides the ANN are considered.
# We choose to minimize the network output and display the solved model.
model.obj = pyo.Objective(expr=model.ann.Output, sense=pyo.minimize)
res = solver.solve(model)
model.display()
# To visualize the computed results, a test data set is generated within the ANN input domain and the tensorflow model
# is evaluated on it. The solution point computed above is extracted and shown on the response surface plot.
sample_input = 6 * np.random.rand(10000, 2) - 3
sample_output = tf_model.predict(sample_input)
sol_point = [input_vars[0].value, input_vars[1].value, output_vars[0].value]
plot_results_2d(sample_input, sample_output, sol_point=sol_point)
# The model parameters computed during bound tightening can be saved for future use of the same model. See the
# 'load_precomputed_parameters_example.py' file on more information on how to load precomputed parameters
ann_model.save_param('data/peaks3x10_param')
"numpy.random.rand",
"pyomo.environ.Block",
"pyomo.environ.Objective",
"relumip.utils.visualization.plot_results_2d",
"relumip.AnnModel",
"pyomo.environ.Var",
"tensorflow.keras.models.load_model",
"pyomo.environ.SolverFactory",
"pyomo.environ.ConcreteModel"
] | [((259, 307), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""data/peaks_3x10.h5"""'], {}), "('data/peaks_3x10.h5')\n", (285, 307), True, 'import tensorflow as tf\n'), ((377, 396), 'pyomo.environ.ConcreteModel', 'pyo.ConcreteModel', ([], {}), '()\n', (394, 396), True, 'import pyomo.environ as pyo\n'), ((508, 519), 'pyomo.environ.Block', 'pyo.Block', ([], {}), '()\n', (517, 519), True, 'import pyomo.environ as pyo\n'), ((758, 799), 'pyomo.environ.Var', 'pyo.Var', ([], {'within': 'pyo.Reals', 'bounds': '(-3, 3)'}), '(within=pyo.Reals, bounds=(-3, 3))\n', (765, 799), True, 'import pyomo.environ as pyo\n'), ((819, 860), 'pyomo.environ.Var', 'pyo.Var', ([], {'within': 'pyo.Reals', 'bounds': '(-3, 3)'}), '(within=pyo.Reals, bounds=(-3, 3))\n', (826, 860), True, 'import pyomo.environ as pyo\n'), ((880, 929), 'pyomo.environ.Var', 'pyo.Var', ([], {'bounds': '(-10000, 10000)', 'within': 'pyo.Reals'}), '(bounds=(-10000, 10000), within=pyo.Reals)\n', (887, 929), True, 'import pyomo.environ as pyo\n'), ((1218, 1243), 'pyomo.environ.SolverFactory', 'pyo.SolverFactory', (['"""glpk"""'], {}), "('glpk')\n", (1235, 1243), True, 'import pyomo.environ as pyo\n'), ((1301, 1355), 'relumip.AnnModel', 'AnnModel', ([], {'tf_model': 'tf_model', 'modeling_language': '"""PYOMO"""'}), "(tf_model=tf_model, modeling_language='PYOMO')\n", (1309, 1355), False, 'from relumip import AnnModel\n'), ((2057, 2113), 'pyomo.environ.Objective', 'pyo.Objective', ([], {'expr': 'model.ann.Output', 'sense': 'pyo.minimize'}), '(expr=model.ann.Output, sense=pyo.minimize)\n', (2070, 2113), True, 'import pyomo.environ as pyo\n'), ((2557, 2622), 'relumip.utils.visualization.plot_results_2d', 'plot_results_2d', (['sample_input', 'sample_output'], {'sol_point': 'sol_point'}), '(sample_input, sample_output, sol_point=sol_point)\n', (2572, 2622), False, 'from relumip.utils.visualization import plot_results_2d\n'), ((2404, 2428), 'numpy.random.rand', 'np.random.rand', (['(10000)', '(2)'], 
{}), '(10000, 2)\n', (2418, 2428), True, 'import numpy as np\n')] |
"""
Class for describing tesselations of the sphere
Following the usual usage, a tesselation or tiling of the sphere is a
covering of the sphere by a set of geometrical shapes called tiles, such that
each point of the sphere belongs to one and only one tile (we will be negligent
about edges of tiles). Thus, no two tiles overlap, and there is no point on the
sphere which does not belong to a tile.
Such tilings provide a natural spatial grouping of supernovae on the sky. Such
spatial groupings lead to similar (if not same) observational properties in
terms of the set of observational pointings, as well as the sky properties (eg.
seeing, psf, airmass, MW extinction etc.). The similarities increase as the size of the tiles
decrease. Therefore, a sensible way to distribute SN simulations is by grouping
the simulations on small tiles together.
Allowed tilings must satisfy certain properties as encoded in the abstract base
class below as well as listed here. Additionally, there are desired properties
that are helpful (for example in speeding up the process), but not essential to
the computations. Such methods may involve properties like equal area of tiles,
hierarchical structure of tiles, efficient queries, or precomputed results for
OpSim outputs.
Methods and Attributes Required and Intended Usage:
---------------------------------------------------
- tileIDSequence : Obtain the entire set of tiles which cover the unit
sphere
- pointingSequenceForTile : A method to obtain the maximal set of
pointings that may overlap a point on a tile. Obviously, there can be
points in a tile which do not overlap with subsets of such pointings.
The intended usage is to simulate all SN in a tile together by using
the set of maximal pointings. This can be done in modes
"""
from __future__ import absolute_import, print_function
from future.utils import with_metaclass
import abc
import numpy as np
__all__ = ["Tiling"]
class Tiling(with_metaclass(abc.ABCMeta, object)):
"""
Abstract Base Class for tilings specifying the methods that are mandatory
Attributes
----------
tileIDSequence : sequence
sequence of IDs indexing the tiles. The IDs may be integers or strings.
Methods
-------
pointingSequenceForTile :
"""
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractproperty
def tileIDSequence(self):
pass
@abc.abstractmethod
def pointingSequenceForTile(self, tileID, allPointings=None, columns=None,
**kwargs):
"""
Return a sequence of IDs identifying the maximal set of OpSim pointings
(obsHistID) that intersect with the tile having ID tileID.
Parameters
----------
tileID : int, or string, mandatory
Index for desired tile (should not be for a sequence)
allPointings : instance of {string, DBConnection, `pandas.DataFrame`}
Information about a set of pointings we will worry about. The set
of pointings may be in a database or a dataFrame, and different ways
of connecting to them is ideally supported.
columns : tuple of strings, defaults to None
if None returns only obsHistIDs. Otherwise returns the columns
listed
kwargs : extra parameters
specify method to use optional precomputations to speed up this
function
Returns
-------
`numpy.ndarray` of obsHistIDs
.. notes: This can be a crude method returning all of the pointings in
allPointings if one runs in a pedantic mode later to do a more
careful filtering. Even in those cases, this may be helpful in
reducing the nimbers
"""
pass
@abc.abstractmethod
def area(self, tileID):
"""
return the area of the tile with ID tileID
Parameters
----------
tileID : int or string, or `numpy.ndarray` thereof, mandatory
Index for desired tile
Returns
-------
area : `numpy.ndarray` of dtype float
"""
pass
@abc.abstractmethod
def tileIDsForSN(self, ra, dec):
"""
return a numpy array of tileIDs for point sources located at ra, dec
where ra, dec are `numpy.ndarrays` each of size equal to number of
point sources.
Parameters
----------
ra : `numpy.ndarray` of dtype float, mandatory
ra values
dec : `numpy.ndarray` of dtype float, mandatory
dec values
Returns
-------
`numpy.ndarray` of tileIDs. So the dtype is probably integer or string
"""
pass
@abc.abstractmethod
def positions(self, tileID, numSamples):
"""
return a tuple of numpy arrays theta and phi, each of size numSamples
"""
@staticmethod
def samplePatchOnSphere(phi, theta, delta, size, rng, degrees=True):
"""
Uniformly distributes samples on a patch on a sphere between
phi \pm delta, and theta \pm delta on a sphere. Uniform distribution
implies that the number of points in a patch of sphere is proportional
to the area of the patch. Here, the coordinate system is the usual
spherical coordinate system with the azimuthal angle theta going from
0 degrees at the North Pole, to 90 degrees at the South Pole, through
0. at the equator.
This function is not equipped to handle wrap-around the ranges of theta
phi and therefore does not work at the poles.
Parameters
----------
phi: float, mandatory, degrees
center of the spherical patch in ra with range
theta: float, mandatory, degrees
delta: float, mandatory, degrees
size: int, mandatory
number of samples
seed : int, optional, defaults to 1
random Seed used for generating values
degrees : bool, optional, defaults to True
if True, returns angles in degrees, else in
radians
Returns
-------
tuple of (phivals, thetavals) where phivals and thetavals are arrays of
size size in degrees.
"""
u = rng.uniform(size=size)
v = rng.uniform(size=size)
phi = np.radians(phi)
theta = np.radians(theta)
delta = np.radians(delta)
phivals = 2. * delta * u + (phi - delta)
phivals = np.where(phivals >= 0., phivals, phivals + 2. * np.pi)
# use conventions in spherical coordinates
# theta = np.pi/2.0 - theta
thetamax = theta + delta
thetamin = theta - delta
# if thetamax > np.pi or thetamin < 0. :
# raise ValueError('Function not implemented to cover wrap around poles')
# Cumulative Density Function is cos(thetamin) - cos(theta) / cos(thetamin) - cos(thetamax)
a = np.cos(thetamin) - np.cos(thetamax)
thetavals = np.arccos(-v * a + np.cos(thetamin))
if degrees:
return np.degrees(phivals), np.degrees(thetavals)
else:
return phivals, thetavals
| [
"numpy.radians",
"future.utils.with_metaclass",
"numpy.where",
"numpy.cos",
"numpy.degrees"
] | [((1994, 2029), 'future.utils.with_metaclass', 'with_metaclass', (['abc.ABCMeta', 'object'], {}), '(abc.ABCMeta, object)\n', (2008, 2029), False, 'from future.utils import with_metaclass\n'), ((6425, 6440), 'numpy.radians', 'np.radians', (['phi'], {}), '(phi)\n', (6435, 6440), True, 'import numpy as np\n'), ((6457, 6474), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (6467, 6474), True, 'import numpy as np\n'), ((6491, 6508), 'numpy.radians', 'np.radians', (['delta'], {}), '(delta)\n', (6501, 6508), True, 'import numpy as np\n'), ((6577, 6633), 'numpy.where', 'np.where', (['(phivals >= 0.0)', 'phivals', '(phivals + 2.0 * np.pi)'], {}), '(phivals >= 0.0, phivals, phivals + 2.0 * np.pi)\n', (6585, 6633), True, 'import numpy as np\n'), ((7034, 7050), 'numpy.cos', 'np.cos', (['thetamin'], {}), '(thetamin)\n', (7040, 7050), True, 'import numpy as np\n'), ((7053, 7069), 'numpy.cos', 'np.cos', (['thetamax'], {}), '(thetamax)\n', (7059, 7069), True, 'import numpy as np\n'), ((7109, 7125), 'numpy.cos', 'np.cos', (['thetamin'], {}), '(thetamin)\n', (7115, 7125), True, 'import numpy as np\n'), ((7167, 7186), 'numpy.degrees', 'np.degrees', (['phivals'], {}), '(phivals)\n', (7177, 7186), True, 'import numpy as np\n'), ((7188, 7209), 'numpy.degrees', 'np.degrees', (['thetavals'], {}), '(thetavals)\n', (7198, 7209), True, 'import numpy as np\n')] |
import scipy.spatial as ssp
import numpy as np
import swarms.commons.utils as U
import shapely.geometry as sg
from swarms.base import Agent
class Evader(Agent):
def __init__(self, experiment):
super(Evader, self).__init__()
self.obs_radius = experiment.obs_radius
self.world_size = experiment.world_size
self.torus = experiment.torus
self.dynamics = 'direct'
self.max_speed = 2 * 10 # cm/s
if self.torus:
self.bounding_box = np.array([0., 2 * self.world_size, 0., 2 * self.world_size])
else:
self.bounding_box = np.array([0., self.world_size, 0., self.world_size])
self.action_callback = self.step
    def reset(self, state):
        """Reset the evader to position ``state`` with zero velocity.

        ``state`` is assigned directly to ``self.state.p_pos`` -- presumably
        an (x, y) array-like of length 2, matching the 2-D zero velocity set
        below; TODO confirm against callers.
        """
        self.state.p_pos = state
        self.state.p_vel = np.zeros(2)
def step(self, agent, world):
if self.torus:
points_center = np.vstack([world.agent_states[:, 0:2], self.state.p_pos])
pursuers_down_right = np.hstack([world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2]])
pursuers_up_left = np.hstack([world.agent_states[:, 0:1], world.agent_states[:, 1:2] + world.world_size])
pursuers_up_right = np.hstack(
[world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2] + world.world_size])
evader_down_right = np.hstack([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2]])
evader_up_left = np.hstack([self.state.p_pos[0:1], self.state.p_pos[1:2] + world.world_size])
evader_up_right = np.hstack([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2] + world.world_size])
points_down_right = np.hstack([points_center[:, 0:1] + world.world_size, points_center[:, 1:2]])
points_up_left = np.hstack([points_center[:, 0:1], points_center[:, 1:2] + world.world_size])
points_up_right = np.hstack(
[points_center[:, 0:1] + world.world_size, points_center[:, 1:2] + world.world_size])
nodes = np.vstack([world.agent_states[:, 0:2],
pursuers_down_right,
pursuers_up_left,
pursuers_up_right,
self.state.p_pos,
evader_down_right,
evader_up_left,
evader_up_right])
dist_matrix_full = U.get_euclid_distances(nodes)
quadrant_check = np.sign(self.state.p_pos - world.world_size / 2)
if np.all(quadrant_check == np.array([1, 1])):
evader_quadrant = 0
elif np.all(quadrant_check == np.array([-1, 1])):
evader_quadrant = 1
elif np.all(quadrant_check == np.array([1, -1])):
evader_quadrant = 2
elif np.all(quadrant_check == np.array([-1, -1])):
evader_quadrant = 3
evader_dist = dist_matrix_full[:-4, -4 + evader_quadrant]
sub_list = list(np.where(evader_dist < self.obs_radius)[0])
if len(sub_list) > 10:
sub_list = list(np.argsort(evader_dist)[0:10])
sub_list.append(4 * world.nr_agents + evader_quadrant)
evader_sub = len(sub_list) - 1
closest_pursuer = np.where(evader_dist == evader_dist.min())[0]
nodes_center_sub = nodes[sub_list, :]
nodes_left = np.copy(nodes_center_sub)
nodes_left[:, 0] = self.bounding_box[0] - (nodes_left[:, 0] - self.bounding_box[0])
nodes_right = np.copy(nodes_center_sub)
nodes_right[:, 0] = self.bounding_box[1] + (self.bounding_box[1] - nodes_right[:, 0])
nodes_down = np.copy(nodes_center_sub)
nodes_down[:, 1] = self.bounding_box[2] - (nodes_down[:, 1] - self.bounding_box[2])
nodes_up = np.copy(nodes_center_sub)
nodes_up[:, 1] = self.bounding_box[3] + (self.bounding_box[3] - nodes_up[:, 1])
points = np.vstack([nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up])
else:
nodes = np.vstack([world.agent_states[:, 0:2],
self.state.p_pos,
])
distances = U.get_euclid_distances(nodes)
evader_dist = distances[-1, :-1]
closest_pursuer = np.where(evader_dist == evader_dist.min())[0]
sub_list = list(np.where(evader_dist < self.obs_radius)[0])
if len(sub_list) > 10:
sub_list = list(np.argsort(evader_dist)[0:10])
sub_list.append(world.nr_agents)
evader_sub = len(sub_list) - 1
nodes_center_sub = nodes[sub_list, :]
nodes_left = np.copy(nodes_center_sub)
nodes_left[:, 0] = self.bounding_box[0] - (nodes_left[:, 0] - self.bounding_box[0])
nodes_right = np.copy(nodes_center_sub)
nodes_right[:, 0] = self.bounding_box[1] + (self.bounding_box[1] - nodes_right[:, 0])
nodes_down = np.copy(nodes_center_sub)
nodes_down[:, 1] = self.bounding_box[2] - (nodes_down[:, 1] - self.bounding_box[2])
nodes_up = np.copy(nodes_center_sub)
nodes_up[:, 1] = self.bounding_box[3] + (self.bounding_box[3] - nodes_up[:, 1])
points = np.vstack([nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up])
vor = ssp.Voronoi(points)
d = np.zeros(2)
for i, ridge in enumerate(vor.ridge_points):
if evader_sub in set(ridge) and np.all([r <= evader_sub for r in ridge]):
if self.torus:
neighbor = min([sub_list[r] for r in ridge])
else:
# neighbor = min(ridge)
neighbor = min([sub_list[r] for r in ridge])
if neighbor in closest_pursuer:
ridge_inds = vor.ridge_vertices[i]
a = vor.vertices[ridge_inds[0], :]
b = vor.vertices[ridge_inds[1], :]
line_of_control = b - a
L_i = np.linalg.norm(line_of_control)
if self.torus:
xi = nodes[neighbor, :] - nodes[4 * world.nr_agents + evader_quadrant]
else:
xi = nodes[neighbor, :] - self.state.p_pos
eta_h_i = xi / np.linalg.norm(xi)
eta_v_i = np.array([-eta_h_i[1], eta_h_i[0]])
if self.torus:
line1 = sg.LineString([nodes[4 * world.nr_agents + evader_quadrant], nodes[neighbor, :]])
else:
line1 = sg.LineString([self.state.p_pos, nodes[neighbor, :]])
line2 = sg.LineString([a, b])
intersection = line1.intersection(line2)
if not intersection.is_empty:
inter_point = np.hstack(intersection.xy)
if np.dot(line_of_control, eta_v_i.flatten()) > 0:
l_i = np.linalg.norm(a - inter_point)
else:
l_i = np.linalg.norm(b - inter_point)
else:
if np.dot(line_of_control, eta_v_i.flatten()) > 0:
l_i = 0
else:
l_i = L_i
alpha_h_i = - L_i / 2
alpha_v_i = (l_i ** 2 - (L_i - l_i) ** 2) / (2 * np.linalg.norm(xi))
d = (alpha_h_i * eta_h_i - alpha_v_i * eta_v_i) / np.sqrt(alpha_h_i ** 2 + alpha_v_i ** 2)
assert ('d' in locals())
return d
| [
"numpy.copy",
"numpy.sqrt",
"numpy.hstack",
"numpy.where",
"numpy.linalg.norm",
"swarms.commons.utils.get_euclid_distances",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"shapely.geometry.LineString",
"numpy.vstack",
"numpy.sign",
"scipy.spatial.Voronoi",
"numpy.all"
] | [((791, 802), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (799, 802), True, 'import numpy as np\n'), ((5479, 5498), 'scipy.spatial.Voronoi', 'ssp.Voronoi', (['points'], {}), '(points)\n', (5490, 5498), True, 'import scipy.spatial as ssp\n'), ((5512, 5523), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5520, 5523), True, 'import numpy as np\n'), ((500, 562), 'numpy.array', 'np.array', (['[0.0, 2 * self.world_size, 0.0, 2 * self.world_size]'], {}), '([0.0, 2 * self.world_size, 0.0, 2 * self.world_size])\n', (508, 562), True, 'import numpy as np\n'), ((607, 661), 'numpy.array', 'np.array', (['[0.0, self.world_size, 0.0, self.world_size]'], {}), '([0.0, self.world_size, 0.0, self.world_size])\n', (615, 661), True, 'import numpy as np\n'), ((889, 946), 'numpy.vstack', 'np.vstack', (['[world.agent_states[:, 0:2], self.state.p_pos]'], {}), '([world.agent_states[:, 0:2], self.state.p_pos])\n', (898, 946), True, 'import numpy as np\n'), ((981, 1072), 'numpy.hstack', 'np.hstack', (['[world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2]]'], {}), '([world.agent_states[:, 0:1] + world.world_size, world.\n agent_states[:, 1:2]])\n', (990, 1072), True, 'import numpy as np\n'), ((1099, 1190), 'numpy.hstack', 'np.hstack', (['[world.agent_states[:, 0:1], world.agent_states[:, 1:2] + world.world_size]'], {}), '([world.agent_states[:, 0:1], world.agent_states[:, 1:2] + world.\n world_size])\n', (1108, 1190), True, 'import numpy as np\n'), ((1218, 1328), 'numpy.hstack', 'np.hstack', (['[world.agent_states[:, 0:1] + world.world_size, world.agent_states[:, 1:2] +\n world.world_size]'], {}), '([world.agent_states[:, 0:1] + world.world_size, world.\n agent_states[:, 1:2] + world.world_size])\n', (1227, 1328), True, 'import numpy as np\n'), ((1373, 1449), 'numpy.hstack', 'np.hstack', (['[self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2]]'], {}), '([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2]])\n', (1382, 1449), True, 
'import numpy as np\n'), ((1479, 1555), 'numpy.hstack', 'np.hstack', (['[self.state.p_pos[0:1], self.state.p_pos[1:2] + world.world_size]'], {}), '([self.state.p_pos[0:1], self.state.p_pos[1:2] + world.world_size])\n', (1488, 1555), True, 'import numpy as np\n'), ((1586, 1685), 'numpy.hstack', 'np.hstack', (['[self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2] + world.\n world_size]'], {}), '([self.state.p_pos[0:1] + world.world_size, self.state.p_pos[1:2] +\n world.world_size])\n', (1595, 1685), True, 'import numpy as np\n'), ((1714, 1790), 'numpy.hstack', 'np.hstack', (['[points_center[:, 0:1] + world.world_size, points_center[:, 1:2]]'], {}), '([points_center[:, 0:1] + world.world_size, points_center[:, 1:2]])\n', (1723, 1790), True, 'import numpy as np\n'), ((1820, 1896), 'numpy.hstack', 'np.hstack', (['[points_center[:, 0:1], points_center[:, 1:2] + world.world_size]'], {}), '([points_center[:, 0:1], points_center[:, 1:2] + world.world_size])\n', (1829, 1896), True, 'import numpy as np\n'), ((1927, 2026), 'numpy.hstack', 'np.hstack', (['[points_center[:, 0:1] + world.world_size, points_center[:, 1:2] + world.\n world_size]'], {}), '([points_center[:, 0:1] + world.world_size, points_center[:, 1:2] +\n world.world_size])\n', (1936, 2026), True, 'import numpy as np\n'), ((2061, 2236), 'numpy.vstack', 'np.vstack', (['[world.agent_states[:, 0:2], pursuers_down_right, pursuers_up_left,\n pursuers_up_right, self.state.p_pos, evader_down_right, evader_up_left,\n evader_up_right]'], {}), '([world.agent_states[:, 0:2], pursuers_down_right,\n pursuers_up_left, pursuers_up_right, self.state.p_pos,\n evader_down_right, evader_up_left, evader_up_right])\n', (2070, 2236), True, 'import numpy as np\n'), ((2478, 2507), 'swarms.commons.utils.get_euclid_distances', 'U.get_euclid_distances', (['nodes'], {}), '(nodes)\n', (2500, 2507), True, 'import swarms.commons.utils as U\n'), ((2538, 2586), 'numpy.sign', 'np.sign', (['(self.state.p_pos - world.world_size / 2)'], 
{}), '(self.state.p_pos - world.world_size / 2)\n', (2545, 2586), True, 'import numpy as np\n'), ((3480, 3505), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (3487, 3505), True, 'import numpy as np\n'), ((3628, 3653), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (3635, 3653), True, 'import numpy as np\n'), ((3777, 3802), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (3784, 3802), True, 'import numpy as np\n'), ((3922, 3947), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (3929, 3947), True, 'import numpy as np\n'), ((4062, 4138), 'numpy.vstack', 'np.vstack', (['[nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up]'], {}), '([nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up])\n', (4071, 4138), True, 'import numpy as np\n'), ((4174, 4231), 'numpy.vstack', 'np.vstack', (['[world.agent_states[:, 0:2], self.state.p_pos]'], {}), '([world.agent_states[:, 0:2], self.state.p_pos])\n', (4183, 4231), True, 'import numpy as np\n'), ((4320, 4349), 'swarms.commons.utils.get_euclid_distances', 'U.get_euclid_distances', (['nodes'], {}), '(nodes)\n', (4342, 4349), True, 'import swarms.commons.utils as U\n'), ((4805, 4830), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (4812, 4830), True, 'import numpy as np\n'), ((4953, 4978), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (4960, 4978), True, 'import numpy as np\n'), ((5102, 5127), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (5109, 5127), True, 'import numpy as np\n'), ((5247, 5272), 'numpy.copy', 'np.copy', (['nodes_center_sub'], {}), '(nodes_center_sub)\n', (5254, 5272), True, 'import numpy as np\n'), ((5387, 5463), 'numpy.vstack', 'np.vstack', (['[nodes_center_sub, nodes_down, nodes_left, nodes_right, nodes_up]'], {}), '([nodes_center_sub, nodes_down, nodes_left, nodes_right, 
nodes_up])\n', (5396, 5463), True, 'import numpy as np\n'), ((5622, 5664), 'numpy.all', 'np.all', (['[(r <= evader_sub) for r in ridge]'], {}), '([(r <= evader_sub) for r in ridge])\n', (5628, 5664), True, 'import numpy as np\n'), ((2627, 2643), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2635, 2643), True, 'import numpy as np\n'), ((3076, 3115), 'numpy.where', 'np.where', (['(evader_dist < self.obs_radius)'], {}), '(evader_dist < self.obs_radius)\n', (3084, 3115), True, 'import numpy as np\n'), ((4499, 4538), 'numpy.where', 'np.where', (['(evader_dist < self.obs_radius)'], {}), '(evader_dist < self.obs_radius)\n', (4507, 4538), True, 'import numpy as np\n'), ((6176, 6207), 'numpy.linalg.norm', 'np.linalg.norm', (['line_of_control'], {}), '(line_of_control)\n', (6190, 6207), True, 'import numpy as np\n'), ((6516, 6551), 'numpy.array', 'np.array', (['[-eta_h_i[1], eta_h_i[0]]'], {}), '([-eta_h_i[1], eta_h_i[0]])\n', (6524, 6551), True, 'import numpy as np\n'), ((6842, 6863), 'shapely.geometry.LineString', 'sg.LineString', (['[a, b]'], {}), '([a, b])\n', (6855, 6863), True, 'import shapely.geometry as sg\n'), ((2724, 2741), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (2732, 2741), True, 'import numpy as np\n'), ((3187, 3210), 'numpy.argsort', 'np.argsort', (['evader_dist'], {}), '(evader_dist)\n', (3197, 3210), True, 'import numpy as np\n'), ((4610, 4633), 'numpy.argsort', 'np.argsort', (['evader_dist'], {}), '(evader_dist)\n', (4620, 4633), True, 'import numpy as np\n'), ((6467, 6485), 'numpy.linalg.norm', 'np.linalg.norm', (['xi'], {}), '(xi)\n', (6481, 6485), True, 'import numpy as np\n'), ((6620, 6705), 'shapely.geometry.LineString', 'sg.LineString', (['[nodes[4 * world.nr_agents + evader_quadrant], nodes[neighbor, :]]'], {}), '([nodes[4 * world.nr_agents + evader_quadrant], nodes[neighbor,\n :]])\n', (6633, 6705), True, 'import shapely.geometry as sg\n'), ((6760, 6813), 'shapely.geometry.LineString', 'sg.LineString', 
(['[self.state.p_pos, nodes[neighbor, :]]'], {}), '([self.state.p_pos, nodes[neighbor, :]])\n', (6773, 6813), True, 'import shapely.geometry as sg\n'), ((7014, 7040), 'numpy.hstack', 'np.hstack', (['intersection.xy'], {}), '(intersection.xy)\n', (7023, 7040), True, 'import numpy as np\n'), ((7687, 7727), 'numpy.sqrt', 'np.sqrt', (['(alpha_h_i ** 2 + alpha_v_i ** 2)'], {}), '(alpha_h_i ** 2 + alpha_v_i ** 2)\n', (7694, 7727), True, 'import numpy as np\n'), ((2822, 2839), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (2830, 2839), True, 'import numpy as np\n'), ((7151, 7182), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - inter_point)'], {}), '(a - inter_point)\n', (7165, 7182), True, 'import numpy as np\n'), ((7247, 7278), 'numpy.linalg.norm', 'np.linalg.norm', (['(b - inter_point)'], {}), '(b - inter_point)\n', (7261, 7278), True, 'import numpy as np\n'), ((7596, 7614), 'numpy.linalg.norm', 'np.linalg.norm', (['xi'], {}), '(xi)\n', (7610, 7614), True, 'import numpy as np\n'), ((2920, 2938), 'numpy.array', 'np.array', (['[-1, -1]'], {}), '([-1, -1])\n', (2928, 2938), True, 'import numpy as np\n')] |
import numpy
from ..codecs.InflTCorpFileCodec import InflTCorpFileCodec
from ..slexicon.SKey import *
class ModelInflInData(object):
    """Inflection training data plus word -> one-hot matrix encoding helpers."""
    WVEC_LEN = 9 # keep 8 letters from the lemma + 1 for the category
    MAX_LETTER_IDX = 28 # a-z plus <oov> and <>
    def __init__(self, fn):
        # Load the inflection corpus entries from file.
        self.entries = InflTCorpFileCodec.load(fn)
    @classmethod
    def wordToVec(cls, word, category):
        """Encode ``word`` and its grammatical ``category`` as a one-hot
        float32 matrix of shape (WVEC_LEN, MAX_LETTER_IDX).

        The word is lower-cased and reversed so its last character ends up in
        row 1; at most WVEC_LEN - 1 letters are kept.  Letter columns:
        0 = empty, 1 = not a-z, 2..27 = a..z.  Row 0 holds the category.
        """
        vec = numpy.zeros(shape=(cls.WVEC_LEN, cls.MAX_LETTER_IDX), dtype='float32')
        tail = word.lower()[::-1][:cls.WVEC_LEN - 1]
        for row, ch in enumerate(tail, start=1):
            code = ord(ch)
            col = code - 95 if 97 <= code <= 122 else 1
            vec[row, col] = 1
        # Row 0 carries the one-hot part-of-speech category.
        for col, known in enumerate((SKey.NOUN, SKey.VERB, SKey.ADJ, SKey.ADV)):
            if category == known:
                vec[0, col] = 1
                break
        else:
            raise ValueError('Unhandled category: %s' % category)
        return vec
    @staticmethod
    def getLetterClasses():
        """Return the ordered list of input letter class labels."""
        return ['<>', '<oov>'] + [chr(code) for code in range(97, 123)]
| [
"numpy.zeros"
] | [((583, 653), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(cls.WVEC_LEN, cls.MAX_LETTER_IDX)', 'dtype': '"""float32"""'}), "(shape=(cls.WVEC_LEN, cls.MAX_LETTER_IDX), dtype='float32')\n", (594, 653), False, 'import numpy\n')] |
import itertools
from itertools import chain, combinations
from copy import deepcopy
import math
import numpy as np
from openfermion.linalg import LinearQubitOperator
from openfermion.ops import FermionOperator, QubitOperator
import utils.cs_vqe_tools as c
import utils.qonversion_tools as qonvert
def bin_to_int(bits):
    """Convert a bit sequence (string or list of 0/1 ints, MSB first) to int.

    The input is never mutated.
    """
    digits = [int(ch) for ch in bits] if isinstance(bits, str) else list(bits)
    value = 0
    for bit in digits:
        # Horner's scheme: shift the accumulator and add the next bit.
        value = 2 * value + bit
    return value
def int_to_bin(integer, num_qubits):
    """Render ``integer`` as a zero-padded, MSB-first binary string of
    ``num_qubits`` characters.

    Raises:
        ValueError: if ``integer`` does not fit into ``num_qubits`` bits.
    """
    if integer >= 2 ** num_qubits:
        raise ValueError('Input integer larger than specified number of bits.')
    return format(integer, '0{}b'.format(num_qubits))
def A_action(molecule, num_qubits, basis_index, rot=False):
    """Describe the action of the A operator on basis state ``basis_index``.

    The previous implementation materialised the full 2**num_qubits basis via
    ``itertools.product`` just to read a single bit string, which is
    exponential in time and memory.  Here the bits are read directly from
    ``basis_index`` -- equivalent, because ``itertools.product([0, 1],
    repeat=n)`` enumerates bit strings in binary-counting order.

    Args:
        molecule (str): 'H2O' or 'HeH+' (hard-coded qubit maps).
        num_qubits (int): number of qubits in the register.
        basis_index (int): index of the computational basis state.
        rot (bool): use the rotated frame (only affects 'HeH+').

    Returns:
        tuple: ``(i1, i2, parity, Z_loc)`` where ``i1`` is ``basis_index``,
        ``i2`` the index of the coupled basis state, ``parity`` a bit-sum used
        for the sign, and ``Z_loc`` the bit value at the Z-acted qubit.

    Raises:
        ValueError: if ``molecule`` is not recognised.
    """
    # Bits of basis_index, most-significant first (matches the ordering of
    # the original itertools.product enumeration).
    b1 = [(basis_index >> (num_qubits - 1 - k)) & 1 for k in range(num_qubits)]
    i1 = basis_index

    def _flipped(*positions):
        # Index of the basis state with the given (MSB-first) bits toggled.
        out = basis_index
        for pos in positions:
            out ^= 1 << (num_qubits - 1 - pos)
        return out

    if molecule == 'H2O':
        i2 = _flipped(5)
        parity = b1[9] + b1[8] + b1[7] + b1[6] + b1[4]
        Z_loc = b1[5]
    elif molecule == 'HeH+':
        if not rot:
            i2 = _flipped(0, 6)
            parity = 1 + sum(b1)
            Z_loc = b1[6]
        else:
            i2 = _flipped(6)
            parity = b1[1] + b1[2] + b1[3] + b1[4] + b1[5]
            Z_loc = b1[6]
    else:
        raise ValueError('Molecule is not recognised.')
    return i1, i2, parity, Z_loc
def add_eigenstate(molecule, r1, r2, index, num_qubits, theta=0, custom_amp=None, rot=False):
    """Build the state vector supported on the two basis states coupled by the
    A operator at ``index``.

    The two amplitudes are sin/cos of an angle derived from ``r1``/``r2`` with
    a global phase ``exp(i*theta)``, unless ``custom_amp`` supplies them
    explicitly as a pair.
    """
    i1, i2, parity, Z_loc = A_action(molecule, num_qubits, index, rot)
    ratio = (1 + r2 * (-1) ** Z_loc) / (r1 * (-1) ** parity)
    angle = np.arctan(ratio)
    psi = [0] * 2 ** num_qubits
    if custom_amp is not None:
        psi[i1], psi[i2] = custom_amp[0], custom_amp[1]
    else:
        phase = np.exp(1j * theta)
        psi[i1] = np.sin(angle) * phase
        psi[i2] = np.cos(angle) * phase
    return np.array(psi)
def expectation(op, state, num_qubits):
    """Return the expectation value <state| op |state>.

    Args:
        op (QubitOperator): operator whose expectation is evaluated.
        state: state vector of length 2**num_qubits (any array-like).
        num_qubits (int): number of qubits.

    Returns:
        complex: the expectation value.
    """
    # isinstance is the idiomatic type check (also accepts subclasses);
    # the original used ``type(op) == QubitOperator``.
    assert isinstance(op, QubitOperator)
    state = np.array(state)
    conj_state = np.conjugate(state)
    linear_op = LinearQubitOperator(op, num_qubits)
    op_state = linear_op.matvec(state)
    return conj_state.dot(op_state)
def discard_generator(ham_noncon, ham_context, generators):
    """Move noncontextual terms that hit a discarded generator's Z position
    into the contextual part.

    For each generator only the position of its *first* 'Z' is considered.
    Any Pauli string in ``ham_noncon`` carrying a 'Z' at that position is
    transferred to the contextual Hamiltonian.  The inputs are not mutated.

    Returns:
        tuple: (new_ham_noncon, new_ham_context).
    """
    noncon_out = deepcopy(ham_noncon)
    context_out = deepcopy(ham_context)
    moved = set()
    for z_pos in (g.index('Z') for g in generators):
        for pauli, coeff in ham_noncon.items():
            if pauli not in moved and pauli[z_pos] == 'Z':
                context_out[pauli] = coeff
                del noncon_out[pauli]
                moved.add(pauli)
    return noncon_out, context_out
def rotate_operator(rotations, op):
    """Apply a sequence of rotations to every Pauli term of ``op``.

    Each rotation maps a Pauli string to a single new Pauli string with a
    sign (via ``c.apply_rotation``); the accumulated sign is folded into the
    term's coefficient.

    The original version created an unused local ``p_ref = deepcopy(p)``;
    removed here.

    Args:
        rotations: sequence of rotations accepted by ``c.apply_rotation``.
        op (dict): map Pauli string -> coefficient.

    Returns:
        dict: the rotated operator.
    """
    rot_op = {}
    for p, coeff in op.items():
        parity = 1
        for r in rotations:
            rotated = c.apply_rotation(r, p)
            p = list(rotated.keys())[0]
            parity *= rotated[p]
        rot_op[p] = parity * coeff
    return rot_op
def rotate_hamiltonian(rotations, ham, ham_noncon, ham_context):
    """Rotate the full Hamiltonian and split the result into its
    noncontextual and contextual parts.

    A term lands in the rotated noncontextual part iff its *original* Pauli
    string was a key of ``ham_noncon``; otherwise it goes to the contextual
    part.

    Returns:
        tuple: (rot_ham, rot_ham_noncon, rot_ham_context).
    """
    rot_ham = {}
    rot_ham_noncon = {}
    rot_ham_context = {}
    for original_p, coeff in ham.items():
        p = original_p
        sign = 1
        for r in rotations:
            rotated = c.apply_rotation(r, p)
            p = list(rotated.keys())[0]
            sign *= rotated[p]
        rot_ham[p] = sign * coeff
        # Membership is decided by the pre-rotation Pauli string.
        target = rot_ham_noncon if original_p in ham_noncon else rot_ham_context
        target[p] = sign * coeff
    return rot_ham, rot_ham_noncon, rot_ham_context
def rotate_state(rotations, state, num_qubits):
    """Apply the sequence of rotations to ``state`` and return the result.

    Each rotation ``r`` is turned into the operator
    (I - i*P)/sqrt(2), where P is the Pauli term keyed by ``r[1]``.
    NOTE(review): the exact structure of a rotation entry is assumed from
    ``r[1]`` being a Pauli-string key -- confirm against the producer of
    ``rotations``.
    """
    psi = deepcopy(state)
    inv_sqrt2 = 1 / np.sqrt(2)
    for r in rotations:
        pauli_part = qonvert.dict_to_QubitOperator({r[1]: inv_sqrt2 * 1j}, num_qubits)
        rotation_op = QubitOperator('', inv_sqrt2) - pauli_part
        psi = LinearQubitOperator(rotation_op, num_qubits).matvec(psi)
    return psi
def powerset(iterable):
    """Yield every subset of ``iterable`` as a tuple, ordered by subset size."""
    pool = list(iterable)
    by_size = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(by_size)
def qubit_map(molecule, num_qubits, rot=False):
    """Map each basis index to the pair of basis states coupled by A.

    The original version built ``B = list(itertools.product([0, 1],
    repeat=num_qubits))`` and never used it -- an O(2**n) waste removed here.

    Args:
        molecule (str): molecule tag understood by ``A_action``.
        num_qubits (int): number of qubits.
        rot (bool): forwarded to ``A_action``.

    Returns:
        dict: ``i1 -> [(i1, bits1), (i2, bits2)]`` with MSB-first bit strings.
    """
    mapping = {}
    for i in range(2 ** num_qubits):
        # Only the two coupled indices are needed from A_action.
        i1, i2 = A_action(molecule, num_qubits, i, rot)[0:2]
        mapping[i1] = [(i1, int_to_bin(i1, num_qubits)),
                       (i2, int_to_bin(i2, num_qubits))]
    return mapping
def find_eigenstate_indices(initial, removed_Z_indices, include_complement=False, num_qubits = None, molecule=None, rot=False):
    """Enumerate basis indices reached by flipping subsets of the removed-Z
    qubits of the reference bit string ``initial``.

    The original loop variable was named ``c``, shadowing the module alias
    ``utils.cs_vqe_tools as c`` -- a latent hazard, renamed here.  The
    redundant ``deepcopy`` of an (immutable) string is also dropped.

    Args:
        initial (str): reference bit string.
        removed_Z_indices (iterable): positions whose bits may be flipped.
        include_complement (bool): also append, for every index found, the
            state it maps to under the A operator (needs ``num_qubits`` and
            ``molecule``).
        num_qubits (int): register size, required if include_complement.
        molecule (str): molecule tag for ``A_action``.
        rot (bool): forwarded to ``A_action``.

    Returns:
        list: basis-state indices (ints), one per subset (complements last).
    """
    indices = []
    for subset in powerset(removed_Z_indices):
        bits = list(initial)
        for pos in subset:
            # Toggle the bit at this position.
            bits[pos] = str((int(bits[pos]) + 1) % 2)
        indices.append(bin_to_int(''.join(bits)))
    # Complement is simply the negative state, so is the same eigenvector.
    if include_complement:
        for i in list(indices):
            indices.append(A_action(molecule, num_qubits, basis_index=i, rot=rot)[1])
    return indices
def random_vector(n):
    """Return a random unit vector of dimension ``n`` as a list of floats.

    Draws ``n`` i.i.d. standard normals and divides by their Euclidean norm,
    which yields a direction uniformly distributed on the unit sphere.
    """
    draws = [np.random.normal() for _ in range(n)]
    norm = math.sqrt(sum(d * d for d in draws))
    return [d / norm for d in draws]
def random_complex_unit():
    """Return a complex number of unit modulus with a uniformly random phase."""
    re_part, im_part = random_vector(2)
    return re_part + im_part * 1j
def expectation_optimiser(molecule, ham_n, ham_c, r1, r2, amps, initial_state,
                          Z_indices, num_qubits, rotations=None, include_complement = False, rot = False):
    """Build the trial state from the eigenstate amplitudes and evaluate both
    Hamiltonian parts on it.

    Returns the pair (<psi|ham_n|psi>, <psi|ham_c|psi>).
    NOTE(review): ``amps`` is assumed to have one entry per index returned by
    ``find_eigenstate_indices`` -- confirm with callers.
    """
    # Basis states reachable by flipping the removed-Z qubits (optionally
    # including the complement states coupled by the A operator).
    eigenstate_indices = find_eigenstate_indices(initial_state, Z_indices, include_complement, num_qubits, molecule, rot)
    psi = np.array([0 for i in range(2**num_qubits)], dtype=complex)
    for index, i in enumerate(eigenstate_indices):
        # Superpose the (theta = 0) eigenstate of each index with its amplitude.
        psi += (amps[index])*add_eigenstate(molecule=molecule, r1=r1, r2=r2, theta=0, index=i, num_qubits=num_qubits, rot=rot)
    if rotations is not None:
        psi = rotate_state(rotations, psi, num_qubits)
    expect_noncon = expectation(ham_n, psi, num_qubits)
    expect_context = expectation(ham_c, psi, num_qubits)
    return expect_noncon, expect_context | [
"numpy.random.normal",
"numpy.sqrt",
"openfermion.linalg.LinearQubitOperator",
"numpy.conjugate",
"itertools.product",
"utils.cs_vqe_tools.apply_rotation",
"numpy.exp",
"numpy.array",
"itertools.combinations",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"numpy.arctan"
] | [((351, 365), 'copy.deepcopy', 'deepcopy', (['bits'], {}), '(bits)\n', (359, 365), False, 'from copy import deepcopy\n'), ((1153, 1165), 'copy.deepcopy', 'deepcopy', (['b1'], {}), '(b1)\n', (1161, 1165), False, 'from copy import deepcopy\n'), ((2105, 2125), 'numpy.arctan', 'np.arctan', (['amp_ratio'], {}), '(amp_ratio)\n', (2114, 2125), True, 'import numpy as np\n'), ((2470, 2483), 'numpy.array', 'np.array', (['psi'], {}), '(psi)\n', (2478, 2483), True, 'import numpy as np\n'), ((2585, 2600), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2593, 2600), True, 'import numpy as np\n'), ((2619, 2638), 'numpy.conjugate', 'np.conjugate', (['state'], {}), '(state)\n', (2631, 2638), True, 'import numpy as np\n'), ((2648, 2683), 'openfermion.linalg.LinearQubitOperator', 'LinearQubitOperator', (['op', 'num_qubits'], {}), '(op, num_qubits)\n', (2667, 2683), False, 'from openfermion.linalg import LinearQubitOperator\n'), ((2871, 2891), 'copy.deepcopy', 'deepcopy', (['ham_noncon'], {}), '(ham_noncon)\n', (2879, 2891), False, 'from copy import deepcopy\n'), ((2915, 2936), 'copy.deepcopy', 'deepcopy', (['ham_context'], {}), '(ham_context)\n', (2923, 2936), False, 'from copy import deepcopy\n'), ((4465, 4480), 'copy.deepcopy', 'deepcopy', (['state'], {}), '(state)\n', (4473, 4480), False, 'from copy import deepcopy\n'), ((1067, 1111), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': 'num_qubits'}), '([0, 1], repeat=num_qubits)\n', (1084, 1111), False, 'import itertools\n'), ((3449, 3460), 'copy.deepcopy', 'deepcopy', (['p'], {}), '(p)\n', (3457, 3460), False, 'from copy import deepcopy\n'), ((3914, 3925), 'copy.deepcopy', 'deepcopy', (['p'], {}), '(p)\n', (3922, 3925), False, 'from copy import deepcopy\n'), ((4645, 4682), 'openfermion.linalg.LinearQubitOperator', 'LinearQubitOperator', (['r_op', 'num_qubits'], {}), '(r_op, num_qubits)\n', (4664, 4682), False, 'from openfermion.linalg import LinearQubitOperator\n'), ((4974, 5018), 'itertools.product', 
'itertools.product', (['[0, 1]'], {'repeat': 'num_qubits'}), '([0, 1], repeat=num_qubits)\n', (4991, 5018), False, 'import itertools\n'), ((5837, 5854), 'copy.deepcopy', 'deepcopy', (['indices'], {}), '(indices)\n', (5845, 5854), False, 'from copy import deepcopy\n'), ((6084, 6102), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (6100, 6102), True, 'import numpy as np\n'), ((2298, 2307), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2304, 2307), True, 'import numpy as np\n'), ((2308, 2328), 'numpy.exp', 'np.exp', (['(1.0j * theta)'], {}), '(1.0j * theta)\n', (2314, 2328), True, 'import numpy as np\n'), ((2344, 2353), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2350, 2353), True, 'import numpy as np\n'), ((2354, 2374), 'numpy.exp', 'np.exp', (['(1.0j * theta)'], {}), '(1.0j * theta)\n', (2360, 2374), True, 'import numpy as np\n'), ((3557, 3579), 'utils.cs_vqe_tools.apply_rotation', 'c.apply_rotation', (['r', 'p'], {}), '(r, p)\n', (3573, 3579), True, 'import utils.cs_vqe_tools as c\n'), ((4023, 4045), 'utils.cs_vqe_tools.apply_rotation', 'c.apply_rotation', (['r', 'p'], {}), '(r, p)\n', (4039, 4045), True, 'import utils.cs_vqe_tools as c\n'), ((4844, 4862), 'itertools.combinations', 'combinations', (['s', 'r'], {}), '(s, r)\n', (4856, 4862), False, 'from itertools import chain, combinations\n'), ((5543, 5560), 'copy.deepcopy', 'deepcopy', (['initial'], {}), '(initial)\n', (5551, 5560), False, 'from copy import deepcopy\n'), ((4548, 4558), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4555, 4558), True, 'import numpy as np\n'), ((4601, 4611), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4608, 4611), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
A Python interface to mimic numpy.einsum
'''
import sys
import re
import ctypes
import numpy
from pyscf.lib import misc
# Load the compiled tblis bridge shipped with pyscf.
libtblis = misc.load_library('libtblis')
libtblis.as_einsum.restype = None
# C signature of as_einsum: three tensors, each described by
# (data pointer, ndim, shape, strides, index-label string), followed by the
# tblis dtype code and the alpha/beta scalars (passed as 0-d arrays).
libtblis.as_einsum.argtypes = (
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    ctypes.c_int,
    numpy.ctypeslib.ndpointer(), numpy.ctypeslib.ndpointer()
)
# Map a numpy dtype to the integer dtype code expected by libtblis.
tblis_dtype = {
    numpy.dtype(numpy.float32) : 0,
    numpy.dtype(numpy.double) : 1,
    numpy.dtype(numpy.complex64) : 2,
    numpy.dtype(numpy.complex128) : 3,
}
# Below this operand size the plain numpy.einsum is faster than dispatching
# to tblis; configurable via pyscf's lib_einsum_max_size setting.
EINSUM_MAX_SIZE = getattr(misc.__config__, 'lib_einsum_max_size', 2000)
# Keep a reference to the stock numpy implementation for the fallback path.
_numpy_einsum = numpy.einsum
def _contract(subscripts, *tensors, **kwargs):
    '''
    c = alpha * contract(a, b) + beta * c

    A two-operand einsum backed by the tblis library.  Small operands
    (fewer than EINSUM_MAX_SIZE elements) and non-floating dtypes fall back
    to the plain numpy.einsum implementation.

    Args:
        tensors (list of ndarray) : Tensors for the operation.

    Kwargs:
        out (ndarray) : If provided, the calculation is done into this array.
        dtype (ndarray) : If provided, forces the calculation to use the data
            type specified.
        alpha (number) : Default is 1
        beta (number) :  Default is 0
        order (str) : Memory order for the allocated output (default 'C');
            only consulted when ``out`` is not given.
    '''
    a = numpy.asarray(tensors[0])
    b = numpy.asarray(tensors[1])
    # Small-tensor fast path: plain numpy beats the tblis call overhead.
    if not kwargs and (a.size < EINSUM_MAX_SIZE or b.size < EINSUM_MAX_SIZE):
        return _numpy_einsum(subscripts, a, b)
    c_dtype = kwargs.get('dtype', numpy.result_type(a, b))
    # tblis only handles real/complex floating types; anything else falls
    # back to numpy.einsum.
    if (not (numpy.issubdtype(c_dtype, numpy.floating) or
             numpy.issubdtype(c_dtype, numpy.complexfloating))):
        return _numpy_einsum(subscripts, a, b)
    # Split e.g. 'ab,bc->ac' into ['ab', 'bc', 'ac'] ('->' part optional).
    sub_idx = re.split(',|->', subscripts)
    indices  = ''.join(sub_idx)
    if '->' not in subscripts:
        # Find characters which appear only once in the subscripts for c_descr
        # NOTE(review): this keeps labels in order of first appearance,
        # whereas numpy's implicit mode sorts them alphabetically -- confirm
        # callers always pass explicit '->' when the order matters.
        for x in set(indices):
            if indices.count(x) > 1:
                indices = indices.replace(x, '')
        sub_idx += [indices]
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0)
    # Fold the scalar types into the output dtype, then cast everything.
    c_dtype = numpy.result_type(c_dtype, alpha, beta)
    alpha = numpy.asarray(alpha, dtype=c_dtype)
    beta = numpy.asarray(beta , dtype=c_dtype)
    a = numpy.asarray(a, dtype=c_dtype)
    b = numpy.asarray(b, dtype=c_dtype)
    a_shape = a.shape
    b_shape = b.shape
    a_descr, b_descr, c_descr = sub_idx
    a_shape_dic = dict(zip(a_descr, a_shape))
    b_shape_dic = dict(zip(b_descr, b_shape))
    # Every label shared by both operands must agree on its dimension.
    if any(a_shape_dic[x] != b_shape_dic[x]
           for x in set(a_descr).intersection(b_descr)):
        raise ValueError('operands dimension error for "%s" : %s %s'
                         % (subscripts, a_shape, b_shape))
    ab_shape_dic = a_shape_dic
    ab_shape_dic.update(b_shape_dic)
    c_shape = tuple([ab_shape_dic[x] for x in c_descr])
    out = kwargs.get('out', None)
    if out is None:
        order = kwargs.get('order', 'C')
        c = numpy.empty(c_shape, dtype=c_dtype, order=order)
    else:
        # A caller-provided output buffer must already match exactly.
        assert(out.dtype == c_dtype)
        assert(out.shape == c_shape)
        c = out
    a_shape = (ctypes.c_size_t*a.ndim)(*a_shape)
    b_shape = (ctypes.c_size_t*b.ndim)(*b_shape)
    c_shape = (ctypes.c_size_t*c.ndim)(*c_shape)
    nbytes = c_dtype.itemsize
    # tblis expects strides in units of elements, not bytes.
    a_strides = (ctypes.c_size_t*a.ndim)(*[x//nbytes for x in a.strides])
    b_strides = (ctypes.c_size_t*b.ndim)(*[x//nbytes for x in b.strides])
    c_strides = (ctypes.c_size_t*c.ndim)(*[x//nbytes for x in c.strides])
    # Hand the raw buffers to the C library; each tensor is described by
    # (data, ndim, shape, strides, ascii index labels).
    libtblis.as_einsum(a, a.ndim, a_shape, a_strides, a_descr.encode('ascii'),
                       b, b.ndim, b_shape, b_strides, b_descr.encode('ascii'),
                       c, c.ndim, c_shape, c_strides, c_descr.encode('ascii'),
                       tblis_dtype[c_dtype], alpha, beta)
    return c
| [
"re.split",
"ctypes.POINTER",
"numpy.result_type",
"numpy.asarray",
"numpy.issubdtype",
"pyscf.lib.misc.load_library",
"numpy.ctypeslib.ndpointer",
"numpy.empty",
"numpy.dtype"
] | [((771, 800), 'pyscf.lib.misc.load_library', 'misc.load_library', (['"""libtblis"""'], {}), "('libtblis')\n", (788, 800), False, 'from pyscf.lib import misc\n'), ((872, 899), 'numpy.ctypeslib.ndpointer', 'numpy.ctypeslib.ndpointer', ([], {}), '()\n', (897, 899), False, 'import numpy\n'), ((919, 950), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (933, 950), False, 'import ctypes\n'), ((952, 983), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (966, 983), False, 'import ctypes\n'), ((989, 1018), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char'], {}), '(ctypes.c_char)\n', (1003, 1018), False, 'import ctypes\n'), ((1024, 1051), 'numpy.ctypeslib.ndpointer', 'numpy.ctypeslib.ndpointer', ([], {}), '()\n', (1049, 1051), False, 'import numpy\n'), ((1071, 1102), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (1085, 1102), False, 'import ctypes\n'), ((1104, 1135), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (1118, 1135), False, 'import ctypes\n'), ((1141, 1170), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char'], {}), '(ctypes.c_char)\n', (1155, 1170), False, 'import ctypes\n'), ((1176, 1203), 'numpy.ctypeslib.ndpointer', 'numpy.ctypeslib.ndpointer', ([], {}), '()\n', (1201, 1203), False, 'import numpy\n'), ((1223, 1254), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (1237, 1254), False, 'import ctypes\n'), ((1256, 1287), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_size_t'], {}), '(ctypes.c_size_t)\n', (1270, 1287), False, 'import ctypes\n'), ((1293, 1322), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char'], {}), '(ctypes.c_char)\n', (1307, 1322), False, 'import ctypes\n'), ((1346, 1373), 'numpy.ctypeslib.ndpointer', 'numpy.ctypeslib.ndpointer', ([], {}), '()\n', (1371, 1373), False, 'import numpy\n'), ((1375, 1402), 'numpy.ctypeslib.ndpointer', 
'numpy.ctypeslib.ndpointer', ([], {}), '()\n', (1400, 1402), False, 'import numpy\n'), ((1426, 1452), 'numpy.dtype', 'numpy.dtype', (['numpy.float32'], {}), '(numpy.float32)\n', (1437, 1452), False, 'import numpy\n'), ((1465, 1490), 'numpy.dtype', 'numpy.dtype', (['numpy.double'], {}), '(numpy.double)\n', (1476, 1490), False, 'import numpy\n'), ((1504, 1532), 'numpy.dtype', 'numpy.dtype', (['numpy.complex64'], {}), '(numpy.complex64)\n', (1515, 1532), False, 'import numpy\n'), ((1543, 1572), 'numpy.dtype', 'numpy.dtype', (['numpy.complex128'], {}), '(numpy.complex128)\n', (1554, 1572), False, 'import numpy\n'), ((2143, 2168), 'numpy.asarray', 'numpy.asarray', (['tensors[0]'], {}), '(tensors[0])\n', (2156, 2168), False, 'import numpy\n'), ((2177, 2202), 'numpy.asarray', 'numpy.asarray', (['tensors[1]'], {}), '(tensors[1])\n', (2190, 2202), False, 'import numpy\n'), ((2573, 2601), 're.split', 're.split', (['""",|->"""', 'subscripts'], {}), "(',|->', subscripts)\n", (2581, 2601), False, 'import re\n'), ((2976, 3015), 'numpy.result_type', 'numpy.result_type', (['c_dtype', 'alpha', 'beta'], {}), '(c_dtype, alpha, beta)\n', (2993, 3015), False, 'import numpy\n'), ((3028, 3063), 'numpy.asarray', 'numpy.asarray', (['alpha'], {'dtype': 'c_dtype'}), '(alpha, dtype=c_dtype)\n', (3041, 3063), False, 'import numpy\n'), ((3076, 3110), 'numpy.asarray', 'numpy.asarray', (['beta'], {'dtype': 'c_dtype'}), '(beta, dtype=c_dtype)\n', (3089, 3110), False, 'import numpy\n'), ((3120, 3151), 'numpy.asarray', 'numpy.asarray', (['a'], {'dtype': 'c_dtype'}), '(a, dtype=c_dtype)\n', (3133, 3151), False, 'import numpy\n'), ((3160, 3191), 'numpy.asarray', 'numpy.asarray', (['b'], {'dtype': 'c_dtype'}), '(b, dtype=c_dtype)\n', (3173, 3191), False, 'import numpy\n'), ((2363, 2386), 'numpy.result_type', 'numpy.result_type', (['a', 'b'], {}), '(a, b)\n', (2380, 2386), False, 'import numpy\n'), ((3831, 3879), 'numpy.empty', 'numpy.empty', (['c_shape'], {'dtype': 'c_dtype', 'order': 'order'}), 
'(c_shape, dtype=c_dtype, order=order)\n', (3842, 3879), False, 'import numpy\n'), ((2401, 2442), 'numpy.issubdtype', 'numpy.issubdtype', (['c_dtype', 'numpy.floating'], {}), '(c_dtype, numpy.floating)\n', (2417, 2442), False, 'import numpy\n'), ((2459, 2507), 'numpy.issubdtype', 'numpy.issubdtype', (['c_dtype', 'numpy.complexfloating'], {}), '(c_dtype, numpy.complexfloating)\n', (2475, 2507), False, 'import numpy\n')] |
import numpy as np
# Compact console printing: 3 decimals, wide lines, no scientific notation.
np.set_printoptions(precision=3, linewidth=200, suppress=True)
from random import seed
from copy import deepcopy
def feature_normalize(features, n_type='z-score'):
    """Scale a single 1-D feature column.

    :param features: numpy array with the raw values of one feature
    :param n_type: 'z-score' (mean 0 / std 1) or 'min-max' (range [0, 1])
    :return: scaled copy of *features*; the input unchanged when the column
             is constant; an empty array for an unknown *n_type*
    """
    scaled = np.array([])
    if n_type == 'z-score':
        mu = np.mean(features, axis=0)
        sigma = np.std(features, axis=0)
        # constant column: leave values untouched to avoid dividing by zero
        scaled = (features - mu) / sigma if sigma != 0 else features
    elif n_type == 'min-max':
        lo = features.min(axis=0)
        hi = features.max(axis=0)
        scaled = (features - lo) / (hi - lo) if hi != lo else features
    return scaled
def normalize_data(train_features, n_type='z-score'):
    """Feature scaling: normalize every column of the training matrix.

    :param train_features: 2-D numpy array (rows = samples, cols = features)
    :param n_type: normalization type forwarded to feature_normalize
    :return: new array with each column scaled independently
    """
    _, n_cols = train_features.shape
    # work on a copy so the caller's matrix is left untouched
    scaled = deepcopy(train_features)
    for col in range(n_cols):
        scaled[:, col] = feature_normalize(train_features[:, col], n_type=n_type)
    return scaled
| [
"numpy.mean",
"copy.deepcopy",
"numpy.array",
"numpy.std",
"numpy.set_printoptions"
] | [((19, 81), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'linewidth': '(200)', 'suppress': '(True)'}), '(precision=3, linewidth=200, suppress=True)\n', (38, 81), True, 'import numpy as np\n'), ((268, 280), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (276, 280), True, 'import numpy as np\n'), ((1024, 1048), 'copy.deepcopy', 'deepcopy', (['train_features'], {}), '(train_features)\n', (1032, 1048), False, 'from copy import deepcopy\n'), ((324, 349), 'numpy.mean', 'np.mean', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (331, 349), True, 'import numpy as np\n'), ((364, 388), 'numpy.std', 'np.std', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (370, 388), True, 'import numpy as np\n')] |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import azureml.automl.core
from azureml.automl.core.shared import logging_utilities, log_server
from azureml.telemetry import INSTRUMENTATION_KEY
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
# Example request row used to generate the input schema (bank-marketing
# style columns; all sample values supplied as strings with explicit dtypes).
input_sample = pd.DataFrame({"age": pd.Series(["24"], dtype="int64"), "job": pd.Series(["technician"], dtype="object"), "marital": pd.Series(["single"], dtype="object"), "education": pd.Series(["university.degree"], dtype="object"), "default": pd.Series(["no"], dtype="object"), "housing": pd.Series(["no"], dtype="object"), "loan": pd.Series(["yes"], dtype="object"), "contact": pd.Series(["cellular"], dtype="object"), "month": pd.Series(["jul"], dtype="object"), "duration": pd.Series(["109"], dtype="int64"), "campaign": pd.Series(["3"], dtype="int64"), "pdays": pd.Series(["999"], dtype="int64"), "previous": pd.Series(["0"], dtype="int64"), "poutcome": pd.Series(["nonexistent"], dtype="object"), "emp.var.rate": pd.Series(["1.4"], dtype="float64"), "cons.price.idx": pd.Series(["93.918"], dtype="float64"), "cons.conf.idx": pd.Series(["-42.7"], dtype="float64"), "euribor3m": pd.Series(["4.963"], dtype="float64"), "nr.employed": pd.Series(["5228.1"], dtype="float64")})
# Example response used to generate the output schema.
output_sample = np.array([0])
# Telemetry setup is best-effort: a logging failure must not break scoring.
# NOTE(review): the bare `except` also swallows SystemExit/KeyboardInterrupt.
try:
    log_server.enable_telemetry(INSTRUMENTATION_KEY)
    log_server.set_verbosity('INFO')
    logger = logging.getLogger('azureml.automl.core.scoring_script')
except:
    pass
def init():
    """Load the deployed model into the module-global ``model``.

    Called once by the inference server before any ``run()`` call. On
    failure, logs custom dimensions parsed from the model path plus the
    traceback, then re-raises so the deployment fails loudly.
    """
    global model
    # This name is model.id of model that we want to deploy deserialize the model file back
    # into a sklearn model
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model.pkl')
    try:
        model = joblib.load(model_path)
    except Exception as e:
        # Path layout assumed: <root>/<model_name>/<model_version>/... — TODO confirm
        path = os.path.normpath(model_path)
        path_split = path.split(os.sep)
        log_server.update_custom_dimensions({'model_name': path_split[1], 'model_version': path_split[2]})
        logging_utilities.log_traceback(e, logger)
        raise
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score incoming rows with the deployed model.

    Returns a JSON string: {"result": [...]} on success or
    {"error": "<message>"} when prediction fails.
    """
    try:
        predictions = model.predict(data)
        return json.dumps({"result": predictions.tolist()})
    except Exception as exc:
        return json.dumps({"error": str(exc)})
| [
"azureml.automl.core.shared.log_server.set_verbosity",
"azureml.automl.core.shared.log_server.enable_telemetry",
"logging.getLogger",
"pandas.Series",
"os.getenv",
"inference_schema.parameter_types.pandas_parameter_type.PandasParameterType",
"sklearn.externals.joblib.load",
"inference_schema.parameter... | [((1698, 1711), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1706, 1711), True, 'import numpy as np\n'), ((1721, 1769), 'azureml.automl.core.shared.log_server.enable_telemetry', 'log_server.enable_telemetry', (['INSTRUMENTATION_KEY'], {}), '(INSTRUMENTATION_KEY)\n', (1748, 1769), False, 'from azureml.automl.core.shared import logging_utilities, log_server\n'), ((1774, 1806), 'azureml.automl.core.shared.log_server.set_verbosity', 'log_server.set_verbosity', (['"""INFO"""'], {}), "('INFO')\n", (1798, 1806), False, 'from azureml.automl.core.shared import logging_utilities, log_server\n'), ((1820, 1875), 'logging.getLogger', 'logging.getLogger', (['"""azureml.automl.core.scoring_script"""'], {}), "('azureml.automl.core.scoring_script')\n", (1837, 1875), False, 'import logging\n'), ((2474, 2507), 'inference_schema.parameter_types.pandas_parameter_type.PandasParameterType', 'PandasParameterType', (['input_sample'], {}), '(input_sample)\n', (2493, 2507), False, 'from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType\n'), ((2524, 2557), 'inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType', 'NumpyParameterType', (['output_sample'], {}), '(output_sample)\n', (2542, 2557), False, 'from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n'), ((740, 772), 'pandas.Series', 'pd.Series', (["['24']"], {'dtype': '"""int64"""'}), "(['24'], dtype='int64')\n", (749, 772), True, 'import pandas as pd\n'), ((781, 822), 'pandas.Series', 'pd.Series', (["['technician']"], {'dtype': '"""object"""'}), "(['technician'], dtype='object')\n", (790, 822), True, 'import pandas as pd\n'), ((835, 872), 'pandas.Series', 'pd.Series', (["['single']"], {'dtype': '"""object"""'}), "(['single'], dtype='object')\n", (844, 872), True, 'import pandas as pd\n'), ((887, 935), 'pandas.Series', 'pd.Series', (["['university.degree']"], {'dtype': '"""object"""'}), "(['university.degree'], 
dtype='object')\n", (896, 935), True, 'import pandas as pd\n'), ((948, 981), 'pandas.Series', 'pd.Series', (["['no']"], {'dtype': '"""object"""'}), "(['no'], dtype='object')\n", (957, 981), True, 'import pandas as pd\n'), ((994, 1027), 'pandas.Series', 'pd.Series', (["['no']"], {'dtype': '"""object"""'}), "(['no'], dtype='object')\n", (1003, 1027), True, 'import pandas as pd\n'), ((1037, 1071), 'pandas.Series', 'pd.Series', (["['yes']"], {'dtype': '"""object"""'}), "(['yes'], dtype='object')\n", (1046, 1071), True, 'import pandas as pd\n'), ((1084, 1123), 'pandas.Series', 'pd.Series', (["['cellular']"], {'dtype': '"""object"""'}), "(['cellular'], dtype='object')\n", (1093, 1123), True, 'import pandas as pd\n'), ((1134, 1168), 'pandas.Series', 'pd.Series', (["['jul']"], {'dtype': '"""object"""'}), "(['jul'], dtype='object')\n", (1143, 1168), True, 'import pandas as pd\n'), ((1182, 1215), 'pandas.Series', 'pd.Series', (["['109']"], {'dtype': '"""int64"""'}), "(['109'], dtype='int64')\n", (1191, 1215), True, 'import pandas as pd\n'), ((1229, 1260), 'pandas.Series', 'pd.Series', (["['3']"], {'dtype': '"""int64"""'}), "(['3'], dtype='int64')\n", (1238, 1260), True, 'import pandas as pd\n'), ((1271, 1304), 'pandas.Series', 'pd.Series', (["['999']"], {'dtype': '"""int64"""'}), "(['999'], dtype='int64')\n", (1280, 1304), True, 'import pandas as pd\n'), ((1318, 1349), 'pandas.Series', 'pd.Series', (["['0']"], {'dtype': '"""int64"""'}), "(['0'], dtype='int64')\n", (1327, 1349), True, 'import pandas as pd\n'), ((1363, 1405), 'pandas.Series', 'pd.Series', (["['nonexistent']"], {'dtype': '"""object"""'}), "(['nonexistent'], dtype='object')\n", (1372, 1405), True, 'import pandas as pd\n'), ((1423, 1458), 'pandas.Series', 'pd.Series', (["['1.4']"], {'dtype': '"""float64"""'}), "(['1.4'], dtype='float64')\n", (1432, 1458), True, 'import pandas as pd\n'), ((1478, 1516), 'pandas.Series', 'pd.Series', (["['93.918']"], {'dtype': '"""float64"""'}), "(['93.918'], dtype='float64')\n", 
(1487, 1516), True, 'import pandas as pd\n'), ((1535, 1572), 'pandas.Series', 'pd.Series', (["['-42.7']"], {'dtype': '"""float64"""'}), "(['-42.7'], dtype='float64')\n", (1544, 1572), True, 'import pandas as pd\n'), ((1587, 1624), 'pandas.Series', 'pd.Series', (["['4.963']"], {'dtype': '"""float64"""'}), "(['4.963'], dtype='float64')\n", (1596, 1624), True, 'import pandas as pd\n'), ((1641, 1679), 'pandas.Series', 'pd.Series', (["['5228.1']"], {'dtype': '"""float64"""'}), "(['5228.1'], dtype='float64')\n", (1650, 1679), True, 'import pandas as pd\n'), ((2073, 2103), 'os.getenv', 'os.getenv', (['"""AZUREML_MODEL_DIR"""'], {}), "('AZUREML_MODEL_DIR')\n", (2082, 2103), False, 'import os\n'), ((2143, 2166), 'sklearn.externals.joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (2154, 2166), False, 'from sklearn.externals import joblib\n'), ((2209, 2237), 'os.path.normpath', 'os.path.normpath', (['model_path'], {}), '(model_path)\n', (2225, 2237), False, 'import os\n'), ((2286, 2388), 'azureml.automl.core.shared.log_server.update_custom_dimensions', 'log_server.update_custom_dimensions', (["{'model_name': path_split[1], 'model_version': path_split[2]}"], {}), "({'model_name': path_split[1],\n 'model_version': path_split[2]})\n", (2321, 2388), False, 'from azureml.automl.core.shared import logging_utilities, log_server\n'), ((2393, 2435), 'azureml.automl.core.shared.logging_utilities.log_traceback', 'logging_utilities.log_traceback', (['e', 'logger'], {}), '(e, logger)\n', (2424, 2435), False, 'from azureml.automl.core.shared import logging_utilities, log_server\n'), ((2741, 2770), 'json.dumps', 'json.dumps', (["{'error': result}"], {}), "({'error': result})\n", (2751, 2770), False, 'import json\n')] |
import os
import numpy as np
import torch
from .alignment import load_net, batch_detect
def get_project_dir():
    """Return the absolute path of the directory containing this module."""
    return os.path.abspath(os.path.join(__file__, "../"))
def relative(path):
    """Resolve *path* against the project directory and return it absolutized."""
    return os.path.abspath(os.path.join(get_project_dir(), path))
class RetinaFace:
    """Thin wrapper around a RetinaFace detector network."""

    def __init__(
        self,
        gpu_id=-1,
        model_path=relative("weights/mobilenet0.25_Final.pth"),
        network="mobilenet",
    ):
        # gpu_id == -1 selects CPU; any other value is a CUDA device index
        self.gpu_id = gpu_id
        self.device = (
            torch.device("cpu") if gpu_id == -1 else torch.device("cuda", gpu_id)
        )
        self.model = load_net(model_path, self.device, network)

    def detect(self, images):
        """Run detection on a single image or a batch.

        Accepts a numpy array or torch tensor (3 dims are treated as one
        image, 4 dims as a batch) or a list of images. A single image
        returns its own result (the first batch entry); batches return the
        full batch result. Unsupported input types raise
        NotImplementedError.
        NOTE(review): arrays/tensors with other ranks fall through every
        branch and return None implicitly — confirm that is intended.
        """
        if isinstance(images, np.ndarray):
            if len(images.shape) == 3:
                return batch_detect(self.model, [images], self.device)[0]
            elif len(images.shape) == 4:
                return batch_detect(self.model, images, self.device)
        elif isinstance(images, list):
            return batch_detect(self.model, np.array(images), self.device)
        elif isinstance(images, torch.Tensor):
            if len(images.shape) == 3:
                return batch_detect(self.model, images.unsqueeze(0), self.device)[0]
            elif len(images.shape) == 4:
                return batch_detect(self.model, images, self.device)
        else:
            raise NotImplementedError()

    def __call__(self, images):
        # Allow the detector instance to be used as a callable.
        return self.detect(images)
| [
"os.path.abspath",
"numpy.array",
"os.path.join",
"torch.device"
] | [((287, 308), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (302, 308), False, 'import os\n'), ((150, 179), 'os.path.join', 'os.path.join', (['__file__', '"""../"""'], {}), "(__file__, '../')\n", (162, 179), False, 'import os\n'), ((545, 564), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (557, 564), False, 'import torch\n'), ((586, 614), 'torch.device', 'torch.device', (['"""cuda"""', 'gpu_id'], {}), "('cuda', gpu_id)\n", (598, 614), False, 'import torch\n'), ((1069, 1085), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1077, 1085), True, 'import numpy as np\n')] |
'''Script for testing trained models
'''
import os
from heapq import nlargest
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from scikitplot.metrics import plot_confusion_matrix
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms, datasets
import utils as ut
# Dataset locations (torchvision ImageFolder layout: one sub-dir per class)
TRN_DATA_DIR = "assets/flower_data/train"
VAL_DATA_DIR = "assets/flower_data/valid"
BATCH_SIZE = 32
# 'inception', 'resnet18', 'resnet152',
# 'densenet121', 'densenet201'
MODEL_TO_USE = 'densenet201'
#MODEL_TO_USE = 'resnet152'
# Expected network input as (height, width, channels) per model family
INPUT_SIZE = {'inception': (299, 299, 3),
              'resnet': (224, 224, 3),
              'densenet': (224, 224, 3)}
# Checkpoint to evaluate
MODEL_DIR = 'checkpoints'
MODEL_FN = 'densenet201_classifier_20e_final.pth'
# Number of evaluation passes averaged in main()
NUM_TEST = 5
def get_data_gen(input_size):
    '''Initialize train/validation data loaders.

    :param input_size: (height, width, channels) expected by the network;
                       crops use the smaller of height/width
    :return: (trainloader, validloader) pair of shuffled DataLoaders
    '''
    side_len = min(input_size[:2])
    # ImageNet channel statistics, shared by both pipelines
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # training pipeline: random crop plus flip/rotation augmentation
    train_transforms = transforms.Compose([transforms.Resize(side_len),
                                           transforms.RandomCrop(side_len),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.RandomRotation(30),
                                           transforms.ToTensor(),
                                           normalize])
    # validation pipeline: no augmentation beyond the (random) crop.
    # NOTE(review): a CenterCrop would make validation deterministic.
    valid_transforms = transforms.Compose([transforms.Resize(side_len),
                                           transforms.RandomCrop(side_len),
                                           transforms.ToTensor(),
                                           normalize])
    trainloader = _make_loader(TRN_DATA_DIR, train_transforms)
    validloader = _make_loader(VAL_DATA_DIR, valid_transforms)
    return trainloader, validloader

def _make_loader(data_dir, tfms):
    '''Build a shuffled DataLoader over an ImageFolder directory.'''
    data = datasets.ImageFolder(data_dir, transform=tfms)
    return torch.utils.data.DataLoader(data,
                                       batch_size=BATCH_SIZE, shuffle=True)
def get_top_misclassified(labels, preds, k=5):
    '''Print the k classes with the highest misclassification rate.

    :param labels: ground-truth class labels (iterable of ints)
    :param preds: predicted class labels, same length as `labels`
    :param k: number of worst-performing classes to report
    '''
    labels = np.array(labels)
    preds = np.array(preds)
    # get labels where there is mismatch
    mis_labels = np.unique(labels[labels != preds])
    # for tracking of misclassification
    # err_list is indexed with lab-1, so labels are assumed to be 1-based
    # and bounded by ut.OUTPUT_SIZE — TODO confirm against the dataset
    err_list = [0] * ut.OUTPUT_SIZE
    err_detail_map = {}
    mis_dict = {}
    for lab in mis_labels:
        lab_idxs = (labels == lab)
        total = lab_idxs.sum()
        preds_for_lab = preds[lab_idxs]
        mis_preds = (preds_for_lab != lab)
        wrong = mis_preds.sum()
        # error rate for this class plus the raw counts and wrong predictions
        err_list[lab-1] = float(wrong / total)
        err_detail_map[lab] = {'wrong': wrong, 'total': total}
        mis_dict[lab] = preds_for_lab[mis_preds]
    # get indices,value of elements with largest values
    idx_err_pairs = nlargest(k, enumerate(err_list), key=lambda x: x[1])
    for idx, err in idx_err_pairs:
        if err == 0:
            continue
        lab = idx+1
        print("%d: %0.2f (%d/%d), Preds: " % (lab, err, err_detail_map[lab]['wrong'],
                                              err_detail_map[lab]['total']), mis_dict[lab])
def test(model_name, model):
    '''Evaluate a trained checkpoint on the validation set.

    Loads MODEL_DIR/MODEL_FN into `model`, runs one pass over the
    validation loader and prints the loss and accuracy.
    (The original docstring said "training"; this function only evaluates.)

    :param model_name: key into INPUT_SIZE ('inception'/'resnet'/'densenet')
    :param model: torch module whose state_dict matches the checkpoint
    :return: validation accuracy as a float
    '''
    # prepare data generator
    input_size = INPUT_SIZE[model_name]
    trainloader, validloader = get_data_gen(input_size)
    # load model
    ckpt = torch.load(os.path.join(MODEL_DIR, MODEL_FN))
    model.load_state_dict(ckpt)
    # First checking if GPU is available
    train_on_gpu = torch.cuda.is_available()
    if train_on_gpu:
        print('Testing on GPU.')
        model.cuda()
    else:
        print('No GPU available, testing on CPU.')
    # prepare loss function
    loss_f = nn.CrossEntropyLoss()
    all_labels = []
    all_preds = []
    with torch.no_grad():
        total = 0
        corrects = 0
        val_loss = 0
        model.eval()
        for images, labels in tqdm(validloader):
            # move data to gpu
            if train_on_gpu:
                images, labels = images.cuda(), labels.cuda()
            logits = model.forward(images)
            val_loss += loss_f(logits, labels)
            # argmax over softmax probabilities -> predicted class indices
            probs = F.softmax(logits, dim=1)
            preds = probs.cpu().numpy().argmax(axis=1)
            preds = torch.from_numpy(preds)
            if train_on_gpu:
                preds = preds.cuda()
            corrects += torch.sum(preds == labels).type(torch.FloatTensor)
            total += len(labels)
            # keep flat label/prediction lists for post-hoc analysis below
            all_labels.extend(labels.cpu().numpy().squeeze().tolist())
            all_preds.extend(preds.cpu().numpy().squeeze().tolist())
        accuracy = float(corrects / total)
        print("Validation Loss: {:.3f}.. ".format(val_loss/len(validloader)),
              "Validation Accuracy: {:.3f}".format(accuracy))
    # display confusion matrix (WARNING: SLOW!)
    # print("Plotting confusion matrix...")
    # plot_confusion_matrix(all_labels, all_preds, normalize=True)
    # plt.show()
    #get_top_misclassified(all_labels, all_preds)
    return accuracy
def main():
    '''Entry point: build the selected model and average its validation
    accuracy over NUM_TEST evaluation runs.

    :return: 0 on success, -1 when MODEL_TO_USE is unsupported
    '''
    global MODEL_TO_USE
    if MODEL_TO_USE == 'inception':
        model = ut.get_modified_inception(pretrained=False)
    elif 'resnet' in MODEL_TO_USE:
        # Slice off the family prefix to get the depth. The previous
        # str.strip('resnet') removed *characters*, not the prefix, and
        # only worked because no digit appears in 'resnet'/'densenet'.
        sz = int(MODEL_TO_USE[len('resnet'):])
        model = ut.get_modified_resnet(sz, pretrained=False)
        MODEL_TO_USE = 'resnet'
    elif 'densenet' in MODEL_TO_USE:
        sz = int(MODEL_TO_USE[len('densenet'):])
        model = ut.get_modified_densenet(sz, pretrained=False)
        MODEL_TO_USE = 'densenet'
    else:
        print("Unsupported model type!")
        return -1
    acc = 0
    for _ in range(NUM_TEST):
        acc += test(MODEL_TO_USE, model)
    acc /= NUM_TEST
    print("Average Accuracy: {:.3f}".format(acc))
    return 0

if __name__ == "__main__":
    main()
| [
"utils.get_modified_inception",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"utils.get_modified_resnet",
"torch.nn.functional.softmax",
"utils.get_modified_densenet",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor"... | [((1472, 1534), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['TRN_DATA_DIR'], {'transform': 'train_transforms'}), '(TRN_DATA_DIR, transform=train_transforms)\n', (1492, 1534), False, 'from torchvision import transforms, datasets\n'), ((1553, 1629), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(train_data, batch_size=BATCH_SIZE, shuffle=True)\n', (1580, 1629), False, 'import torch\n'), ((2115, 2177), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['VAL_DATA_DIR'], {'transform': 'valid_transforms'}), '(VAL_DATA_DIR, transform=valid_transforms)\n', (2135, 2177), False, 'from torchvision import transforms, datasets\n'), ((2196, 2272), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_data'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(valid_data, batch_size=BATCH_SIZE, shuffle=True)\n', (2223, 2272), False, 'import torch\n'), ((2464, 2480), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2472, 2480), True, 'import numpy as np\n'), ((2493, 2508), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (2501, 2508), True, 'import numpy as np\n'), ((2568, 2602), 'numpy.unique', 'np.unique', (['labels[labels != preds]'], {}), '(labels[labels != preds])\n', (2577, 2602), True, 'import numpy as np\n'), ((3858, 3883), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3881, 3883), False, 'import torch\n'), ((4062, 4083), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4081, 4083), False, 'from torch import nn\n'), ((3730, 3763), 'os.path.join', 'os.path.join', (['MODEL_DIR', 'MODEL_FN'], {}), '(MODEL_DIR, MODEL_FN)\n', (3742, 3763), False, 'import os\n'), ((4134, 4149), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4147, 4149), False, 'import torch\n'), ((4262, 4279), 'tqdm.tqdm', 'tqdm', (['validloader'], {}), 
'(validloader)\n', (4266, 4279), False, 'from tqdm import tqdm\n'), ((5485, 5528), 'utils.get_modified_inception', 'ut.get_modified_inception', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5510, 5528), True, 'import utils as ut\n'), ((946, 973), 'torchvision.transforms.Resize', 'transforms.Resize', (['side_len'], {}), '(side_len)\n', (963, 973), False, 'from torchvision import transforms, datasets\n'), ((1018, 1049), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['side_len'], {}), '(side_len)\n', (1039, 1049), False, 'from torchvision import transforms, datasets\n'), ((1094, 1127), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1125, 1127), False, 'from torchvision import transforms, datasets\n'), ((1172, 1201), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (1197, 1201), False, 'from torchvision import transforms, datasets\n'), ((1246, 1267), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1265, 1267), False, 'from torchvision import transforms, datasets\n'), ((1312, 1387), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1332, 1387), False, 'from torchvision import transforms, datasets\n'), ((1741, 1768), 'torchvision.transforms.Resize', 'transforms.Resize', (['side_len'], {}), '(side_len)\n', (1758, 1768), False, 'from torchvision import transforms, datasets\n'), ((1813, 1844), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['side_len'], {}), '(side_len)\n', (1834, 1844), False, 'from torchvision import transforms, datasets\n'), ((1889, 1910), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1908, 1910), False, 'from torchvision import transforms, datasets\n'), ((1955, 2030), 'torchvision.transforms.Normalize', 
'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1975, 2030), False, 'from torchvision import transforms, datasets\n'), ((4515, 4539), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4524, 4539), True, 'import torch.nn.functional as F\n'), ((4615, 4638), 'torch.from_numpy', 'torch.from_numpy', (['preds'], {}), '(preds)\n', (4631, 4638), False, 'import torch\n'), ((5627, 5671), 'utils.get_modified_resnet', 'ut.get_modified_resnet', (['sz'], {'pretrained': '(False)'}), '(sz, pretrained=False)\n', (5649, 5671), True, 'import utils as ut\n'), ((5806, 5852), 'utils.get_modified_densenet', 'ut.get_modified_densenet', (['sz'], {'pretrained': '(False)'}), '(sz, pretrained=False)\n', (5830, 5852), True, 'import utils as ut\n'), ((4729, 4755), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (4738, 4755), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Neviim - 2017
# V-0.1.5
__version__ = 'v-0.1.5'
from pymongo import MongoClient
import numpy as np
import pymongo
class GtsCalculos(object):
    """Math helpers for GT Sport data.

    Usage:
        from gtsmath import GtsCalculos
        gtsCalculos = GtsCalculos()
    """
    def __init__(self, idkey=0):
        super(GtsCalculos, self).__init__()
        self.idkey = idkey

    def distancia_euclidiana(self, v1, v2):
        """Euclidean distance between v1 and every row of v2.

        :param v1: reference vector (list of numbers)
        :param v2: list of candidate vectors, each the same length as v1
        :return: list with one distance per entry of v2
        Usage:
            v1 = [1.2, 2.3, 4.5]
            v2 = [[0.5, 0.7, 0.2],
                  [0.7, 0.2, 2.2],
                  [1.5, 4.7, 0.1]]
            gtsCalculos = GtsCalculos()
            dist_euclidiana = gtsCalculos.distancia_euclidiana(v1, v2)
        """
        reference = np.array(v1)
        distances = []
        for row in v2:
            # squared difference via dot product, then the root
            delta = reference - np.array(row)
            distances.append(np.sqrt(np.dot(delta, delta)))
        return distances
class MongoConecta(object):
    """Opens a connection to the GranTurismoSport MongoDB database."""
    def __init__(self, server='localhost', port=27017):
        super(MongoConecta, self).__init__()
        self.server = server
        self.port = port
        # NOTE(review): `clientdb` and `db` are locals, discarded when
        # __init__ returns — only the connection parameters are kept on the
        # instance. Confirm whether they should be self.clientdb / self.db.
        clientdb = MongoClient(server, port)
        db = clientdb.GranTurismoSport
        #db.carros.create_index('idkey', unique=True)
if __name__ == '__main__':
    # Smoke test: compare one input vector against a set of candidates and
    # report the closest one by Euclidean distance.
    gtsCalculos = GtsCalculos()
    # Input vector v1
    v1 = [3.7, 4.8, 2.3]
    # Compared against every candidate row of v2
    v2 = [[3.7, 6.2, 4.7],
          [2.7, 3.3, 0.2],
          [3.7, 5.2, 1.4],
          [4.6, 2.4, 3.4]
          ]
    resultado = gtsCalculos.distancia_euclidiana(v1, v2)
    print(" ")
    print("# ------------- ")
    print("Virsão: "+__version__+"\n")
    print("Resultado:")
    print(" ")
    print(resultado)
    print(" ")
    print("Saida:")
    print(" ")
    print("Entrada v1 .....: "+ str(v1))
    print("Escolha v2 .....: "+ str(v2[resultado.index(np.min(resultado))]))
    print("Dist. Euclidiana: "+ str(np.min(resultado)))
    print("# ------------------------------- ")
    # expected minimum distance: 4.47
| [
"numpy.sqrt",
"numpy.array",
"numpy.dot",
"numpy.min",
"pymongo.MongoClient"
] | [((1277, 1289), 'numpy.array', 'np.array', (['v1'], {}), '(v1)\n', (1285, 1289), True, 'import numpy as np\n'), ((1943, 1968), 'pymongo.MongoClient', 'MongoClient', (['server', 'port'], {}), '(server, port)\n', (1954, 1968), False, 'from pymongo import MongoClient\n'), ((1355, 1369), 'numpy.array', 'np.array', (['item'], {}), '(item)\n', (1363, 1369), True, 'import numpy as np\n'), ((1515, 1531), 'numpy.dot', 'np.dot', (['dif', 'dif'], {}), '(dif, dif)\n', (1521, 1531), True, 'import numpy as np\n'), ((1556, 1574), 'numpy.sqrt', 'np.sqrt', (['quad_dist'], {}), '(quad_dist)\n', (1563, 1574), True, 'import numpy as np\n'), ((2762, 2779), 'numpy.min', 'np.min', (['resultado'], {}), '(resultado)\n', (2768, 2779), True, 'import numpy as np\n'), ((2703, 2720), 'numpy.min', 'np.min', (['resultado'], {}), '(resultado)\n', (2709, 2720), True, 'import numpy as np\n')] |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from IPython import embed
from multiprocessing import Pool, cpu_count
#import mega_nn
import numpy as np
import scipy as sc
import scipy.stats as stats
import pandas as pd
from itertools import product, chain
import pickle
import os
import sys
import time
# Make the sibling project packages importable without installation.
# NOTE(review): os.path.join(os.path.abspath(__file__), '../..') treats the
# file itself as a directory; abspath normalizes it, but dirname(__file__)
# would express the intent more clearly.
networks_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../networks'))
NNDB_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../NNDB'))
training_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../training'))
qlk4D_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../../QLK4DNN'))
sys.path.append(networks_path)
sys.path.append(NNDB_path)
sys.path.append(training_path)
sys.path.append(qlk4D_path)
from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db
from run_model import QuaLiKizNDNN, QuaLiKizDuoNN
from train_NDNN import shuffle_panda
from functools import partial
if __name__ == '__main__':
    # Select the non-interactive PDF backend *before* pyplot is imported,
    # so plotting also works on headless machines.
    import matplotlib as mpl
    mpl.use('pdf')
    import matplotlib.pyplot as plt
    from matplotlib import gridspec, cycler
    pretty = False
    from load_data import nameconvert
from load_data import load_data, load_nn, prettify_df
from collections import OrderedDict
from peewee import AsIs, fn, SQL
import re
import gc
def mode_to_settings(mode):
    """Map a run-mode name to its plot/debug settings dict.

    :param mode: one of 'debug', 'quick' or 'pretty'
    :return: dict of boolean flags; an empty dict for an unknown mode
    """
    flags = ['plot', 'plot_pop', 'plot_nns', 'plot_slice', 'plot_poplines',
             'plot_threshlines', 'plot_zerocolors', 'plot_thresh1line',
             'calc_thresh1', 'hide_qualikiz', 'debug', 'parallel',
             'plot_threshslope']
    # Per-mode deviations from the all-False baseline. This replaces three
    # near-identical hand-written blocks that were easy to let drift apart.
    overrides = {
        'debug': dict(plot=True, plot_pop=True, plot_nns=True,
                      plot_slice=True, plot_poplines=True,
                      plot_threshlines=True, debug=True),
        'quick': dict(parallel=True),
        'pretty': dict(plot=True, plot_nns=True, debug=True,
                       plot_threshslope=True),
    }
    if mode not in overrides:
        # Preserve legacy behaviour: unknown modes yield an empty dict.
        return {}
    settings = dict.fromkeys(flags, False)
    settings.update(overrides[mode])
    return settings
def get_similar_not_in_table(table, max=20, only_dim=None, only_sep=False, no_particle=False, no_divsum=False,
                             no_mixed=True):
    """Find up to `max` similar networks that have no row in `table` yet.

    Scans Network, ComboNetwork and MultiNetwork in turn for entries that
    are missing from `table` (e.g. PostprocessSlice), optionally filtered
    by input dimension and target-name tags. The first class with a hit
    wins; the query is then widened to all networks of that class sharing
    the hit's feature and target names.

    :param table: peewee model with a foreign-key field per network class
    :param max: maximum number of networks to return
    :param only_dim: if set, keep only networks with this many inputs
    :param only_sep: keep only networks targeting a single instability
                     (TEM/ITG/ETG)
    :param no_particle: drop networks with particle-flux ('pf') targets
    :param no_divsum: drop 'div'/'plus' combination targets
    :param no_mixed: drop networks mixing particle- and energy-flux targets
    :return: peewee SelectQuery limited to `max` rows
    """
    for cls, field_name in [(Network, 'network'),
                            (ComboNetwork, 'combo_network'),
                            (MultiNetwork, 'multi_network')
                            ]:
        # networks of this class without a corresponding row in `table`
        non_sliced = (cls
                      .select()
                      .where(~fn.EXISTS(table.select().where(getattr(table, field_name) == cls.id)))
                      )
        if only_dim is not None:
            non_sliced &= cls.select().where(SQL("array_length(feature_names, 1)=" + str(only_dim)))
        if no_mixed:
            non_sliced &= cls.select().where(~(SQL("(array_to_string(target_names, ',') like %s)", ['%pf%']) &
                                               (SQL("(array_to_string(target_names, ',') like %s)", ['%ef%'])))
                                             )
        tags = []
        if no_divsum is True:
            tags.extend(["div", "plus"])
        if no_particle is True:
            tags.append('pf')
        if len(tags) != 0:
            non_sliced &= no_elements_in_list(cls, 'target_names', tags)
        if only_sep is True:
            non_sliced &= any_element_in_list(cls, 'target_names', ['TEM', 'ITG', 'ETG'])
        if non_sliced.count() > 0:
            network = non_sliced.get()
            break
    # widen to every network sharing the first hit's feature/target names
    non_sliced &= (cls.select()
                   .where(cls.target_names == AsIs(network.target_names))
                   .where(cls.feature_names == AsIs(network.feature_names))
                   )
    non_sliced = non_sliced.limit(max)
    return non_sliced
def nns_from_NNDB(max=20, only_dim=None):
    """Load a batch of similar, not-yet-sliced networks from the NNDB.

    Connects to the database, selects networks without PostprocessSlice
    rows (single-regime targets only), derives the slice dimension from
    the instability regime (ITG -> 'Ati', TEM/ETG -> 'Ate') and detects
    'duo' style for paired electron/ion flux targets.

    :param max: maximum number of networks to load
    :param only_dim: restrict to networks with this input dimension
    :return: (slicedim, style, nns) with nns mapping label -> QuaLiKiz NN
    """
    db.connect()
    non_sliced = get_similar_not_in_table(PostprocessSlice, max=max, only_sep=True, no_particle=False, no_divsum=True, only_dim=only_dim)
    network = non_sliced.get()
    style = 'mono'
    if len(network.target_names) == 2:
        # two targets: look for an electron/ion pair of the same regime
        match_0 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[0])
        match_1 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[1])
        if len(match_0) == 1 and len(match_1) == 1:
            group_0 = match_0[0]
            group_1 = match_1[0]
            if ((group_0[1] == 'e' and group_1[1] == 'i') or
                (group_0[1] == 'i' and group_1[1] == 'e')):
                style='duo'
        else:
            raise Exception('non-matching target_names. Not sure what to do.. {s}'
                            .format(network.target_names))
    matches = []
    for target_name in network.target_names:
        matches.extend(re.compile('^.f.(ITG|ETG|TEM)_GB').findall(target_name))
    if matches[1:] == matches[:-1]:
        # all targets share one regime; pick the matching scan dimension
        if matches[0] == 'ITG':
            slicedim = 'Ati'
        elif matches[0] == 'TEM' or matches[0] == 'ETG':
            slicedim = 'Ate'
    else:
        raise Exception('Unequal stability regime. Cannot determine slicedim')
    nn_list = {network.id: str(network.id) for network in non_sliced}
    print('Found {:d} {!s} with target {!s}'.format(non_sliced.count(), network.__class__, network.target_names))
    nns = OrderedDict()
    for dbnn in non_sliced:
        nn = dbnn.to_QuaLiKizNN()
        nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
        nns[nn.label] = nn
    db.close()
    return slicedim, style, nns
def populate_nn_list(nn_set):
    """Return (slicedim, style, nn_list) for a named scan of NNDB networks.

    :param nn_set: one of 'c_L2', 'topo', 'filter', 'goodness',
                   'early_stop', 'similar', 'best' or 'duo'
    :return: slicedim (scan dimension name), style ('mono' or 'duo') and
             an OrderedDict mapping NNDB network id -> plot label
    """
    if nn_set == 'c_L2':
        # scan over the L2 regularization coefficient
        nn_list = OrderedDict([
            (61, '$c_{L2} = 0.0$'),
            # (48, '$c_{L2} = 0.05$'),
            (37, '$c_{L2} = 0.1$'),
            # (50, '$c_{L2} = 0.2$'),
            # (51, '$c_{L2} = 0.35$'),
            (49, '$c_{L2} = 0.5$'),
            # (52, '$c_{L2} = 1.0$'),
            (53, '$c_{L2} = 2.0$'),
        ])
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'topo':
        # scan over hidden-layer topology
        nn_list = OrderedDict([
            (65, 'neurons = $(10, 10)$'),
            (64, 'neurons = $(30, 30)$'),
            (73, 'neurons = $(30, 30, 30)$'),
            (83, 'neurons = $(45, 45)$'),
            (34, 'neurons = $(60, 60)$'),
            (38, 'neurons = $(80, 80)$'),
            (66, 'neurons = $(120, 120)$'),
        ])
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'filter':
        #nn_list = OrderedDict([(37, 'filter = 3'),
        #                       (58, 'filter = 4'),
        #                       (60, 'filter = 5')])
        nn_list = OrderedDict([
            (37, '$max(\chi_{ETG,e}) = 60$'),
            (60, '$max(\chi_{ETG,e}) = 100$'),
        ])
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'goodness':
        nn_list = OrderedDict([
            (62, 'goodness = mabse'),
            (37, 'goodness = mse'),
        ])
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'early_stop':
        nn_list = OrderedDict([
            (37, 'stop measure = loss'),
            #(11, '$early_stop = mse'),
            (18, 'stop measure = MSE'),
        ])
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'similar':
        # networks trained with identical settings; labels are just the ids
        nn_list = OrderedDict(
            (idx, str(idx)) for idx in (37, 67, 68, 69, 70, 71, 72, 73, 74))
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'best':
        nn_list = OrderedDict([(46, '')]) #efeETG
        # NOTE(review): the efiITG line below shadows the efeETG one above
        nn_list = OrderedDict([(88, '')]) #efiITG
        slicedim, style = 'Ate', 'mono'
    elif nn_set == 'duo':
        nn_list = OrderedDict([
            (205, 'es_20'),
            (204, 'es_5'),
            (203, 'es_wrong'),
        ])
        slicedim, style = 'Ati', 'duo'
    return slicedim, style, nn_list
def nns_from_nn_list(nn_list, slicedim, labels=True):
    """Load every network id in *nn_list* into an OrderedDict.

    Each loaded network gets its label from *nn_list* when *labels* is
    True, otherwise an empty label. *slicedim* is accepted for interface
    compatibility but not used here.
    """
    nns = OrderedDict()
    for nn_id, nn_label in nn_list.items():
        network = load_nn(nn_id)
        network.label = nn_label if labels else ''
        nns[nn_id] = network
    return nns
def nns_from_manual():
    """Build a hand-picked network set straight from the NNDB.

    Loads ComboNetwork id 3333, labels it '<ClassName>_<id>', closes the
    database connection and returns the slicing dimension, plot style and
    the network mapping.

    Returns
    -------
    (slicedim, style, nns) : (str, str, OrderedDict)
    """
    nns = OrderedDict()
    selected = [ComboNetwork.by_id(3333).get()]
    for dbnn in selected:
        network = dbnn.to_QuaLiKizNN()
        network.label = '_'.join([dbnn.__class__.__name__, str(dbnn.id)])
        nns[network.label] = network
    slicedim = 'Ati'
    style = 'mono'
    db.close()
    return slicedim, style, nns
def prep_df(store, nns, unstack, filter_less=np.inf, filter_geq=-np.inf, shuffle=True, calc_maxgam=False, clip=False, slice=None, frac=1):
    """Load, filter and unstack the slice dataset from the HDF5 store.

    Parameters
    ----------
    store : pd.HDFStore
        Store with 'megarun1/input' (features) and 'megarun1/flattened'
        (targets).
    nns : OrderedDict
        Networks; the first entry determines feature and target columns.
    unstack : str or list
        Dimension(s) to unstack over (the slice dimension).
    filter_less, filter_geq : float
        Keep rows with filter_geq <= target < filter_less, or clip to
        these when ``clip`` is True.
    shuffle : bool
        Shuffle the slices after unstacking.
    calc_maxgam : bool
        Join the maximum growth rate as a 'maxgam' column.
    clip : bool
        Clip instead of filter.
    slice : iterable of (name, value), optional
        Extra fixed-value constraints applied before unstacking.
        NOTE(review): shadows the ``slice`` builtin inside this function.
    frac : float
        Keep only the first ``frac`` fraction of the (shuffled) slices.

    Returns
    -------
    (df, target_names) : (pd.DataFrame, list)
    """
    nn0 = list(nns.values())[0]
    target_names = nn0._target_names
    feature_names = nn0._feature_names
    input = store['megarun1/input']
    # Collisionality is used in log-space; drop the raw column if present.
    try:
        input['logNustar'] = np.log10(input['Nustar'])
        del input['Nustar']
    except KeyError:
        print('No Nustar in dataset')
    if ('Zeffx' == feature_names).any() and not ('Zeffx' in input.columns):
        print('WARNING! creating Zeffx. You should use a 9D dataset')
        input['Zeffx'] = np.full_like(input['Ati'], 1.)
        # NOTE(review): unconditional raise right after the warning makes
        # Zeffx back-filling effectively a hard error — confirm intent.
        raise Exception
    if ('logNustar' == feature_names).any() and not ('logNustar' in input.columns):
        print('WARNING! creating logNustar. You should use a 9D dataset')
        input['logNustar'] = np.full_like(input['Ati'], np.log10(0.009995))
    if len(feature_names) == 4:
        # Pin the non-network dimensions to one reference point to emulate
        # a 4D scan from the 7D dataset.
        print('WARNING! Slicing 7D to 4D dataset. You should use a 4D dataset')
        idx = input.index[(
            np.isclose(input['Ate'], 5.75,  atol=1e-5, rtol=1e-3) &
            np.isclose(input['An'], 2,  atol=1e-5, rtol=1e-3) &
            np.isclose(input['x'], .45,  atol=1e-5, rtol=1e-3)
        )]
    else:
        idx = input.index
    input = input[feature_names]
    data = store.select('megarun1/flattened', columns=target_names)
    input = input.loc[idx]
    data = data.loc[input.index]
    df = input.join(data[target_names])
    if calc_maxgam is True:
        # Maximum growth rate over both wavelength ranges.
        df_gam = store.select('/megarun1/flattened', columns=['gam_leq_GB', 'gam_great_GB'])
        df_gam = (df_gam.max(axis=1)
                  .to_frame('maxgam')
        )
        df = df.join(df_gam)
    #itor = zip(['An', 'Ate', 'Ti_Te', 'qx', 'smag', 'x'], ['0.00', '10.00', '1.00', '5.00', '0.40', '0.45'])
    #itor = zip(['Zeffx', 'Ate', 'An', 'qx', 'smag', 'x', 'Ti_Te', 'logNustar'], [1.0, 5.75, 2.5, 2.0, 0.10000000149011612, 0.33000001311302185, 1.0, -2.000217201545864])
    if slice is not None:
        for name, val in slice:
            df = df[np.isclose(df[name], float(val),  atol=1e-5, rtol=1e-3)]
    if clip is True:
        # NOTE(review): positional args reach DataFrame.clip as
        # (lower=filter_less, upper=filter_geq), which looks inverted
        # relative to the filter branch below — confirm.
        df[target_names] = df[target_names].clip(filter_less, filter_geq, axis=1)
    else:
        # filter
        df = df[(df[target_names] < filter_less).all(axis=1)]
        df = df[(df[target_names] >= filter_geq).all(axis=1)]
    #print(np.sum(df['target'] < 0)/len(df), ' frac < 0')
    #print(np.sum(df['target'] == 0)/len(df), ' frac == 0')
    #print(np.sum(df['target'] > 0)/len(df), ' frac > 0')
    #uni = {col: input[col].unique() for col in input}
    #uni_len = {key: len(value) for key, value in uni.items()}
    #input['index'] = input.index
    # Move all features into the index so unstacking leaves one row per slice.
    df.set_index([col for col in input], inplace=True)
    df = df.astype('float64')
    df = df.sort_index(level=unstack)
    df = df.unstack(unstack)
    if shuffle:
        df = shuffle_panda(df)
    #df.sort_values('smag', inplace=True)
    #input, data = prettify_df(input, data)
    #input = input.astype('float64')
    # Filter
    if frac < 1:
        idx = int(frac * len(df))
        df = df.iloc[:idx, :]
    #df = df.iloc[1040:2040,:]
    print('dataset loaded!')
    return df, target_names
def is_unsafe(df, nns, slicedim):
    """Return True when every network's feature ordering matches the dataset.

    'Unsafe' mode feeds the networks raw numpy arrays without column-name
    checks, so it is only allowed when, for every network, re-inserting
    ``slicedim`` into the dataset's index names at the position the network
    expects reproduces that network's feature list exactly.

    Parameters
    ----------
    df : pd.DataFrame
        Sliced dataset; its index names are the non-sliced features.
    nns : dict
        Mapping of label -> network; each network exposes ``_feature_names``
        as a pd.Series.
    slicedim : str
        Name of the sliced feature.

    Returns
    -------
    bool

    Raises
    ------
    Exception
        If a feature list cannot even be compared elementwise
        (different lengths).
    """
    unsafe = True
    for nn in nns.values():
        # Position at which this network expects the sliced feature.
        slicedim_idx = nn._feature_names[nn._feature_names == slicedim].index[0]
        varlist = list(df.index.names)
        varlist.insert(slicedim_idx, slicedim)
        try:
            # `not` instead of `~`: np.all returns a numpy bool here, but a
            # plain Python bool would make `~True == -2` (truthy) a silent bug.
            if not np.all(varlist == nn._feature_names):
                unsafe = False
        except ValueError:
            # BUG FIX: the original message said "dataset" twice; the second
            # feature list belongs to the network.
            raise Exception('Dataset has features {!s} but network has features {!s}'.format(varlist, list(nn._feature_names)))
    return unsafe
def calculate_thresh1(x, feature, target, debug=False):
    """Estimate the instability threshold by regression of the unstable branch.

    Fits a line through the (feature, target) points that lie after the last
    zero-flux point, then returns the largest value of *x* for which the
    fitted line is still negative. Returns NaN when no threshold exists.
    """
    try:
        last_zero = target.index[target == 0][-1]  # index of last zero
        unstable = (target.index > last_zero) & ~target.isnull()
        fit = stats.linregress(feature[unstable], target[unstable])
        thresh_pred = x * fit[0] + fit[1]  # slope * x + intercept
        return x[thresh_pred < 0][-1]
    except (ValueError, IndexError):
        if debug:
            print('No threshold1')
        return np.nan
def calculate_thresh2(feature, target, debug=False):
    """Estimate the threshold as the midpoint of the zero-to-finite crossing.

    Finds the last zero of *target* and the first non-NaN point after it,
    and returns the mean of the corresponding *feature* values. 1D only;
    returns NaN when no such crossing exists.
    """
    if len(target.shape) > 1:
        raise NotImplementedError('2D threshold not implemented yet')
    try:
        last_zero = np.where(target == 0)[0][-1]  # only works for 1D
        first_finite = np.where(~np.isnan(target[last_zero + 1:]))[0][0] + last_zero + 1
        return (feature[last_zero] + feature[first_finite]) / 2
    except IndexError:
        if debug:
            print('No threshold2')
        return np.nan
#5.4 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) total
def process_chunk(target_names, chunck, settings=None, unsafe=False):
    """Run process_row over every row of *chunck* and collect the results."""
    return [process_row(target_names, row, settings=settings, unsafe=unsafe)
            for row in chunck.iterrows()]
def process_row(target_names, row, ax1=None, unsafe=False, settings=None):
    """Analyse (and optionally plot) one dataset slice against all networks.

    Parameters
    ----------
    target_names : list of str
        Names of the QuaLiKiz output columns contained in the slice.
    row : tuple
        ``(index, slice_)`` pair as produced by ``DataFrame.iterrows()``;
        ``slice_`` holds target values along the sliced dimension.
    ax1 : matplotlib axis, optional
        Axis to plot on; when None and plotting is enabled a new figure
        is created.
    unsafe : bool
        When True, feed networks raw numpy arrays assuming the feature
        ordering already matches (see ``is_unsafe``).
    settings : dict
        Plot/debug switches (e.g. 'plot', 'plot_pop', 'debug', ...).

    Returns
    -------
    tuple
        ``(1,)`` for an all-zero/NaN (fully stable) slice, otherwise
        ``(0, thresh2, flattened_per_network_stats)``.

    NOTE(review): relies on the module-level globals ``nns``, ``slicedim``,
    ``df``, ``style`` and ``nameconvert``.
    """
    index, slice_ = row
    feature = slice_.index.levels[1]
    #target = slice.loc[target_names]
    # Slice values are laid out target-major; reshape to (target, feature).
    target = slice_.values[:len(feature) * len(target_names)].reshape(len(target_names), len(feature))
    if np.all(np.logical_or(target == 0, np.isnan(target))):
        # Fully stable (or missing) slice: nothing to analyse.
        return (1,)
    else:
        # 156 µs ± 10.4 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) (no zerocolors)
        # One entry per (network, target) pair.
        thresh_nn = np.empty(len(target_names) * len(nns))
        thresh_nn_i = np.empty_like(thresh_nn, dtype='int64')
        popbacks = np.empty_like(thresh_nn)
        thresh1_misses = np.empty_like(thresh_nn)
        thresh2_misses = np.empty_like(thresh_nn)
        if settings['plot_zerocolors']:
            maxgam = slice_['maxgam']
        # Create slice, assume sorted
        # 14.8 µs ± 1.27 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
        # Dense grid along the sliced dimension for the NN curves.
        x = np.linspace(feature.values[0],
                        feature.values[-1],
                        200)
        #if plot:
        if not ax1 and settings['plot']:
            fig = plt.figure()
            # Layout depends on which side panels (population table /
            # slice-parameter table) are requested.
            if settings['plot_pop'] and settings['plot_slice']:
                gs = gridspec.GridSpec(2, 2, height_ratios=[10, 1], width_ratios=[5,1],
                                       left=0.05, right=0.95, wspace=0.05, hspace=0.05)
                ax2 = plt.subplot(gs[1,0])
                ax3 = plt.subplot(gs[0,1])
            if not settings['plot_pop'] and settings['plot_slice']:
                gs = gridspec.GridSpec(2, 1, height_ratios=[10, 2], width_ratios=[1],
                                       left=0.05, right=0.95, wspace=0.05, hspace=0.05)
                ax2 = plt.subplot(gs[1,0])
            if not settings['plot_pop'] and not settings['plot_slice']:
                gs = gridspec.GridSpec(1, 1, height_ratios=[1], width_ratios=[1],
                                       left=0.05, right=0.95, wspace=0.05, hspace=0.05)
            ax1 = plt.subplot(gs[0,0])
            #ax1.set_prop_cycle(cycler('color', ['#f1eef6','#d7b5d8','#df65b0','#dd1c77','#980043']))
            # http://tristen.ca/hcl-picker/#/clh/5/273/2A0A75/D59FEB
            #ax1.set_prop_cycle(cycler('color', ['#2A0A75','#6330B8','#9F63E2','#D59FEB']))
            if len(nns) == 1:
                color_range = np.array([.7])
            else:
                color_range = np.linspace(0, 0.9, len(nns))
            ax1.set_prop_cycle(cycler('color', plt.cm.plasma(color_range)))
            ax1.set_xlabel(nameconvert[slicedim])
            ax1.set_ylabel(nameconvert[list(nns.items())[0][1]._target_names[0]])
        if settings['calc_thresh1']:
            thresh1 = calculate_thresh1(x, feature, target, debug=settings['debug'])
            print('whyyy?')
        # 12.5 µs ± 970 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
        # thresh2 is taken from the first target; particle fluxes ('pf') can
        # be negative, hence the abs().
        if all(['ef' in name for name in target_names]):
            thresh2 = calculate_thresh2(feature.values, target[0,:], debug=settings['debug'])
        elif all(['pf' in name for name in target_names]):
            thresh2 = calculate_thresh2(feature.values, np.abs(target[0,:]), debug=settings['debug'])
        else:
            thresh2 = np.nan
            print('No thresh2!')
            embed()
            print('Weird stuff')
        if settings['plot'] and settings['plot_threshlines']:
            ax1.axvline(thresh2, c='black', linestyle='dashed')
        if settings['plot'] and settings['plot_threshslope']:
            if ~np.isnan(thresh2):
                pre_thresh = x[x <= thresh2]
                ax1.plot(pre_thresh, np.zeros_like(pre_thresh), c='gray', linestyle='dashed')
                post_thresh = x[x > thresh2]
                se = slice_.loc[target_names]
                se.index = se.index.droplevel()
                se = se.loc[se.index > thresh2].dropna()
                # Linear fit through the origin (shifted to thresh2).
                a = sc.optimize.curve_fit(lambda x, a: a * x, se.index-thresh2, se.values)[0][0]
                ax1.plot(post_thresh, a * (post_thresh-thresh2), c='gray', linestyle='dashed')
        # 13.7 µs ± 1.1 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
        # Build the NN input: all features fixed at the slice point except
        # the sliced dimension, which varies along x.
        if unsafe:
            slice_list = [np.full_like(x, val) for val in index]
            slicedim_idx = np.nonzero(list(nns.values())[0]._feature_names.values == slicedim)[0][0]
            slice_list.insert(slicedim_idx, x)
        else:
            slice_dict = {name: np.full_like(x, val) for name, val in zip(df.index.names, index)}
            slice_dict[slicedim] = x
        # Plot target points
        if settings['plot'] and settings['plot_slice']:
            table = ax2.table(cellText=[[nameconvert[name] for name in df.index.names],
                                        ['{:.2f}'.format(xx) for xx in index]],cellLoc='center')
            table.auto_set_font_size(False)
            table.scale(1, 1.5)
            #table.set_fontsize(20)
            ax2.axis('tight')
            ax2.axis('off')
            #fig.subplots_adjust(bottom=0.2, transform=ax1.transAxes)
        # Plot nn lines
        nn_preds = np.ndarray([x.shape[0], 0])
        for ii, (nn_index, nn) in enumerate(nns.items()):
            # Clip energy fluxes ('ef') below at zero; clamp particle
            # fluxes ('pf') to [-80, 80].
            if all(['ef' in name for name in nn._target_names]):
                clip_low = True
                low_bound = np.zeros((len(nn._target_names), 1))
                #high_bound = np.full((len(nn._target_names), 1), np.inf)
                clip_high = False
                high_bound = None
            elif all(['pf' in name for name in nn._target_names]):
                #raise NotImplementedError('Particle bounds')
                clip_low = False
                low_bound = np.full((len(nn._target_names), 1), -80)
                clip_high = False
                high_bound = np.full((len(nn._target_names), 1), 80)
            else:
                clip_low = False
                low_bound = None
                clip_high = False
                high_bound = None
                print('Mixed target!')
                embed()
                print('Weird stuff')
            if unsafe:
                nn_pred = nn.get_output(np.array(slice_list).T, clip_low=clip_low, low_bound=low_bound, clip_high=clip_high, high_bound=high_bound, safe=not unsafe, output_pandas=False)
            else:
                nn_pred = nn.get_output(pd.DataFrame(slice_dict), clip_low=clip_low, low_bound=low_bound, clip_high=clip_high, high_bound=high_bound, safe=not unsafe, output_pandas=True).values
            nn_preds = np.concatenate([nn_preds, nn_pred], axis=1)
        if settings['plot'] and settings['plot_nns']:
            lines = []
            if style == 'duo':
                # Duo networks predict target pairs: solid + dashed per network.
                labels = np.repeat([nn.label for nn in nns.values()], 2)
                for ii in range(0, nn_preds.shape[1], 2):
                    lines.append(ax1.plot(x, nn_preds[:, ii], label=labels[ii])[0])
                    lines.append(ax1.plot(x, nn_preds[:, ii+1], label=labels[ii+1], c=lines[-1].get_color(), linestyle='dashed')[0])
            else:
                for ii, (nn, row) in enumerate(zip(nns.values(), nn_preds.T)):
                    pass
                    lines.append(ax1.plot(x, row, label=nn.label)[0])
        matrix_style = False
        if matrix_style:
            # Vectorised threshold search (disabled; the loop below is used).
            thresh_i = (np.arange(nn_preds.shape[1]),nn_preds.shape[0] - 1 - (nn_preds[::-1,:]==0).argmax(0))[1]
            thresh = x[thresh_i]
            thresh[thresh == x[-1]] = np.nan
        else:
            # Per-prediction threshold: last sign change of the NN curve.
            for ii, row in enumerate(nn_preds.T):
                try:
                    if row[-1] == 0:
                        thresh_nn[ii] = np.nan
                    else:
                        thresh_i = thresh_nn_i[ii] = np.where(np.diff(np.sign(row)))[0][-1]
                        thresh_nn[ii] = x[thresh_i]
                except IndexError:
                    thresh_nn[ii] = np.nan
        if settings['plot'] and settings['plot_threshlines']:
            for ii, row in enumerate(thresh_nn):
                ax1.axvline(row, c=lines[ii].get_color(), linestyle='dotted')
                if settings['debug']:
                    print('network ', ii, 'threshold ', row)
        if matrix_style:
            masked = np.ma.masked_where(x[:, np.newaxis] > thresh, nn_preds)
            #popback_i = (masked.shape[0] - 1 - (masked[::1,:]!=0)).argmax(0)
            popback_i = masked.shape[0] - 1 - (masked.shape[0] - 1 - (masked[::-1,:]!=0)).argmin(0)
            popback = x[popback_i]
            popback[popback == x[-1]] = np.nan
        else:
            # 'Popback': last nonzero prediction before the threshold, i.e.
            # where the NN spuriously predicts flux inside the stable region.
            for ii, row in enumerate(nn_preds.T):
                if not np.isnan(thresh_nn[ii]):
                    try:
                        popback_i = np.flatnonzero(row[:thresh_nn_i[ii]])
                        popbacks[ii] = x[popback_i[-1]]
                    except (IndexError):
                        popbacks[ii] = np.nan
                else:
                    popbacks[ii] = np.nan
        # 5.16 µs ± 188 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
        # 'Wobble': curvature (2nd difference) of each NN curve, total and
        # restricted to the unstable side of the threshold.
        wobble = np.abs(np.diff(nn_preds, n=2,axis=0))
        wobble_unstab = np.array([np.mean(col[ind:]) for ind, col in zip(thresh_nn_i + 1, wobble.T)])
        wobble_tot = np.mean(wobble, axis=0)
        if settings['plot'] and settings['plot_pop']:
            thresh2_misses = thresh_nn - thresh2
            thresh2_popback = popbacks - thresh2
            slice_stats = np.array([thresh2_misses, thresh2_popback, np.log10(wobble_tot), np.log10(wobble_unstab)]).T
            slice_strings = np.array(['{:.1f}'.format(xx) for xx in slice_stats.reshape(slice_stats.size)])
            slice_strings = slice_strings.reshape(slice_stats.shape)
            slice_strings = np.insert(slice_strings, 0, ['thre_mis', 'pop_mis', 'wobble_tot', 'wobble_unstb'], axis=0)
            table = ax3.table(cellText=slice_strings, loc='center')
            table.auto_set_font_size(False)
            ax3.axis('tight')
            ax3.axis('off')
            if settings['debug']:
                print(slice_stats.flatten())
        if settings['plot']:
            if settings['plot_zerocolors']:
                color = target.copy()
                color[(target == 0) & (maxgam == 0)] = 'green'
                color[(target != 0) & (maxgam == 0)] = 'red'
                color[(target == 0) & (maxgam != 0)] = 'magenta'
                color[(target != 0) & (maxgam != 0)] = 'blue'
            else:
                color='blue'
            if settings['hide_qualikiz']:
                color='white'
                zorder=1
                label=''
            else:
                zorder=1000
                label = 'QuaLiKiz'
                #label = 'Turbulence model'
                #label=''
            markers = ['x', '+']
            for column, marker in zip(target, markers):
                # Nonzero points as markers; zero (stable) points as open circles.
                ax1.scatter(feature[column != 0],
                            column[column != 0], c=color, label=label, marker=marker, zorder=zorder)
                ax1.scatter(feature[column==0],
                            column[column==0], edgecolors=color, marker='o', facecolors='none', zorder=zorder)
        # Plot regression
        # NOTE(review): `thresh_pred` is local to calculate_thresh1 and not
        # defined in this scope, and `thresh1` only exists when
        # settings['calc_thresh1'] is True — this branch would raise
        # NameError; presumably plot_thresh1line is always False. Confirm.
        if settings['plot'] and settings['plot_thresh1line'] and not np.isnan(thresh1):
            #plot_min = ax1.get_ylim()[0]
            plot_min = -0.1
            x_plot = x[(thresh_pred > plot_min) & (thresh_pred < ax1.get_ylim()[1])]
            y_plot = thresh_pred[(thresh_pred > plot_min) & (thresh_pred < ax1.get_ylim()[1])]
            ax1.plot(x_plot, y_plot, c='gray', linestyle='dotted')
            ax1.plot(x[x< thresh1], np.zeros_like(x[x< thresh1]), c='gray', linestyle='dotted')
            #ax1.axvline(thresh1, c='black', linestyle='dotted')
        slice_res = np.array([thresh_nn, popbacks, wobble_tot, wobble_unstab]).T
        if settings['plot']:
            ax1.legend()
            ax1.set_ylim(bottom=min(ax1.get_ylim()[0], 0))
            plt.show()
            fig.savefig('slice.pdf', format='pdf', bbox_inches='tight')
            qlk_data = pd.DataFrame(target.T, columns=target_names, index=feature)
            cols = pd.MultiIndex.from_product([[nn.label for nn in nns.values()], target_names])
            nn_data = pd.DataFrame(nn_preds, columns=cols)
            nn_data.index = x
            nn_data.index.name = feature.name
            slice_data = pd.Series(dict(zip(df.index.names, index)))
            slice_latex = (' {!s} &' * len(df.index.names)).format(*[nameconvert[name] for name in df.index.names]).strip(' &')
            slice_latex += ('\\\\\n' + ' {:.2f} &' * len(index)).format(*index).strip(' &')
            embed()
            plt.close(fig)
        return (0, thresh2, slice_res.flatten())
#sliced += 1
#if sliced % 1000 == 0:
# print(sliced, 'took ', time.time() - starttime, ' seconds')
def extract_stats(totstats, style):
    """Summarise per-slice statistics into per-network aggregate scores.

    Parameters
    ----------
    totstats : pd.DataFrame
        Per-slice statistics with 3-level columns
        (network_label, target, stat), including a 'QLK' pseudo-network
        holding the reference thresholds.
    style : str
        'duo' enables the extra ion/electron threshold-mismatch stats.

    Returns
    -------
    (results, duo_results) : (pd.DataFrame, pd.DataFrame)
        Aggregates indexed by (network_label, target); ``duo_results`` is
        empty unless style == 'duo'.
    """
    df = totstats.copy()
    # Move the stat name to the front so df['thresh'] etc. selects one
    # statistic across all networks/targets.
    df = df.reorder_levels([2,0,1], axis=1)
    results = pd.DataFrame()
    # NOTE(review): zip pairs 'rel' only with 'thresh' and 'abs' only with
    # 'pop' — the other two combinations are never computed; confirm this
    # is intended (itertools.product would give all four).
    for relabs, measure in zip(['rel', 'abs'], ['thresh', 'pop']):
        df2 = df[measure]
        qlk_data = df2['QLK']
        network_data = df2.drop('QLK', axis=1)
        if relabs == 'rel':
            # Relative miss: (nn - QLK) / QLK, aligned on the target level.
            mis = network_data.subtract(qlk_data, level=1).divide(qlk_data, level=1)
        elif relabs == 'abs':
            mis = network_data.subtract(qlk_data, level=1)
        # Central 95% interval of the misses.
        quant1 = 0.025
        quant2 = 1 - quant1
        quant = mis.quantile([quant1, quant2])
        results['_'.join([measure, relabs, 'mis', 'median'])] = mis.median()
        results['_'.join([measure, relabs, 'mis', '95width'])] = quant.loc[quant2] - quant.loc[quant1]
        results['_'.join(['no', measure, 'frac'])] = mis.isnull().sum() / len(mis)
    results['wobble_unstab'] = df['wobble_unstab'].mean()
    results['wobble_tot'] = df['wobble_tot'].mean()
    if style == 'duo':
        duo_results = pd.DataFrame()
        measure = 'thresh'
        df2 = df[measure]
        network_data = df2.drop('QLK', axis=1)
        network_data = network_data.reorder_levels([1, 0], axis=1)
        efelike_name = network_data.columns[1][0]
        efilike_name = network_data.columns[0][0]
        # Mismatch between the ion-like and electron-like threshold of each pair.
        # NOTE(review): quant1/quant2 intentionally(?) reused from the last
        # loop iteration above.
        mis = network_data[efilike_name] - network_data[efelike_name]
        quant = mis.quantile([quant1, quant2])
        duo_results['dual_thresh_mismatch_median'] = mis.median()
        duo_results['dual_thresh_mismatch_95width'] = quant.loc[quant2] - quant.loc[quant1]
        duo_results['no_dual_thresh_frac'] = mis.isnull().sum() / len(mis)
    else:
        duo_results = pd.DataFrame()
    return results, duo_results
def extract_nn_stats(results, duo_results, nns, frac, submit_to_nndb=False):
    """Store per-network aggregate statistics as PostprocessSlice rows.

    Parameters
    ----------
    results : pd.DataFrame
        Aggregate statistics per (network label, target), from
        ``extract_stats``.
    duo_results : pd.DataFrame
        Duo-specific statistics; may be empty.
    nns : OrderedDict
        Mapping of network label -> network, used for target ordering.
    frac : float
        Fraction of the dataset the statistics were computed on.
    submit_to_nndb : bool
        When True, persist each PostprocessSlice row to the database.

    Raises
    ------
    Exception
        If a network label does not start with a known network class name.
    """
    db.connect()
    for network_name, res in results.unstack().iterrows():
        network_class, network_number = network_name.split('_')
        nn = nns[network_name]
        # Map the class name onto the proper NNDB foreign-key field.
        if network_class == 'Network':
            res_dict = {'network': network_number}
        elif network_class == 'ComboNetwork':
            res_dict = {'combo_network': network_number}
        elif network_class == 'MultiNetwork':
            res_dict = {'multi_network': network_number}
        else:
            # BUG FIX: the original fell through with res_dict undefined
            # (NameError) and then called ''.join('Error! ...', network_name),
            # which itself raises TypeError (str.join takes one iterable).
            raise Exception('Error! No network found for {!s}'.format(network_name))
        res_dict['frac'] = frac
        # Re-order each statistic to the network's own target ordering.
        # .items() instead of .iteritems(): the latter was removed in
        # pandas 2.0; .items() behaves identically here.
        for stat, val in res.unstack(level=0).items():
            res_dict[stat] = val.loc[nn._target_names].values
        try:
            duo_res = duo_results.loc[network_name]
            res_dict.update(duo_res)
        except KeyError:
            # No duo statistics for this network (e.g. mono style).
            pass
        postprocess_slice = PostprocessSlice(**res_dict)
        if submit_to_nndb is True:
            postprocess_slice.save()
    db.close()
if __name__ == '__main__':
    # NOTE: the consecutive reassignments below are debug toggles left in
    # on purpose; the last value wins (nn_set='best', mode='quick').
    nn_set = 'duo'
    nn_set = 'best'
    mode = 'pretty'
    mode = 'debug'
    submit_to_nndb = False
    mode = 'quick'
    submit_to_nndb = True
    store = pd.HDFStore('../gen2_7D_nions0_flat.h5')
    #store = pd.HDFStore('../sane_gen2_7D_nions0_flat_filter7.h5')
    #data = data.join(store['megarun1/combo'])
    #slicedim, style, nn_list = populate_nn_list(nn_set)
    slicedim, style, nns = nns_from_NNDB(100, only_dim=7)
    #slicedim, style, nns = nns_from_manual()
    #slicedim = 'An'
    #nns = nns_from_nn_list(nn_list, slicedim, labels=labels)
    if style != 'similar':
        labels = True
    else:
        labels = False
    # Quick mode keeps every value; otherwise clamp extreme fluxes.
    if mode == 'quick':
        filter_geq = -np.inf
        filter_less = np.inf
    else:
        filter_geq = -120
        filter_less = 120
    itor = None
    frac = 0.05
    df, target_names = prep_df(store, nns, slicedim, filter_less=filter_less, filter_geq=filter_geq, slice=itor, frac=frac)
    gc.collect()
    unsafe = is_unsafe(df, nns, slicedim)
    if not unsafe:
        print('Warning! Cannot use unsafe mode')
    settings = mode_to_settings(mode)
    if mode == 'pretty':
        plt.style.use('./thesis.mplstyle')
        mpl.rcParams.update({'font.size': 16})
    else:
        nameconvert = {name: name for name in nameconvert}
    if settings['parallel']:
        num_processes = cpu_count()
        chunk_size = int(df.shape[0]/num_processes)
        # BUG FIX: DataFrame.ix was removed in pandas 1.0; .loc is the
        # label-based equivalent for this index-slice lookup.
        chunks = [df.loc[df.index[i:i + chunk_size]] for i in range(0, df.shape[0], chunk_size)]
        pool = Pool(processes=num_processes)
    print('Starting {:d} slices for {:d} networks'.format(len(df), len(nns)))
    starttime = time.time()
    #n=20
    #newind = np.hstack([np.repeat(np.array([*df.index]), n, axis=0), np.tile(np.linspace(df.columns.levels[1][0], df.columns.levels[1][-1], n), len(df))[:, None]])
    #embed()
    if not settings['parallel']:
        # BUG FIX: wrap the serial result in a list so that chain(*results)
        # below yields per-row tuples, matching pool.map's list-of-lists
        # shape (previously the tuples themselves were unpacked and
        # result[0] raised TypeError).
        results = [process_chunk(target_names, df, settings=settings, unsafe=unsafe)]
    else:
        results = pool.map(partial(process_chunk, target_names, settings=settings, unsafe=unsafe), chunks)
    #for row in df.iterrows():
    #    process_row(row)
    print(len(df), 'took ', time.time() - starttime, ' seconds')
    zero_slices = 0
    totstats = []
    qlk_thresh = []
    # Separate fully-stable slices (result code 1) from analysed ones.
    for result in chain(*results):
        if result[0] == 1:
            zero_slices += 1
        else:
            totstats.append(result[2])
            qlk_thresh.append(result[1])
    stats = ['thresh', 'pop', 'wobble_tot', 'wobble_unstab']
    totstats = pd.DataFrame(totstats, columns=pd.MultiIndex.from_tuples(list(product([nn.label for nn in nns.values()], target_names, stats))))
    # Broadcast the QuaLiKiz reference threshold over all stat columns of a
    # 'QLK' pseudo-network so extract_stats can subtract it per level.
    qlk_columns = list(product(['QLK'], target_names, stats))
    qlk_data = np.full([len(totstats), len(qlk_columns)], np.nan)
    qlk_data[:, ::] = np.tile(qlk_thresh, np.array([len(qlk_columns),1])).T
    qlk_data = pd.DataFrame(qlk_data, columns=pd.MultiIndex.from_tuples(qlk_columns))
    totstats = totstats.join(qlk_data)
    res, duo_res = extract_stats(totstats, style)
    extract_nn_stats(res, duo_res, nns, frac, submit_to_nndb=submit_to_nndb)
    #print('WARNING! If you continue, you will overwrite ', 'totstats_' + style + '.pkl')
    #embed()
    #totstats._metadata = {'zero_slices': zero_slices}
    #with open('totstats_' + style + '.pkl', 'wb') as file_:
    #    pickle.dump(totstats, file_)
| [
"itertools.chain",
"numpy.log10",
"load_data.load_nn",
"re.compile",
"multiprocessing.cpu_count",
"model.no_elements_in_list",
"model.any_element_in_list",
"numpy.array",
"pandas.MultiIndex.from_tuples",
"pandas.HDFStore",
"sys.path.append",
"peewee.AsIs",
"matplotlib.pyplot.cm.plasma",
"n... | [((667, 697), 'sys.path.append', 'sys.path.append', (['networks_path'], {}), '(networks_path)\n', (682, 697), False, 'import sys\n'), ((698, 724), 'sys.path.append', 'sys.path.append', (['NNDB_path'], {}), '(NNDB_path)\n', (713, 724), False, 'import sys\n'), ((725, 755), 'sys.path.append', 'sys.path.append', (['training_path'], {}), '(training_path)\n', (740, 755), False, 'import sys\n'), ((756, 783), 'sys.path.append', 'sys.path.append', (['qlk4D_path'], {}), '(qlk4D_path)\n', (771, 783), False, 'import sys\n'), ((1093, 1107), 'matplotlib.use', 'mpl.use', (['"""pdf"""'], {}), "('pdf')\n", (1100, 1107), True, 'import matplotlib as mpl\n'), ((4969, 4981), 'model.db.connect', 'db.connect', ([], {}), '()\n', (4979, 4981), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((6433, 6446), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6444, 6446), False, 'from collections import OrderedDict\n'), ((6626, 6636), 'model.db.close', 'db.close', ([], {}), '()\n', (6634, 6636), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((9562, 9575), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9573, 9575), False, 'from collections import OrderedDict\n'), ((9810, 9823), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9821, 9823), False, 'from collections import OrderedDict\n'), ((11221, 11231), 'model.db.close', 'db.close', ([], {}), '()\n', (11229, 11231), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((29361, 29375), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (29373, 29375), True, 'import pandas as pd\n'), ((31061, 31073), 'model.db.connect', 'db.connect', ([], {}), '()\n', (31071, 31073), False, 'from model import Network, NetworkJSON, 
PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((32139, 32149), 'model.db.close', 'db.close', ([], {}), '()\n', (32147, 32149), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((32341, 32381), 'pandas.HDFStore', 'pd.HDFStore', (['"""../gen2_7D_nions0_flat.h5"""'], {}), "('../gen2_7D_nions0_flat.h5')\n", (32352, 32381), True, 'import pandas as pd\n'), ((33126, 33138), 'gc.collect', 'gc.collect', ([], {}), '()\n', (33136, 33138), False, 'import gc\n'), ((33826, 33837), 'time.time', 'time.time', ([], {}), '()\n', (33835, 33837), False, 'import time\n'), ((34459, 34474), 'itertools.chain', 'chain', (['*results'], {}), '(*results)\n', (34464, 34474), False, 'from itertools import product, chain\n'), ((350, 375), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (365, 375), False, 'import os\n'), ((439, 464), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (454, 464), False, 'import os\n'), ((528, 553), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (543, 553), False, 'import os\n'), ((618, 643), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (633, 643), False, 'import os\n'), ((6744, 6857), 'collections.OrderedDict', 'OrderedDict', (["[(61, '$c_{L2} = 0.0$'), (37, '$c_{L2} = 0.1$'), (49, '$c_{L2} = 0.5$'), (\n 53, '$c_{L2} = 2.0$')]"], {}), "([(61, '$c_{L2} = 0.0$'), (37, '$c_{L2} = 0.1$'), (49,\n '$c_{L2} = 0.5$'), (53, '$c_{L2} = 2.0$')])\n", (6755, 6857), False, 'from collections import OrderedDict\n'), ((9652, 9669), 'load_data.load_nn', 'load_nn', (['nn_index'], {}), '(nn_index)\n', (9659, 9669), False, 'from load_data import load_data, load_nn, prettify_df\n'), ((11587, 11612), 'numpy.log10', 'np.log10', (["input['Nustar']"], {}), "(input['Nustar'])\n", (11595, 11612), True, 'import numpy as np\n'), ((11873, 11904), 
'numpy.full_like', 'np.full_like', (["input['Ati']", '(1.0)'], {}), "(input['Ati'], 1.0)\n", (11885, 11904), True, 'import numpy as np\n'), ((14171, 14188), 'train_NDNN.shuffle_panda', 'shuffle_panda', (['df'], {}), '(df)\n', (14184, 14188), False, 'from train_NDNN import shuffle_panda\n'), ((16967, 17006), 'numpy.empty_like', 'np.empty_like', (['thresh_nn'], {'dtype': '"""int64"""'}), "(thresh_nn, dtype='int64')\n", (16980, 17006), True, 'import numpy as np\n'), ((17026, 17050), 'numpy.empty_like', 'np.empty_like', (['thresh_nn'], {}), '(thresh_nn)\n', (17039, 17050), True, 'import numpy as np\n'), ((17076, 17100), 'numpy.empty_like', 'np.empty_like', (['thresh_nn'], {}), '(thresh_nn)\n', (17089, 17100), True, 'import numpy as np\n'), ((17126, 17150), 'numpy.empty_like', 'np.empty_like', (['thresh_nn'], {}), '(thresh_nn)\n', (17139, 17150), True, 'import numpy as np\n'), ((17365, 17420), 'numpy.linspace', 'np.linspace', (['feature.values[0]', 'feature.values[-1]', '(200)'], {}), '(feature.values[0], feature.values[-1], 200)\n', (17376, 17420), True, 'import numpy as np\n'), ((21516, 21543), 'numpy.ndarray', 'np.ndarray', (['[x.shape[0], 0]'], {}), '([x.shape[0], 0])\n', (21526, 21543), True, 'import numpy as np\n'), ((25629, 25652), 'numpy.mean', 'np.mean', (['wobble'], {'axis': '(0)'}), '(wobble, axis=0)\n', (25636, 25652), True, 'import numpy as np\n'), ((30268, 30282), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30280, 30282), True, 'import pandas as pd\n'), ((30932, 30946), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30944, 30946), True, 'import pandas as pd\n'), ((32034, 32062), 'model.PostprocessSlice', 'PostprocessSlice', ([], {}), '(**res_dict)\n', (32050, 32062), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((33321, 33355), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""./thesis.mplstyle"""'], {}), "('./thesis.mplstyle')\n", 
(33334, 33355), True, 'import matplotlib.pyplot as plt\n'), ((33364, 33402), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (33383, 33402), True, 'import matplotlib as mpl\n'), ((33526, 33537), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (33535, 33537), False, 'from multiprocessing import Pool, cpu_count\n'), ((33701, 33730), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_processes'}), '(processes=num_processes)\n', (33705, 33730), False, 'from multiprocessing import Pool, cpu_count\n'), ((34856, 34893), 'itertools.product', 'product', (["['QLK']", 'target_names', 'stats'], {}), "(['QLK'], target_names, stats)\n", (34863, 34893), False, 'from itertools import product, chain\n'), ((4402, 4448), 'model.no_elements_in_list', 'no_elements_in_list', (['cls', '"""target_names"""', 'tags'], {}), "(cls, 'target_names', tags)\n", (4421, 4448), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((4504, 4567), 'model.any_element_in_list', 'any_element_in_list', (['cls', '"""target_names"""', "['TEM', 'ITG', 'ETG']"], {}), "(cls, 'target_names', ['TEM', 'ITG', 'ETG'])\n", (4523, 4567), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((4812, 4839), 'peewee.AsIs', 'AsIs', (['network.feature_names'], {}), '(network.feature_names)\n', (4816, 4839), False, 'from peewee import AsIs, fn, SQL\n'), ((7266, 7508), 'collections.OrderedDict', 'OrderedDict', (["[(65, 'neurons = $(10, 10)$'), (64, 'neurons = $(30, 30)$'), (73,\n 'neurons = $(30, 30, 30)$'), (83, 'neurons = $(45, 45)$'), (34,\n 'neurons = $(60, 60)$'), (38, 'neurons = $(80, 80)$'), (66,\n 'neurons = $(120, 120)$')]"], {}), "([(65, 'neurons = $(10, 10)$'), (64, 'neurons = $(30, 30)$'), (\n 73, 'neurons = $(30, 30, 30)$'), (83, 'neurons = $(45, 45)$'), 
(34,\n 'neurons = $(60, 60)$'), (38, 'neurons = $(80, 80)$'), (66,\n 'neurons = $(120, 120)$')])\n", (7277, 7508), False, 'from collections import OrderedDict\n'), ((12143, 12161), 'numpy.log10', 'np.log10', (['(0.009995)'], {}), '(0.009995)\n', (12151, 12161), True, 'import numpy as np\n'), ((16737, 16753), 'numpy.isnan', 'np.isnan', (['target'], {}), '(target)\n', (16745, 16753), True, 'import numpy as np\n'), ((17546, 17558), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17556, 17558), True, 'import matplotlib.pyplot as plt\n'), ((18421, 18442), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (18432, 18442), True, 'import matplotlib.pyplot as plt\n'), ((22937, 22980), 'numpy.concatenate', 'np.concatenate', (['[nn_preds, nn_pred]'], {'axis': '(1)'}), '([nn_preds, nn_pred], axis=1)\n', (22951, 22980), True, 'import numpy as np\n'), ((24631, 24686), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(x[:, np.newaxis] > thresh)', 'nn_preds'], {}), '(x[:, np.newaxis] > thresh, nn_preds)\n', (24649, 24686), True, 'import numpy as np\n'), ((25475, 25505), 'numpy.diff', 'np.diff', (['nn_preds'], {'n': '(2)', 'axis': '(0)'}), '(nn_preds, n=2, axis=0)\n', (25482, 25505), True, 'import numpy as np\n'), ((26129, 26223), 'numpy.insert', 'np.insert', (['slice_strings', '(0)', "['thre_mis', 'pop_mis', 'wobble_tot', 'wobble_unstb']"], {'axis': '(0)'}), "(slice_strings, 0, ['thre_mis', 'pop_mis', 'wobble_tot',\n 'wobble_unstb'], axis=0)\n", (26138, 26223), True, 'import numpy as np\n'), ((28157, 28215), 'numpy.array', 'np.array', (['[thresh_nn, popbacks, wobble_tot, wobble_unstab]'], {}), '([thresh_nn, popbacks, wobble_tot, wobble_unstab])\n', (28165, 28215), True, 'import numpy as np\n'), ((28343, 28353), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28351, 28353), True, 'import matplotlib.pyplot as plt\n'), ((28449, 28508), 'pandas.DataFrame', 'pd.DataFrame', (['target.T'], {'columns': 'target_names', 'index': 
'feature'}), '(target.T, columns=target_names, index=feature)\n', (28461, 28508), True, 'import pandas as pd\n'), ((28628, 28664), 'pandas.DataFrame', 'pd.DataFrame', (['nn_preds'], {'columns': 'cols'}), '(nn_preds, columns=cols)\n', (28640, 28664), True, 'import pandas as pd\n'), ((29043, 29050), 'IPython.embed', 'embed', ([], {}), '()\n', (29048, 29050), False, 'from IPython import embed\n'), ((29063, 29077), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (29072, 29077), True, 'import matplotlib.pyplot as plt\n'), ((34180, 34250), 'functools.partial', 'partial', (['process_chunk', 'target_names'], {'settings': 'settings', 'unsafe': 'unsafe'}), '(process_chunk, target_names, settings=settings, unsafe=unsafe)\n', (34187, 34250), False, 'from functools import partial\n'), ((34345, 34356), 'time.time', 'time.time', ([], {}), '()\n', (34354, 34356), False, 'import time\n'), ((35083, 35121), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['qlk_columns'], {}), '(qlk_columns)\n', (35108, 35121), True, 'import pandas as pd\n'), ((5227, 5265), 're.compile', 're.compile', (['"""^(.f)(.)(ITG|ETG|TEM)_GB"""'], {}), "('^(.f)(.)(ITG|ETG|TEM)_GB')\n", (5237, 5265), False, 'import re\n'), ((5317, 5355), 're.compile', 're.compile', (['"""^(.f)(.)(ITG|ETG|TEM)_GB"""'], {}), "('^(.f)(.)(ITG|ETG|TEM)_GB')\n", (5327, 5355), False, 'import re\n'), ((7934, 8022), 'collections.OrderedDict', 'OrderedDict', (["[(37, '$max(\\\\chi_{ETG,e}) = 60$'), (60, '$max(\\\\chi_{ETG,e}) = 100$')]"], {}), "([(37, '$max(\\\\chi_{ETG,e}) = 60$'), (60,\n '$max(\\\\chi_{ETG,e}) = 100$')])\n", (7945, 8022), False, 'from collections import OrderedDict\n'), ((10650, 10674), 'model.ComboNetwork.by_id', 'ComboNetwork.by_id', (['(3333)'], {}), '(3333)\n', (10668, 10674), False, 'from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db\n'), ((12450, 12502), 'numpy.isclose', 'np.isclose', (["input['x']", 
'(0.45)'], {'atol': '(1e-05)', 'rtol': '(0.001)'}), "(input['x'], 0.45, atol=1e-05, rtol=0.001)\n", (12460, 12502), True, 'import numpy as np\n'), ((14772, 14808), 'numpy.all', 'np.all', (['(varlist == nn._feature_names)'], {}), '(varlist == nn._feature_names)\n', (14778, 14808), True, 'import numpy as np\n'), ((15711, 15732), 'numpy.where', 'np.where', (['(target == 0)'], {}), '(target == 0)\n', (15719, 15732), True, 'import numpy as np\n'), ((17644, 17765), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {'height_ratios': '[10, 1]', 'width_ratios': '[5, 1]', 'left': '(0.05)', 'right': '(0.95)', 'wspace': '(0.05)', 'hspace': '(0.05)'}), '(2, 2, height_ratios=[10, 1], width_ratios=[5, 1], left=\n 0.05, right=0.95, wspace=0.05, hspace=0.05)\n', (17661, 17765), False, 'from matplotlib import gridspec, cycler\n'), ((17818, 17839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (17829, 17839), True, 'import matplotlib.pyplot as plt\n'), ((17861, 17882), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (17872, 17882), True, 'import matplotlib.pyplot as plt\n'), ((17971, 18088), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[10, 2]', 'width_ratios': '[1]', 'left': '(0.05)', 'right': '(0.95)', 'wspace': '(0.05)', 'hspace': '(0.05)'}), '(2, 1, height_ratios=[10, 2], width_ratios=[1], left=0.05,\n right=0.95, wspace=0.05, hspace=0.05)\n', (17988, 18088), False, 'from matplotlib import gridspec, cycler\n'), ((18143, 18164), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (18154, 18164), True, 'import matplotlib.pyplot as plt\n'), ((18257, 18370), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {'height_ratios': '[1]', 'width_ratios': '[1]', 'left': '(0.05)', 'right': '(0.95)', 'wspace': '(0.05)', 'hspace': '(0.05)'}), '(1, 1, height_ratios=[1], width_ratios=[1], left=0.05,\n right=0.95, 
wspace=0.05, hspace=0.05)\n', (18274, 18370), False, 'from matplotlib import gridspec, cycler\n'), ((18765, 18780), 'numpy.array', 'np.array', (['[0.7]'], {}), '([0.7])\n', (18773, 18780), True, 'import numpy as np\n'), ((19701, 19708), 'IPython.embed', 'embed', ([], {}), '()\n', (19706, 19708), False, 'from IPython import embed\n'), ((19948, 19965), 'numpy.isnan', 'np.isnan', (['thresh2'], {}), '(thresh2)\n', (19956, 19965), True, 'import numpy as np\n'), ((20626, 20646), 'numpy.full_like', 'np.full_like', (['x', 'val'], {}), '(x, val)\n', (20638, 20646), True, 'import numpy as np\n'), ((20859, 20879), 'numpy.full_like', 'np.full_like', (['x', 'val'], {}), '(x, val)\n', (20871, 20879), True, 'import numpy as np\n'), ((25540, 25558), 'numpy.mean', 'np.mean', (['col[ind:]'], {}), '(col[ind:])\n', (25547, 25558), True, 'import numpy as np\n'), ((27639, 27656), 'numpy.isnan', 'np.isnan', (['thresh1'], {}), '(thresh1)\n', (27647, 27656), True, 'import numpy as np\n'), ((28011, 28040), 'numpy.zeros_like', 'np.zeros_like', (['x[x < thresh1]'], {}), '(x[x < thresh1])\n', (28024, 28040), True, 'import numpy as np\n'), ((4738, 4764), 'peewee.AsIs', 'AsIs', (['network.target_names'], {}), '(network.target_names)\n', (4742, 4764), False, 'from peewee import AsIs, fn, SQL\n'), ((5909, 5943), 're.compile', 're.compile', (['"""^.f.(ITG|ETG|TEM)_GB"""'], {}), "('^.f.(ITG|ETG|TEM)_GB')\n", (5919, 5943), False, 'import re\n'), ((8145, 8208), 'collections.OrderedDict', 'OrderedDict', (["[(62, 'goodness = mabse'), (37, 'goodness = mse')]"], {}), "([(62, 'goodness = mabse'), (37, 'goodness = mse')])\n", (8156, 8208), False, 'from collections import OrderedDict\n'), ((12316, 12370), 'numpy.isclose', 'np.isclose', (["input['Ate']", '(5.75)'], {'atol': '(1e-05)', 'rtol': '(0.001)'}), "(input['Ate'], 5.75, atol=1e-05, rtol=0.001)\n", (12326, 12370), True, 'import numpy as np\n'), ((12387, 12437), 'numpy.isclose', 'np.isclose', (["input['An']", '(2)'], {'atol': '(1e-05)', 'rtol': 
'(0.001)'}), "(input['An'], 2, atol=1e-05, rtol=0.001)\n", (12397, 12437), True, 'import numpy as np\n'), ((18905, 18931), 'matplotlib.pyplot.cm.plasma', 'plt.cm.plasma', (['color_range'], {}), '(color_range)\n', (18918, 18931), True, 'import matplotlib.pyplot as plt\n'), ((19567, 19587), 'numpy.abs', 'np.abs', (['target[0, :]'], {}), '(target[0, :])\n', (19573, 19587), True, 'import numpy as np\n'), ((20050, 20075), 'numpy.zeros_like', 'np.zeros_like', (['pre_thresh'], {}), '(pre_thresh)\n', (20063, 20075), True, 'import numpy as np\n'), ((22448, 22455), 'IPython.embed', 'embed', ([], {}), '()\n', (22453, 22455), False, 'from IPython import embed\n'), ((23709, 23737), 'numpy.arange', 'np.arange', (['nn_preds.shape[1]'], {}), '(nn_preds.shape[1])\n', (23718, 23737), True, 'import numpy as np\n'), ((25034, 25057), 'numpy.isnan', 'np.isnan', (['thresh_nn[ii]'], {}), '(thresh_nn[ii])\n', (25042, 25057), True, 'import numpy as np\n'), ((3976, 4037), 'peewee.SQL', 'SQL', (['"""(array_to_string(target_names, \',\') like %s)"""', "['%pf%']"], {}), '("(array_to_string(target_names, \',\') like %s)", [\'%pf%\'])\n', (3979, 4037), False, 'from peewee import AsIs, fn, SQL\n'), ((4087, 4148), 'peewee.SQL', 'SQL', (['"""(array_to_string(target_names, \',\') like %s)"""', "['%ef%']"], {}), '("(array_to_string(target_names, \',\') like %s)", [\'%ef%\'])\n', (4090, 4148), False, 'from peewee import AsIs, fn, SQL\n'), ((8339, 8409), 'collections.OrderedDict', 'OrderedDict', (["[(37, 'stop measure = loss'), (18, 'stop measure = MSE')]"], {}), "([(37, 'stop measure = loss'), (18, 'stop measure = MSE')])\n", (8350, 8409), False, 'from collections import OrderedDict\n'), ((20324, 20396), 'scipy.optimize.curve_fit', 'sc.optimize.curve_fit', (['(lambda x, a: a * x)', '(se.index - thresh2)', 'se.values'], {}), '(lambda x, a: a * x, se.index - thresh2, se.values)\n', (20345, 20396), True, 'import scipy as sc\n'), ((22556, 22576), 'numpy.array', 'np.array', (['slice_list'], {}), 
'(slice_list)\n', (22564, 22576), True, 'import numpy as np\n'), ((22760, 22784), 'pandas.DataFrame', 'pd.DataFrame', (['slice_dict'], {}), '(slice_dict)\n', (22772, 22784), True, 'import pandas as pd\n'), ((25120, 25157), 'numpy.flatnonzero', 'np.flatnonzero', (['row[:thresh_nn_i[ii]]'], {}), '(row[:thresh_nn_i[ii]])\n', (25134, 25157), True, 'import numpy as np\n'), ((25874, 25894), 'numpy.log10', 'np.log10', (['wobble_tot'], {}), '(wobble_tot)\n', (25882, 25894), True, 'import numpy as np\n'), ((25896, 25919), 'numpy.log10', 'np.log10', (['wobble_unstab'], {}), '(wobble_unstab)\n', (25904, 25919), True, 'import numpy as np\n'), ((8596, 8722), 'collections.OrderedDict', 'OrderedDict', (["[(37, '37'), (67, '67'), (68, '68'), (69, '69'), (70, '70'), (71, '71'), (\n 72, '72'), (73, '73'), (74, '74')]"], {}), "([(37, '37'), (67, '67'), (68, '68'), (69, '69'), (70, '70'), (\n 71, '71'), (72, '72'), (73, '73'), (74, '74')])\n", (8607, 8722), False, 'from collections import OrderedDict\n'), ((9124, 9147), 'collections.OrderedDict', 'OrderedDict', (["[(46, '')]"], {}), "([(46, '')])\n", (9135, 9147), False, 'from collections import OrderedDict\n'), ((9174, 9197), 'collections.OrderedDict', 'OrderedDict', (["[(88, '')]"], {}), "([(88, '')])\n", (9185, 9197), False, 'from collections import OrderedDict\n'), ((15784, 15810), 'numpy.isnan', 'np.isnan', (['target[idx + 1:]'], {}), '(target[idx + 1:])\n', (15792, 15810), True, 'import numpy as np\n'), ((9299, 9362), 'collections.OrderedDict', 'OrderedDict', (["[(205, 'es_20'), (204, 'es_5'), (203, 'es_wrong')]"], {}), "([(205, 'es_20'), (204, 'es_5'), (203, 'es_wrong')])\n", (9310, 9362), False, 'from collections import OrderedDict\n'), ((24141, 24153), 'numpy.sign', 'np.sign', (['row'], {}), '(row)\n', (24148, 24153), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import random, math, os, time
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Set the random seeds for reproducibility of the `random` and torch RNGs.
# NOTE(review): numpy's RNG is not seeded here -- if np.random is used
# elsewhere in this project, add np.random.seed(SEED) as well.
SEED = 1234
random.seed(SEED)
torch.manual_seed(SEED)
# Run on the GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
########## Support
def init_weights(m):
    """Initialise every parameter of *m*: weights ~ N(0, 0.01), biases to 0."""
    for pname, p in m.named_parameters():
        if 'weight' not in pname:
            # Anything that is not a weight (i.e. a bias) is zeroed.
            nn.init.constant_(p.data, 0)
        else:
            nn.init.normal_(p.data, mean=0, std=0.01)
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def epoch_time(start_time, end_time):
    """Break the elapsed wall-clock interval into whole (minutes, seconds)."""
    total = end_time - start_time
    minutes = int(total / 60)
    seconds = int(total - minutes * 60)
    return minutes, seconds
def numpy_to_tvar(x):
    """Convert a numpy array to a float32 torch tensor on the module ``device``.

    ``torch.autograd.Variable`` has been a deprecated no-op wrapper since
    PyTorch 0.4 (tensors themselves carry autograd state), so the tensor is
    returned directly.  The interface is unchanged: callers still receive a
    float tensor placed on ``device``.
    """
    return torch.from_numpy(x).float().to(device)
def plot_result(pred, true):
pred_array = pred.data.numpy()
true_array = true.data.numpy()
plt.figure()
plt.plot(pred_array, label='Predicted')
plt.plot(true_array, label="True")
plt.legend(loc='upper left')
plt.pause(0.0001)
# def show_attention(input_sentence, output_words, attentions):
# input_sentence = input_sentence.data.numpy()
# output_words = output_words.data.numpy()
# # Set up figure with colorbar
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # print('here')
# # print(attentions.data.numpy())
# cax = ax.matshow(attentions.numpy(), cmap='bone')
# fig.colorbar(cax)
# # Set up axes
# ax.set_xticklabels(input_sentence, rotation=90)
# ax.set_yticklabels(output_words)
# # Show label at every tick
# ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# # show_plot_visdom()
def show_attention(input_left, input_right, output_words, attentions):
    """Render an attention-weight matrix as a heatmap with numeric ticks.

    input_left / input_right are concatenated to form the encoder input
    window; output_words supplies the number of decoder steps.  The
    attention matrix is assumed to be (decoder steps x encoder steps)
    -- TODO confirm against the model's attention output shape.
    """
    input_left = input_left.squeeze().data.numpy()
    input_right = input_right.squeeze().data.numpy()
    # NOTE(review): input_sentence (column 3 of the concatenated inputs) is
    # computed but never used -- the label-based axes that consumed it are
    # commented out below in favour of plain numeric tick labels.
    input_sentence = np.concatenate((input_left, input_right), axis=0)
    input_sentence = input_sentence[:, 3]
    output_words = output_words.data.numpy()
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.squeeze().numpy(), cmap='jet')
    fig.colorbar(cax)
    # set label with number
    # NOTE(review): the two aranges have lengths len+1 and len, so the
    # concatenated x labels are uneven -- verify the intended tick scheme.
    x_tick = np.concatenate((np.arange(
        0, input_left.shape[0] + 1), np.arange(1, input_left.shape[0] + 1)),
        axis=0)
    y_tick = np.arange(0, output_words.shape[0] + 1)
    ax.set_xticklabels(x_tick, rotation=90)
    ax.set_yticklabels(y_tick)
    # # Set up axes
    # ax.set_xticklabels(input_sentence, rotation=90)
    # ax.set_yticklabels(output_words)
    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.set_aspect('auto')
    # show_plot_visdom()
"torch.manual_seed",
"torch.nn.init.constant_",
"numpy.arange",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.plot",
"random.seed",
"torch.from_numpy",
"torch.nn.init.normal_",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"numpy.concatenate",
"matplotlib.pyplot.pause",
"m... | [((371, 388), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (382, 388), False, 'import random, math, os, time\n'), ((389, 412), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (406, 412), False, 'import torch\n'), ((1248, 1260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1258, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1304), 'matplotlib.pyplot.plot', 'plt.plot', (['pred_array'], {'label': '"""Predicted"""'}), "(pred_array, label='Predicted')\n", (1273, 1304), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1343), 'matplotlib.pyplot.plot', 'plt.plot', (['true_array'], {'label': '"""True"""'}), "(true_array, label='True')\n", (1317, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1358, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1381, 1398), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (1390, 1398), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2345), 'numpy.concatenate', 'np.concatenate', (['(input_left, input_right)'], {'axis': '(0)'}), '((input_left, input_right), axis=0)\n', (2310, 2345), True, 'import numpy as np\n'), ((2479, 2491), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2489, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2803, 2842), 'numpy.arange', 'np.arange', (['(0)', '(output_words.shape[0] + 1)'], {}), '(0, output_words.shape[0] + 1)\n', (2812, 2842), True, 'import numpy as np\n'), ((446, 471), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (469, 471), False, 'import torch\n'), ((3096, 3121), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (3118, 3121), True, 'import matplotlib.ticker as ticker\n'), ((3154, 3179), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (3176, 3179), True, 
'import matplotlib.ticker as ticker\n'), ((613, 658), 'torch.nn.init.normal_', 'nn.init.normal_', (['param.data'], {'mean': '(0)', 'std': '(0.01)'}), '(param.data, mean=0, std=0.01)\n', (628, 658), True, 'import torch.nn as nn\n'), ((685, 717), 'torch.nn.init.constant_', 'nn.init.constant_', (['param.data', '(0)'], {}), '(param.data, 0)\n', (702, 717), True, 'import torch.nn as nn\n'), ((2666, 2703), 'numpy.arange', 'np.arange', (['(0)', '(input_left.shape[0] + 1)'], {}), '(0, input_left.shape[0] + 1)\n', (2675, 2703), True, 'import numpy as np\n'), ((2714, 2751), 'numpy.arange', 'np.arange', (['(1)', '(input_left.shape[0] + 1)'], {}), '(1, input_left.shape[0] + 1)\n', (2723, 2751), True, 'import numpy as np\n'), ((1086, 1105), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1102, 1105), False, 'import torch\n')] |
def plot():
    """Recreate matplotlib's masked-line demo and return the figure."""
    import numpy as np
    from matplotlib import pyplot as plt

    fig = plt.figure()
    x = np.ma.arange(0, 2 * np.pi, 0.4)
    base = np.ma.sin(x)
    double = np.sin(2 * x)
    triple = np.sin(3 * x)
    # Mask the fast sines outside their allowed bands.
    masked_hi = np.ma.masked_where(double > 0.5, double)
    masked_lo = np.ma.masked_where(triple < -0.5, triple)
    full_line, hi_line, lo_dots = plt.plot(
        x, base, "r", x, masked_hi, "g", x, masked_lo, "bo"
    )
    plt.setp(full_line, linewidth=4)
    plt.setp(hi_line, linewidth=2)
    plt.setp(lo_dots, markersize=10)
    plt.legend(
        ("No mask", "Masked if > 0.5", "Masked if < -0.5"), loc="upper right"
    )
    plt.title("Masked line demo")
    return fig
def test():
    # Regression check: render plot() and compare the generated output
    # against the stored "<this file minus .py>_reference.tex" file.
    from .helpers import assert_equality
    assert_equality(plot, __file__[:-3] + "_reference.tex")
| [
"matplotlib.pyplot.setp",
"matplotlib.pyplot.plot",
"numpy.ma.masked_where",
"matplotlib.pyplot.figure",
"numpy.ma.arange",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.ma.sin"
] | [((87, 99), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (97, 99), True, 'from matplotlib import pyplot as plt\n'), ((109, 140), 'numpy.ma.arange', 'np.ma.arange', (['(0)', '(2 * np.pi)', '(0.4)'], {}), '(0, 2 * np.pi, 0.4)\n', (121, 140), True, 'import numpy as np\n'), ((149, 161), 'numpy.ma.sin', 'np.ma.sin', (['x'], {}), '(x)\n', (158, 161), True, 'import numpy as np\n'), ((171, 184), 'numpy.sin', 'np.sin', (['(2 * x)'], {}), '(2 * x)\n', (177, 184), True, 'import numpy as np\n'), ((194, 207), 'numpy.sin', 'np.sin', (['(3 * x)'], {}), '(3 * x)\n', (200, 207), True, 'import numpy as np\n'), ((218, 250), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(y1 > 0.5)', 'y1'], {}), '(y1 > 0.5, y1)\n', (236, 250), True, 'import numpy as np\n'), ((261, 294), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(y2 < -0.5)', 'y2'], {}), '(y2 < -0.5, y2)\n', (279, 294), True, 'import numpy as np\n'), ((308, 354), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""', 'x', 'ym1', '"""g"""', 'x', 'ym2', '"""bo"""'], {}), "(x, y, 'r', x, ym1, 'g', x, ym2, 'bo')\n", (316, 354), True, 'from matplotlib import pyplot as plt\n'), ((359, 390), 'matplotlib.pyplot.setp', 'plt.setp', (['lines[0]'], {'linewidth': '(4)'}), '(lines[0], linewidth=4)\n', (367, 390), True, 'from matplotlib import pyplot as plt\n'), ((395, 426), 'matplotlib.pyplot.setp', 'plt.setp', (['lines[1]'], {'linewidth': '(2)'}), '(lines[1], linewidth=2)\n', (403, 426), True, 'from matplotlib import pyplot as plt\n'), ((431, 464), 'matplotlib.pyplot.setp', 'plt.setp', (['lines[2]'], {'markersize': '(10)'}), '(lines[2], markersize=10)\n', (439, 464), True, 'from matplotlib import pyplot as plt\n'), ((470, 556), 'matplotlib.pyplot.legend', 'plt.legend', (["('No mask', 'Masked if > 0.5', 'Masked if < -0.5')"], {'loc': '"""upper right"""'}), "(('No mask', 'Masked if > 0.5', 'Masked if < -0.5'), loc=\n 'upper right')\n", (480, 556), True, 'from matplotlib import pyplot as plt\n'), ((556, 585), 
'matplotlib.pyplot.title', 'plt.title', (['"""Masked line demo"""'], {}), "('Masked line demo')\n", (565, 585), True, 'from matplotlib import pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 16:31:49 2020
@author: enzo
"""
import cv2
import matplotlib.pyplot as plt
def single_channel_gray(BRG_input_image):
    """Convert a BGR image to grayscale and boost contrast with CLAHE."""
    gray = cv2.cvtColor(BRG_input_image, cv2.COLOR_BGR2GRAY)
    # Contrast-limited adaptive equalisation (the plain global
    # cv2.equalizeHist alternative was tried and abandoned).
    equalizer = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(4, 3))
    return equalizer.apply(gray)
def compute_laplac(input_image):
    """Return the summed Sobel x/y derivatives of the equalised grayscale image.

    Despite the name, the value returned is ``sobelx + sobely`` computed on
    the CLAHE-equalised grayscale version of *input_image* (float64 output).
    The original body also computed a ``cv2.Laplacian`` image and a binary
    threshold of the result, but both were dead code -- the Laplacian was
    immediately overwritten and the threshold was never returned -- so they
    have been removed without changing the returned value.
    """
    gray = single_channel_gray(input_image)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)  # d/dx
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)  # d/dy
    return sobelx + sobely
# ---------------------------------------------------------------------------
# Ad-hoc experiment: compare the gradient image of a reference photo
# against several candidate photos, scoring each pair with the infinity
# norm of the gradient-image difference (smaller = more similar).
# ---------------------------------------------------------------------------
from well_plate_project.config import data_dir
# Reference ("query") image.
path_query = data_dir / 'raw' / 'Match'
image_file = path_query / 'aswana_cropped.jpg'
assert image_file.is_file()
queryImage = cv2.imread(str(image_file)) #aswana_cropped_2 aswana_cropped
#plt.imshow(queryImage); plt.show()
# Candidate images: one expected to match well ("good"), one poorly ("bad").
path_train = data_dir / 'raw' / 'EXPERIMENTS_Crp' #foto_tel1 EXPERIMENTS foto_tel1
jpg = path_train / 'a2_c_cropped.jpg' #20201118_090416 IMG_20201118_090440
jpg_bad = path_train / 'b1_a_cropped.jpg' #20201118_090416 IMG_20201118_090440
good = cv2.imread(str(jpg)) #aswana_cropped_2 aswana_cropped
bad = cv2.imread(str(jpg_bad)) #aswana_cropped_2 aswana_cropped
# Gradient images (compute_laplac: Sobel x + Sobel y on equalised grayscale).
lap_orig = compute_laplac(queryImage)
plt.figure(figsize=(10,10))
plt.imshow(lap_orig);plt.show()
lap_good = compute_laplac(good)
plt.figure(figsize=(10,10))
plt.imshow(lap_good);plt.show()
lap_bad = compute_laplac(bad)
plt.figure(figsize=(10,10))
plt.imshow(lap_bad);plt.show()
import numpy as np
# NOTE(review): np.linalg.norm(..., ord=np.inf) on a 2-D array is the max
# absolute row sum, not an element-wise max -- confirm this is the intended
# score (the commented 'fro' alternative suggests norms were being compared).
diff_lap_good = np.linalg.norm(lap_orig -lap_good, ord = np.inf) #np.inf 'fro'
diff_lap_bad =np.linalg.norm(lap_orig - lap_bad, ord = np.inf)
# More candidates, scored the same way.
jpg_good_2 = path_train / 'd2_a_cropped.jpg'
good_2 = cv2.imread(str(jpg_good_2))
lap_good_2 = compute_laplac(good_2)
diff_lap_good_2 = np.linalg.norm(lap_orig -lap_good_2, ord = np.inf) #np.inf 'fro'
jpg_good_3 = path_train / 'e2_b_cropped.jpg'
good_3 = cv2.imread(str(jpg_good_3))
lap_good_3 = compute_laplac(good_3)
diff_lap_good_3 = np.linalg.norm(lap_orig -lap_good_3, ord = np.inf) #np.inf 'fro'
jpg_bad_2 = path_train / 'd1_a_cropped.jpg'
bad_2 = cv2.imread(str(jpg_bad_2))
lap_bad_2 = compute_laplac(bad_2)
diff_lap_bad_2 = np.linalg.norm(lap_orig -lap_bad_2, ord = np.inf) #np.inf 'fro'
| [
"matplotlib.pyplot.imshow",
"cv2.Laplacian",
"cv2.threshold",
"cv2.createCLAHE",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"numpy.linalg.norm",
"cv2.Sobel",
"matplotlib.pyplot.show"
] | [((1683, 1711), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1693, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1731), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lap_orig'], {}), '(lap_orig)\n', (1721, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1740, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1776, 1804), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1786, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1824), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lap_good'], {}), '(lap_good)\n', (1814, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1835), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1833, 1835), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1895), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1877, 1895), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1914), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lap_bad'], {}), '(lap_bad)\n', (1905, 1914), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1923, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1964, 2011), 'numpy.linalg.norm', 'np.linalg.norm', (['(lap_orig - lap_good)'], {'ord': 'np.inf'}), '(lap_orig - lap_good, ord=np.inf)\n', (1978, 2011), True, 'import numpy as np\n'), ((2042, 2088), 'numpy.linalg.norm', 'np.linalg.norm', (['(lap_orig - lap_bad)'], {'ord': 'np.inf'}), '(lap_orig - lap_bad, ord=np.inf)\n', (2056, 2088), True, 'import numpy as np\n'), ((2231, 2280), 'numpy.linalg.norm', 'np.linalg.norm', (['(lap_orig - lap_good_2)'], {'ord': 'np.inf'}), '(lap_orig - lap_good_2, ord=np.inf)\n', (2245, 2280), True, 'import numpy as np\n'), ((2438, 2487), 'numpy.linalg.norm', 'np.linalg.norm', (['(lap_orig - lap_good_3)'], 
{'ord': 'np.inf'}), '(lap_orig - lap_good_3, ord=np.inf)\n', (2452, 2487), True, 'import numpy as np\n'), ((2638, 2686), 'numpy.linalg.norm', 'np.linalg.norm', (['(lap_orig - lap_bad_2)'], {'ord': 'np.inf'}), '(lap_orig - lap_bad_2, ord=np.inf)\n', (2652, 2686), True, 'import numpy as np\n'), ((209, 258), 'cv2.cvtColor', 'cv2.cvtColor', (['BRG_input_image', 'cv2.COLOR_BGR2GRAY'], {}), '(BRG_input_image, cv2.COLOR_BGR2GRAY)\n', (221, 258), False, 'import cv2\n'), ((322, 373), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(4, 3)'}), '(clipLimit=1.0, tileGridSize=(4, 3))\n', (337, 373), False, 'import cv2\n'), ((762, 811), 'cv2.Sobel', 'cv2.Sobel', (['input_image', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(3)'}), '(input_image, cv2.CV_64F, 1, 0, ksize=3)\n', (771, 811), False, 'import cv2\n'), ((826, 875), 'cv2.Sobel', 'cv2.Sobel', (['input_image', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(3)'}), '(input_image, cv2.CV_64F, 0, 1, ksize=3)\n', (835, 875), False, 'import cv2\n'), ((926, 979), 'cv2.threshold', 'cv2.threshold', (['laplacian', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(laplacian, 200, 255, cv2.THRESH_BINARY)\n', (939, 979), False, 'import cv2\n'), ((627, 665), 'cv2.Laplacian', 'cv2.Laplacian', (['input_image', 'cv2.CV_64F'], {}), '(input_image, cv2.CV_64F)\n', (640, 665), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# wITHOUT wARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test operator sparsing."""
import numpy as np
from openfermion import get_sparse_operator
from openfermion.chem import MolecularData
from mindquantum.algorithm.nisq.chem.transform import Transform
from mindquantum.core.operators.utils import get_fermion_operator
from mindquantum.third_party.interaction_operator import InteractionOperator
def test_sparsing_operator():
    """
    Description: Test sparsing operator
    Expectation: success

    Builds the H4 molecular Hamiltonian three ways (openfermion sparse
    matrix, the Jordan-Wigner qubit operator's own matrix, and the fermion
    operator's matrix) and checks that all three share the same spectrum.
    """
    mol = MolecularData(filename="./tests/st/H4.hdf5")
    mol.load()
    ham_of = mol.get_molecular_hamiltonian()
    interaction = InteractionOperator(*ham_of.n_body_tensors.values())
    fermion_ham = get_fermion_operator(interaction)
    qubit_ham = Transform(fermion_ham).jordan_wigner()
    matrices = [
        get_sparse_operator(qubit_ham.to_openfermion()).toarray(),
        qubit_ham.matrix().toarray(),
        fermion_ham.matrix().toarray(),
    ]
    # Compare sorted real eigenvalue spectra of the three representations.
    spectra = [np.sort(np.real(np.linalg.eigvals(m))) for m in matrices]
    assert np.allclose(spectra[0], spectra[1])
    assert np.allclose(spectra[0], spectra[2])
| [
"mindquantum.core.operators.utils.get_fermion_operator",
"numpy.allclose",
"numpy.linalg.eigvals",
"openfermion.chem.MolecularData",
"openfermion.get_sparse_operator",
"mindquantum.algorithm.nisq.chem.transform.Transform"
] | [((1197, 1230), 'openfermion.chem.MolecularData', 'MolecularData', ([], {'filename': 'molecular'}), '(filename=molecular)\n', (1210, 1230), False, 'from openfermion.chem import MolecularData\n'), ((1375, 1406), 'mindquantum.core.operators.utils.get_fermion_operator', 'get_fermion_operator', (['inter_ops'], {}), '(inter_ops)\n', (1395, 1406), False, 'from mindquantum.core.operators.utils import get_fermion_operator\n'), ((1766, 1785), 'numpy.allclose', 'np.allclose', (['v1', 'v2'], {}), '(v1, v2)\n', (1777, 1785), True, 'import numpy as np\n'), ((1797, 1816), 'numpy.allclose', 'np.allclose', (['v1', 'v3'], {}), '(v1, v3)\n', (1808, 1816), True, 'import numpy as np\n'), ((1610, 1631), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['m1'], {}), '(m1)\n', (1627, 1631), True, 'import numpy as np\n'), ((1650, 1671), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['m2'], {}), '(m2)\n', (1667, 1671), True, 'import numpy as np\n'), ((1690, 1711), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['m3'], {}), '(m3)\n', (1707, 1711), True, 'import numpy as np\n'), ((1418, 1436), 'mindquantum.algorithm.nisq.chem.transform.Transform', 'Transform', (['ham_hiq'], {}), '(ham_hiq)\n', (1427, 1436), False, 'from mindquantum.algorithm.nisq.chem.transform import Transform\n'), ((1492, 1514), 'openfermion.get_sparse_operator', 'get_sparse_operator', (['h'], {}), '(h)\n', (1511, 1514), False, 'from openfermion import get_sparse_operator\n')] |
import numpy as np
import math
import random
from network.convolution.ConvolutionWrapper import ConvolutionWrapper
class LSTMWrapper(ConvolutionWrapper):
    """Agent wrapper that feeds the model a short history of past states.

    Extends ConvolutionWrapper: instead of predicting from the current
    state alone, it collects up to ``history_size`` consecutive past states
    from ``self.memory`` (walking backwards and stopping at an episode
    boundary, marked by the ``done`` flag stored as the last element of
    each memory entry) -- presumably so a recurrent (LSTM) model can
    consume the sequence; confirm against the model's expected input.
    """
    def __init__(self, agent, history_size=10):
        # history_size: maximum number of past states handed to the model.
        super(LSTMWrapper, self).__init__(agent)
        self.history_size = history_size
    def request_action(self):
        # Choose the next action (exploration or model prediction) and
        # execute it on the player.
        #get old state
        self.state_old = self.get_state()
        reward_old = self.get_reward()  # NOTE(review): computed but never used
        self.total_moves += 1
        # print(f"state = {state_old}")
        #perform random actions based on agent.epsilon, or choose the action
        # Exploration: likely while max_survival_time is still small, plus a
        # constant 1-in-11 chance of a random move thereafter.
        if random.randint(0, 500) > self.player.max_survival_time or random.randint(0, 10) == 0:
            self.last_move = random.randint(0, 3)
            self.random_moves += 1
            # print("random move")
        else:
            # predict action based on the old state
            # Build [current state, previous states...] (newest first),
            # walking back until an episode boundary or history_size states.
            states = []
            states_index = 0
            states.append(self.state_old)
            while states_index > -len(self.memory) and states_index > -self.history_size - 1 and self.memory[states_index][-1] != True:
                states.append(self.memory[states_index][0])
                states_index -= 1
            prediction = self.model.predict(states)
            self.last_move = np.argmax(prediction)

        #perform new move and get new state
        self.player.do_action(int(self.last_move))
    def replay_new(self):
        # Experience replay over (a sample of) the whole memory: for each
        # sampled transition, rebuild its state history and fit the model
        # toward the observed reward for the taken action.
        # print(f'random moves : {100 * float(self.random_moves) / self.total_moves}')
        self.random_moves = 0
        self.total_moves = 0
        # minibatch = [a for a in self.memory if a[2] != 0]
        minibatch = range(len(self.memory))
        if len(minibatch) > 1000:
            # Cap the replay batch at 1000 randomly chosen indices.
            minibatch = random.sample(range(len(minibatch)), 1000)
        for index in minibatch:
            # Memory entry layout: (state, action, reward, next_state, done).
            state, action, reward, next_state, done = self.memory[index]
            states = []
            states_index = 0
            while states_index + index >= 0 and states_index > -self.history_size - 1 and self.memory[states_index + index][-1] != True:
                states.append(self.memory[states_index + index][0])
                states_index -= 1
            if len(states) != 0:
                # NOTE(review): the target is the raw reward -- no discounted
                # future value from next_state; confirm this is intentional.
                target = reward
                target_f = self.model.predict(states)
                target_f[action] = target
                self.model.fit(states, target_f)
    def train_short_memory(self):
        # Single-step training on the most recent transition, using the
        # same history-collection and raw-reward target as replay_new.
        state, action, reward, next_state, done = self.memory[-1]
        states = []
        states_index = 0
        while states_index > -len(self.memory) and states_index > -self.history_size - 1 and self.memory[states_index][-1] != True:
            states.append(self.memory[states_index][0])
            states_index -= 1
        if len(states) != 0:
            target = reward
            target_f = self.model.predict(states)
            target_f[action] = target
            self.model.fit(states, target_f)
"numpy.argmax",
"random.randint"
] | [((701, 721), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (715, 721), False, 'import random\n'), ((1264, 1285), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1273, 1285), True, 'import numpy as np\n'), ((586, 608), 'random.randint', 'random.randint', (['(0)', '(500)'], {}), '(0, 500)\n', (600, 608), False, 'import random\n'), ((644, 665), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (658, 665), False, 'import random\n')] |
#!/usr/bin/env python
u"""
wavelets.py
Written by <NAME> (02/2021)
Function to apply a wavelets analysis, code based on (Torrence and Compo, 1998)
"""
import numpy as np
import scipy.special
def wave_bases(mother, k, scale, param=-1):
    """Computes the wavelet function as a function of Fourier frequency,
    used for the CWT in Fourier space (Torrence and Compo, 1998).

    Arguments
    ---------
    mother: str, one of 'MORLET', 'DOG' or 'PAUL', choosing the wavelet type
    k: vector (list or array) of the Fourier frequencies
    scale: wavelet scale
    param: nondimensional parameter for the wavelet function
        (-1 selects the conventional default for each mother)

    Returns
    -------
    daughter: the wavelet function sampled at the frequencies k
    fourier_factor: the ratio of Fourier period to scale
    coi: cone-of-influence size at the scale
    dofmin: degrees of freedom for each point in the wavelet power
        (Morlet/Paul = 2, DOG = 1)

    Raises
    ------
    ValueError: if mother is not one of 'MORLET', 'DOG' or 'PAUL'.
    """
    mother = mother.upper()
    k = np.asarray(k)  # accept plain lists as well as arrays
    n = len(k)  # length of Fourier frequencies
    if mother == 'MORLET':
        if param == -1:
            param = 6  # For Morlet this is k0 (wavenumber), default is 6
        # Table 1, Torrence and Compo (1998); (k > 0) zeroes the exponent
        # for non-positive frequencies (analytic wavelet).
        expnt = -(scale * k - param) ** 2 / 2 * (k > 0)
        norm = np.sqrt(scale * k[1]) * (np.pi ** -0.25) * np.sqrt(n)
        daughter = norm * np.exp(expnt)
        daughter = daughter * (k > 0)  # Heaviside step function
        # scale --> Fourier period
        fourier_factor = (4 * np.pi) / (param + np.sqrt(2 + param * param))
        coi = fourier_factor / np.sqrt(2)  # cone-of-influence
        dofmin = 2  # degrees of freedom
    elif mother == 'DOG':
        if param == -1:
            param = 2  # For DOG this is m (derivative order), default is 2
        m = param
        expnt = -(scale * k) ** 2 / 2
        pws = (scale * k) ** m
        # gamma(m + 0.5) = 1.3293 for the default m = 2
        norm = np.sqrt(scale * k[1] / 1.3293 * np.sqrt(n))
        daughter = -norm * 1j ** m * np.exp(expnt) * pws
        fourier_factor = 2 * np.pi / np.sqrt(m + .5)
        coi = fourier_factor / np.sqrt(2)
        dofmin = 1
    elif mother == 'PAUL':
        if param == -1:
            param = 4  # For Paul this is m (order), default is 4
        m = param
        expnt = -(scale * k) * (k > 0)
        norm = (np.sqrt(scale * k[1])
                * (2 ** m / np.sqrt(m * math.factorial(2 * m - 1)))
                * np.sqrt(n))
        pws = (scale * k) ** m
        daughter = norm * np.exp(expnt) * pws
        daughter = daughter * (k > 0)  # Heaviside step function
        fourier_factor = 4 * np.pi / (2 * m + 1)
        coi = fourier_factor * np.sqrt(2)
        dofmin = 2
    else:
        # Previously an unknown mother fell through to an
        # UnboundLocalError; fail with an explicit message instead.
        raise ValueError("mother must be 'MORLET', 'DOG' or 'PAUL', "
                         "got {!r}".format(mother))
    return daughter, fourier_factor, coi, dofmin
def wavelet(Y, dt, pad=1, dj=.25, s0=-1, J1=-1, mother='MORLET', param=-1):
    """Computes the wavelet continuous transform of the vector Y,
    by definition:
        W(a,b) = sum(f(t)*psi[a,b](t) dt)          a dilate/contract
        psi[a,b](t) = 1/sqrt(a) psi(t-b/a)         b displace
    The wavelet basis is normalized to have total energy = 1 at all scales.

    Arguments
    ---------
    Y: time series
    dt: sampling rate
    pad: bool for zero padding or not
    dj: spacing between discrete scales
    s0: smallest scale of the wavelet (-1 selects 2*dt)
    J1: total number of scales (-1 selects log2(n1*dt/s0)/dj)
    mother: the mother wavelet function ('MORLET', 'DOG' or 'PAUL')
    param: the mother wavelet parameter

    Returns
    -------
    wave: wavelet transform of Y, complex array of shape (J1+1, len(Y))
    period: the vector of "Fourier" periods (in time units) that correspond
        to the scales
    scale: vector of scales, given by s0*2**(j*dj), j = 0..J1
    coi: cone of influence, one value per time step
    """
    n1 = len(Y)  # time series length
    if s0 == -1:  # default smallest scale: 2*dt (Shannon criterion)
        s0 = 2 * dt
    if J1 == -1:  # default number of scales spans up to the series length
        J1 = int((np.log(n1*dt/s0) / np.log(2))/dj)
    x = Y - np.mean(Y)  # remove mean of the time series
    if pad:  # zero-pad up to the next power of two (limits wrap-around)
        base2 = int(np.log(n1)/np.log(2) + 0.4999)
        x = np.concatenate((x, np.zeros(2**(base2 + 1) - n1)))
    n = len(x)  # update length of x after padding
    # Angular Fourier frequencies: positive half, then mirrored negatives.
    k = np.arange(0, int(n/2))
    k = k*(2*np.pi) / (n*dt)
    k = np.concatenate((k, -k[int((n - 1)/2)::-1]))  # be careful for parity
    f = np.fft.fft(x)  # fft on the padded time series
    # Discrete scales: s0 * 2**(j*dj).
    scale = s0 * 2**(np.arange(0, J1 + 1, 1)*dj)
    # define wavelet array
    wave = np.zeros((int(J1 + 1), n))
    wave = wave + 1j * wave  # make it complex
    # Convolution theorem: multiply in Fourier space, one scale at a time.
    for a1 in range(0, int(J1 + 1)):
        daughter, fourier_factor, coi, dofmin = wave_bases(mother, k, scale[a1], param)
        wave[a1, :] = np.fft.ifft(f * daughter)
    period = fourier_factor * scale
    # cone-of-influence, differs for uneven length of the timeseries:
    if n1%2:  # uneven
        coi = coi * dt * np.concatenate((np.arange(0, n1/2 - 1), np.arange(0, n1/2)[::-1]))
    else:  # even
        coi = coi * dt * np.concatenate((np.arange(0, n1/2), np.arange(0, n1/2)[::-1]))
    # cut zero padding
    wave = wave[:, :n1]
    return wave, period, scale, coi
def wave_signif(Y, dt, scale, dof=-1, lag1=0, siglvl=0.95, mother='MORLET', param=-1):
    """Significance levels for the wavelet power spectrum.

    The observed spectrum is tested against the theoretical spectrum of an
    AR(1) (red-noise) background with lag-1 autocorrelation ``lag1``
    (white noise when ``lag1 == 0``) at the ``siglvl`` confidence level.

    Arguments
    ---------
    Y: time series
    dt: sampling rate
    scale: scales of the wavelet decomposition
    dof: degrees of freedom (-1 selects the mother wavelet's default)
    lag1: assumed lag-1 autocorrelation of the series (0 for white noise
        RECOMMENDED, 0.72 for red noise)
    siglvl: confidence level, e.g. 0.95
    mother: the mother wavelet function ('MORLET', 'DOG' or 'PAUL')
    param: the mother wavelet parameter (-1 selects the default)

    Returns
    -------
    signif: significance threshold of the wavelet power, one per scale
    """
    name = mother.upper()
    variance = np.var(Y)
    # Resolve per-mother defaults and the period/scale conversion factor.
    if name == 'MORLET':
        param = 6 if param == -1 else param  # k0 (wavenumber)
        dof = 2 if dof == -1 else dof
        fourier_factor = float(4 * np.pi) / (param + np.sqrt(2 + param ** 2))
    if name == 'DOG':
        param = 2 if param == -1 else param
        dof = 1 if dof == -1 else dof
        fourier_factor = float(2 * np.pi / (np.sqrt(param + 0.5)))
    if name == 'PAUL':
        param = 4 if param == -1 else param
        dof = 2 if dof == -1 else dof
        fourier_factor = float(4 * np.pi / (2 * param + 1))
    # Theoretical AR(1) spectrum evaluated at each scale's frequency.
    fft_theor = []
    for s in scale:
        f = dt / (s * fourier_factor)
        red = (1 - lag1 ** 2) / (1 - 2 * lag1 * np.cos(f * 2 * np.pi) + lag1 ** 2)
        fft_theor.append(variance * red)
    # Chi-squared value at the requested confidence level.
    chisquare = scipy.special.gammaincinv(dof / 2.0, siglvl) * 2.0 / dof
    return [ft * chisquare for ft in fft_theor]
"numpy.mean",
"numpy.sqrt",
"numpy.arange",
"numpy.fft.fft",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.math.factorial",
"numpy.fft.ifft",
"numpy.var"
] | [((972, 983), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (980, 983), True, 'import numpy as np\n'), ((4685, 4698), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (4695, 4698), True, 'import numpy as np\n'), ((6421, 6430), 'numpy.var', 'np.var', (['Y'], {}), '(Y)\n', (6427, 6430), True, 'import numpy as np\n'), ((1520, 1538), 'numpy.array', 'np.array', (['daughter'], {}), '(daughter)\n', (1528, 1538), True, 'import numpy as np\n'), ((4298, 4308), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (4305, 4308), True, 'import numpy as np\n'), ((5040, 5065), 'numpy.fft.ifft', 'np.fft.ifft', (['(f * daughter)'], {}), '(f * daughter)\n', (5051, 5065), True, 'import numpy as np\n'), ((1751, 1761), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1758, 1761), True, 'import numpy as np\n'), ((2022, 2048), 'numpy.array', 'np.array', (['((scale * k) ** m)'], {}), '((scale * k) ** m)\n', (2030, 2048), True, 'import numpy as np\n'), ((2252, 2270), 'numpy.array', 'np.array', (['daughter'], {}), '(daughter)\n', (2260, 2270), True, 'import numpy as np\n'), ((1279, 1300), 'numpy.sqrt', 'np.sqrt', (['(scale * k[1])'], {}), '(scale * k[1])\n', (1286, 1300), True, 'import numpy as np\n'), ((1667, 1693), 'numpy.sqrt', 'np.sqrt', (['(2 + param * param)'], {}), '(2 + param * param)\n', (1674, 1693), True, 'import numpy as np\n'), ((2340, 2356), 'numpy.sqrt', 'np.sqrt', (['(m + 0.5)'], {}), '(m + 0.5)\n', (2347, 2356), True, 'import numpy as np\n'), ((2385, 2395), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2392, 2395), True, 'import numpy as np\n'), ((2668, 2694), 'numpy.array', 'np.array', (['((scale * k) ** m)'], {}), '((scale * k) ** m)\n', (2676, 2694), True, 'import numpy as np\n'), ((2803, 2821), 'numpy.array', 'np.array', (['daughter'], {}), '(daughter)\n', (2811, 2821), True, 'import numpy as np\n'), ((4470, 4501), 'numpy.zeros', 'np.zeros', (['(2 ** (base2 + 1) - n1)'], {}), '(2 ** (base2 + 1) - n1)\n', (4478, 4501), True, 'import numpy as np\n'), ((4753, 
4776), 'numpy.arange', 'np.arange', (['(0)', '(J1 + 1)', '(1)'], {}), '(0, J1 + 1, 1)\n', (4762, 4776), True, 'import numpy as np\n'), ((6714, 6737), 'numpy.sqrt', 'np.sqrt', (['(2 + param ** 2)'], {}), '(2 + param ** 2)\n', (6721, 6737), True, 'import numpy as np\n'), ((6926, 6946), 'numpy.sqrt', 'np.sqrt', (['(param + 0.5)'], {}), '(param + 0.5)\n', (6933, 6946), True, 'import numpy as np\n'), ((1489, 1499), 'numpy.exp', 'np.exp', (['ex'], {}), '(ex)\n', (1495, 1499), True, 'import numpy as np\n'), ((2118, 2128), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2125, 2128), True, 'import numpy as np\n'), ((2643, 2653), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2650, 2653), True, 'import numpy as np\n'), ((2993, 3003), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3000, 3003), True, 'import numpy as np\n'), ((4251, 4271), 'numpy.log', 'np.log', (['(n1 * dt / s0)'], {}), '(n1 * dt / s0)\n', (4257, 4271), True, 'import numpy as np\n'), ((4270, 4279), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4276, 4279), True, 'import numpy as np\n'), ((4408, 4418), 'numpy.log', 'np.log', (['n1'], {}), '(n1)\n', (4414, 4418), True, 'import numpy as np\n'), ((4419, 4428), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4425, 4428), True, 'import numpy as np\n'), ((5229, 5253), 'numpy.arange', 'np.arange', (['(0)', '(n1 / 2 - 1)'], {}), '(0, n1 / 2 - 1)\n', (5238, 5253), True, 'import numpy as np\n'), ((5338, 5358), 'numpy.arange', 'np.arange', (['(0)', '(n1 / 2)'], {}), '(0, n1 / 2)\n', (5347, 5358), True, 'import numpy as np\n'), ((2221, 2231), 'numpy.exp', 'np.exp', (['ex'], {}), '(ex)\n', (2227, 2231), True, 'import numpy as np\n'), ((2574, 2595), 'numpy.sqrt', 'np.sqrt', (['(scale * k[1])'], {}), '(scale * k[1])\n', (2581, 2595), True, 'import numpy as np\n'), ((5253, 5273), 'numpy.arange', 'np.arange', (['(0)', '(n1 / 2)'], {}), '(0, n1 / 2)\n', (5262, 5273), True, 'import numpy as np\n'), ((5358, 5378), 'numpy.arange', 'np.arange', (['(0)', '(n1 / 2)'], {}), '(0, 
n1 / 2)\n', (5367, 5378), True, 'import numpy as np\n'), ((2772, 2782), 'numpy.exp', 'np.exp', (['ex'], {}), '(ex)\n', (2778, 2782), True, 'import numpy as np\n'), ((7422, 7443), 'numpy.cos', 'np.cos', (['(f * 2 * np.pi)'], {}), '(f * 2 * np.pi)\n', (7428, 7443), True, 'import numpy as np\n'), ((2613, 2641), 'numpy.math.factorial', 'np.math.factorial', (['(2 * m - 1)'], {}), '(2 * m - 1)\n', (2630, 2641), True, 'import numpy as np\n')] |
import warnings
import numpy as np
from scipy.linalg import inv
from scipy.optimize import curve_fit, basinhopping
from .elements import circuit_elements, get_element_from_name
# Digit characters used when parsing element names such as 'R0' or 'CPE1'.
ints = '0123456789'
def rmse(a, b):
    """
    A function which calculates the root mean squared error
    between two vectors.

    Accepts any equal-length array-likes (lists, tuples, ndarrays).

    Notes
    ---------
    .. math::

        RMSE = \\sqrt{\\frac{1}{n}\\sum_i (a_i - b_i)^2}
    """
    # Coerce to arrays so plain Python sequences are accepted too.
    a = np.asarray(a)
    b = np.asarray(b)
    n = len(a)
    return np.linalg.norm(a - b) / np.sqrt(n)
def circuit_fit(frequencies, impedances, circuit, initial_guess, constants,
                bounds=None, bootstrap=False, global_opt=False, seed=0,
                **kwargs):
    """ Main function for fitting an equivalent circuit to data.

    By default, this function uses `scipy.optimize.curve_fit
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html>`_
    to fit the equivalent circuit. This function generally works well for
    simple circuits. However, the final results may be sensitive to
    the initial conditions for more complex circuits. In these cases,
    the `scipy.optimize.basinhopping
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html>`_
    global optimization algorithm can be used to attempt a better fit.

    Parameters
    -----------------
    frequencies : numpy array
        Frequencies
    impedances : numpy array of dtype 'complex128'
        Impedances
    circuit : string
        string defining the equivalent circuit to be fit
    initial_guess : list of floats
        initial guesses for the fit parameters
    constants : dictionary
        parameters and their values to hold constant during fitting
        (e.g. {"RO": 0.1})
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to bounds on all
        parameters of 0 and np.inf, except the CPE alpha
        which has an upper bound of 1
    bootstrap : bool, optional
        Currently unused; kept for interface compatibility.
    global_opt : bool, optional
        If global optimization should be used (uses the basinhopping
        algorithm). Defaults to False
    seed : int, optional
        Random seed, only used for the basinhopping algorithm.
        Defaults to 0
    kwargs :
        Keyword arguments passed to scipy.optimize.curve_fit or
        scipy.optimize.basinhopping

    Returns
    ------------
    p_values : list of floats
        best fit parameters for specified equivalent circuit
    p_errors : list of floats
        one standard deviation error estimates for fit parameters

    Notes
    ---------
    Need to do a better job of handling errors in fitting.
    Currently, an error of -1 is returned.
    """
    Z = impedances
    # extract the elements from the circuit
    extracted_elements = extract_circuit_elements(circuit)
    # set upper and lower bounds on a per-element basis
    if bounds is None:
        lower_bounds, upper_bounds = [], []
        for elem in extracted_elements:
            raw_element = get_element_from_name(elem)
            for i in range(check_and_eval(raw_element).num_params):
                # constants are not fitted, so they get no bounds
                if elem in constants or elem + '_{}'.format(i) in constants:
                    continue
                # CPE/La exponents are physically bounded above by 1
                if raw_element in ['CPE', 'La'] and i == 1:
                    upper_bounds.append(1)
                else:
                    upper_bounds.append(np.inf)
                lower_bounds.append(0)
        bounds = ((lower_bounds), (upper_bounds))
    if not global_opt:
        # Local least-squares fit.
        if 'maxfev' not in kwargs:
            kwargs['maxfev'] = 100000
        if 'ftol' not in kwargs:
            kwargs['ftol'] = 1e-13
        # Real and imaginary parts are stacked so curve_fit sees real data.
        popt, pcov = curve_fit(wrapCircuit(circuit, constants), frequencies,
                               np.hstack([Z.real, Z.imag]),
                               p0=initial_guess, bounds=bounds, **kwargs)
        # One-sigma parameter errors from the covariance diagonal.
        perror = np.sqrt(np.diag(pcov))
    else:
        def opt_function(x):
            """ Short function for basinhopping to optimize over.
            We want to minimize the RMSE between the fit and the data.

            Parameters
            ----------
            x : args
                Parameters for optimization.

            Returns
            -------
            function
                Returns a function (RMSE as a function of parameters).
            """
            return rmse(wrapCircuit(circuit, constants)(frequencies, *x),
                        np.hstack([Z.real, Z.imag]))
        results = basinhopping(opt_function, x0=initial_guess,
                               seed=seed, **kwargs)
        popt = results.x
        # jacobian -> covariance
        # https://stats.stackexchange.com/q/231868
        jac = results.lowest_optimization_result["jac"][np.newaxis]
        try:
            perror = inv(np.dot(jac.T, jac)) * opt_function(popt) ** 2
        except np.linalg.LinAlgError:
            # Singular J^T J: covariance cannot be estimated.
            warnings.warn("Failed to compute perror")
            perror = None
    return popt, perror
def wrapCircuit(circuit, constants):
    """ wraps function so we can pass the circuit string """
    def wrappedCircuit(frequencies, *parameters):
        """Evaluate the circuit and return stacked Re/Im impedance.

        Parameters
        ----------
        frequencies : list of floats
        parameters : list of floats

        Returns
        -------
        array of floats: real parts followed by imaginary parts
        """
        expression, _ = buildCircuit(circuit, frequencies, *parameters,
                                     constants=constants, eval_string='',
                                     index=0)
        impedance = eval(expression, circuit_elements)
        return np.hstack([np.real(impedance), np.imag(impedance)])
    return wrappedCircuit
def buildCircuit(circuit, frequencies, *parameters,
                 constants=None, eval_string='', index=0):
    """ recursive function that transforms a circuit, parameters, and
    frequencies into a string that can be evaluated

    Parameters
    ----------
    circuit: str
    frequencies: list/tuple/array of floats
    parameters: list/tuple/array of floats
    constants: dict
        parameters (by element name) to hold constant instead of taking
        them from ``parameters``

    Returns
    -------
    eval_string: str
        Python expression for calculating the resulting fit
    index: int
        Tracks parameter index through recursive calling of the function
    """
    parameters = np.array(parameters).tolist()
    frequencies = np.array(frequencies).tolist()
    circuit = circuit.replace(' ', '')

    def parse_circuit(circuit, parallel=False, series=False):
        """ Splits a circuit string by either dashes (series) or commas
        (parallel) outside of any paranthesis. Removes any leading 'p('
        or trailing ')' when in parallel mode """
        assert parallel != series, \
            'Exactly one of parallel or series must be True'

        def count_parens(string):
            return string.count('('), string.count(')')

        if parallel:
            special = ','
            if circuit.endswith(')') and circuit.startswith('p('):
                circuit = circuit[2:-1]
        if series:
            special = '-'
        split = circuit.split(special)
        result = []
        skipped = []
        for i, sub_str in enumerate(split):
            if i not in skipped:
                if '(' not in sub_str and ')' not in sub_str:
                    result.append(sub_str)
                else:
                    open_parens, closed_parens = count_parens(sub_str)
                    if open_parens == closed_parens:
                        result.append(sub_str)
                    else:
                        # The split cut inside a parenthesised group:
                        # re-join subsequent pieces until parentheses
                        # balance again, marking them as consumed.
                        uneven = True
                        while i < len(split) - 1 and uneven:
                            sub_str += special + split[i+1]
                            open_parens, closed_parens = count_parens(sub_str)
                            uneven = open_parens != closed_parens
                            i += 1
                            skipped.append(i)
                        result.append(sub_str)
        return result

    parallel = parse_circuit(circuit, parallel=True)
    series = parse_circuit(circuit, series=True)

    # Decide the top-level combination: series s([...]), parallel p([...]),
    # or a single element wrapped in plain parentheses.
    if series is not None and len(series) > 1:
        eval_string += "s(["
        split = series
    elif parallel is not None and len(parallel) > 1:
        eval_string += "p(["
        split = parallel
    elif series == parallel:
        eval_string += "(["
        split = series

    for i, elem in enumerate(split):
        if ',' in elem or '-' in elem:
            # Composite sub-circuit: recurse.
            eval_string, index = buildCircuit(elem, frequencies,
                                              *parameters,
                                              constants=constants,
                                              eval_string=eval_string,
                                              index=index)
        else:
            # Single element: emit raw_elem([params], frequencies),
            # taking each parameter from constants when present,
            # otherwise consuming the next free parameter.
            param_string = ""
            raw_elem = get_element_from_name(elem)
            elem_number = check_and_eval(raw_elem).num_params
            param_list = []
            for j in range(elem_number):
                if elem_number > 1:
                    current_elem = elem + '_{}'.format(j)
                else:
                    current_elem = elem
                if current_elem in constants.keys():
                    param_list.append(constants[current_elem])
                else:
                    param_list.append(parameters[index])
                    index += 1
            param_string += str(param_list)
            new = raw_elem + '(' + param_string + ',' + str(frequencies) + ')'
            eval_string += new
        if i == len(split) - 1:
            eval_string += '])'
        else:
            eval_string += ','
    return eval_string, index
def extract_circuit_elements(circuit):
    """ Extracts circuit elements from a circuit string.

    Parameters
    ----------
    circuit : str
        Circuit string, e.g. 'R0-p(R1,C1)'.

    Returns
    -------
    extracted_elements : list
        list of extracted element names, e.g. ['R0', 'R1', 'C1'].
    """
    digits = '0123456789'
    # Drop the series/parallel syntax, leaving element names and indices.
    p_string = [x for x in circuit if x not in 'p(),-']
    extracted_elements = []
    current_element = []
    length = len(p_string)
    for i, char in enumerate(p_string):
        if char not in digits:
            current_element.append(char)
        elif i == length - 1 or p_string[i + 1] not in digits:
            # Last digit of this element's index: flush the element.
            # (The previous implementation looked at
            # p_string[min(i+1, length-1)], which at the final position
            # inspected the digit itself and silently dropped the last
            # element, e.g. 'R0-C1' -> ['R0'].)
            current_element.append(char)
            extracted_elements.append(''.join(current_element))
            current_element = []
        else:
            # Multi-digit index still in progress.
            current_element.append(char)
    return extracted_elements
def calculateCircuitLength(circuit):
    """ Calculates the number of elements in the circuit.

    Parameters
    ----------
    circuit : str
        Circuit string.

    Returns
    -------
    length : int
        Total number of parameters across all elements (0 for an
        empty/falsy circuit string).
    """
    total = 0
    if circuit:
        for elem in extract_circuit_elements(circuit):
            base_element = get_element_from_name(elem)
            total += check_and_eval(base_element).num_params
    return total
def check_and_eval(element):
    """ Checks if an element is valid, then evaluates it.

    Parameters
    ----------
    element : str
        Circuit element name.

    Raises
    ------
    ValueError
        Raised if an element is not in the list of allowed elements.

    Returns
    -------
    Evaluated element.
    """
    allowed_elements = circuit_elements.keys()
    if element in allowed_elements:
        return eval(element, circuit_elements)
    raise ValueError(f'{element} not in ' +
                     f'allowed elements ({allowed_elements})')
| [
"numpy.sqrt",
"numpy.hstack",
"numpy.diag",
"numpy.real",
"scipy.optimize.basinhopping",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"warnings.warn",
"numpy.imag"
] | [((429, 450), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (443, 450), True, 'import numpy as np\n'), ((453, 463), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (460, 463), True, 'import numpy as np\n'), ((4430, 4495), 'scipy.optimize.basinhopping', 'basinhopping', (['opt_function'], {'x0': 'initial_guess', 'seed': 'seed'}), '(opt_function, x0=initial_guess, seed=seed, **kwargs)\n', (4442, 4495), False, 'from scipy.optimize import curve_fit, basinhopping\n'), ((5572, 5582), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (5579, 5582), True, 'import numpy as np\n'), ((5600, 5610), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (5607, 5610), True, 'import numpy as np\n'), ((5627, 5654), 'numpy.hstack', 'np.hstack', (['[y_real, y_imag]'], {}), '([y_real, y_imag])\n', (5636, 5654), True, 'import numpy as np\n'), ((3701, 3728), 'numpy.hstack', 'np.hstack', (['[Z.real, Z.imag]'], {}), '([Z.real, Z.imag])\n', (3710, 3728), True, 'import numpy as np\n'), ((3830, 3843), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (3837, 3843), True, 'import numpy as np\n'), ((6291, 6311), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (6299, 6311), True, 'import numpy as np\n'), ((6339, 6360), 'numpy.array', 'np.array', (['frequencies'], {}), '(frequencies)\n', (6347, 6360), True, 'import numpy as np\n'), ((4382, 4409), 'numpy.hstack', 'np.hstack', (['[Z.real, Z.imag]'], {}), '([Z.real, Z.imag])\n', (4391, 4409), True, 'import numpy as np\n'), ((4839, 4880), 'warnings.warn', 'warnings.warn', (['"""Failed to compute perror"""'], {}), "('Failed to compute perror')\n", (4852, 4880), False, 'import warnings\n'), ((4743, 4761), 'numpy.dot', 'np.dot', (['jac.T', 'jac'], {}), '(jac.T, jac)\n', (4749, 4761), True, 'import numpy as np\n')] |
#
# Bax-Sneppen 2D implementation by
# <NAME>, Wessel and Willem
#
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import matplotlib.animation as animation
class BaxSneppen3D(object):
    """3D Bax-Sneppen self-organised-criticality model.

    Keeps the full history of fitness lattices (``states``) and of the
    per-cell ages (``ages``); each update replaces the weakest cell and
    its neighbourhood with fresh random fitness values.  The original
    implementation spelled out every neighbour assignment by hand (~60
    near-duplicate lines); the same cells are now updated via a single
    loop over neighbourhood offsets.
    """

    # Neighbourhood offsets (dz, dy, dx): centre cell plus its six face
    # neighbours (von Neumann neighbourhood).
    _VON_NEUMANN = [(0, 0, 0),
                    (-1, 0, 0), (1, 0, 0),
                    (0, -1, 0), (0, 1, 0),
                    (0, 0, -1), (0, 0, 1)]
    # Full 3x3x3 cube around the centre (Moore neighbourhood, 27 offsets).
    _MOORE = [(dz, dy, dx) for dz in (-1, 0, 1)
              for dy in (-1, 0, 1)
              for dx in (-1, 0, 1)]

    def __init__(self, initial_values):
        """initial_values: 3D array-like of initial fitness values."""
        depth = len(initial_values)
        height = len(initial_values[0])
        width = len(initial_values[0][0])
        self.states = [initial_values]
        self.ages = [np.zeros((depth, height, width))]

    def execute(self, moore=False):
        """Run updates until the stopping criterion is reached."""
        while self.update_state(moore):
            continue
        print(self.ages[-1])
        print(len(self.states))

    def update_state(self, moore=False):
        """Perform one mutation step.

        Locates the cell with the lowest fitness; stops (returns False)
        once that minimum exceeds the threshold.  Otherwise redraws the
        fitness of the cell and its neighbourhood (von Neumann by
        default, Moore when ``moore`` is True), resets their ages, and
        appends the new lattice/ages to the history.

        Returns
        -------
        bool: True while the simulation should continue.
        """
        new_ages = self.ages[-1] + 1
        new_state = deepcopy(self.states[-1])
        depth = len(new_state)
        height = len(new_state[0])
        width = len(new_state[0][0])

        # Coordinates of the globally weakest cell.
        flat_index = np.argmin(new_state)
        z = flat_index // (height * width)
        y = flat_index % (height * width) // width
        x = flat_index % width

        # Stopping criterion on the minimum fitness.
        # (An alternative threshold of 0.205 was used previously.)
        if new_state[z][y][x] > 0.10:
            return False

        # Redraw the neighbourhood; indices wrap around the lattice.
        offsets = self._MOORE if moore else self._VON_NEUMANN
        for dz, dy, dx in offsets:
            zz = (z + dz) % depth
            yy = (y + dy) % height
            xx = (x + dx) % width
            new_state[zz][yy][xx] = np.random.uniform(0, 1, 1)
            new_ages[zz][yy][xx] = 0

        self.states.append(new_state)
        self.ages.append(new_ages)
        return True

    def plot_ages(self):
        """Show the most recent age lattice as an image."""
        plt.imshow(self.ages[-1], aspect='auto', cmap='jet_r',
                   interpolation='nearest')
def main():
    """Demo driver: run a 10x10x10 Bax-Sneppen simulation and plot it."""
    # Random initial fitness lattice.
    initial_values = np.random.rand(10, 10, 10)
    bs3d = BaxSneppen3D(initial_values)
    # True -> use the Moore (26-cell) neighbourhood for updates.
    bs3d.execute(True)
    bs3d.plot_ages()
    animate_ages(bs3d.ages)
def animate_ages(ages):
    """Animate the age lattices over the final quarter of the run."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Fixed colour scale so frames are comparable across the animation.
    im = ax.imshow(ages[1], aspect='auto', cmap='jet_r', interpolation='nearest', vmin=0, vmax=np.max(ages[-1]))

    def animate(i):
        # Swap in the data for frame i and redraw.
        im.set_array(ages[i])  # update the data
        fig.canvas.draw()
        plt.title('iteration: ' + str(i))
        return im

    # NOTE(review): the reference to `ani` must stay alive until plt.show()
    # returns, otherwise the animation may be garbage-collected.
    ani = animation.FuncAnimation(fig, animate, range(int(len(ages) * 0.75), len(ages)), interval=2, blit=False)
    plt.show()
# Run the demo when this file is executed as a script.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"numpy.random.rand",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.random.uniform",
"copy.deepcopy",
"numpy.argmin",
"matplotlib.pyplot.show"
] | [((5663, 5689), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)'], {}), '(10, 10, 10)\n', (5677, 5689), True, 'import numpy as np\n'), ((5838, 5850), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5848, 5850), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6276, 6278), True, 'import matplotlib.pyplot as plt\n'), ((688, 713), 'copy.deepcopy', 'deepcopy', (['self.states[-1]'], {}), '(self.states[-1])\n', (696, 713), False, 'from copy import deepcopy\n'), ((733, 753), 'numpy.argmin', 'np.argmin', (['new_state'], {}), '(new_state)\n', (742, 753), True, 'import numpy as np\n'), ((1233, 1259), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1250, 1259), True, 'import numpy as np\n'), ((1293, 1319), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1310, 1319), True, 'import numpy as np\n'), ((1353, 1379), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1370, 1379), True, 'import numpy as np\n'), ((1413, 1439), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1430, 1439), True, 'import numpy as np\n'), ((1492, 1518), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1509, 1518), True, 'import numpy as np\n'), ((1574, 1600), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1591, 1600), True, 'import numpy as np\n'), ((1659, 1685), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1676, 1685), True, 'import numpy as np\n'), ((5548, 5627), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.ages[-1]'], {'aspect': '"""auto"""', 'cmap': '"""jet_r"""', 'interpolation': '"""nearest"""'}), "(self.ages[-1], aspect='auto', cmap='jet_r', interpolation='nearest')\n", (5558, 5627), True, 'import 
matplotlib.pyplot as plt\n'), ((2082, 2108), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2099, 2108), True, 'import numpy as np\n'), ((2150, 2176), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2167, 2176), True, 'import numpy as np\n'), ((2247, 2273), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2264, 2273), True, 'import numpy as np\n'), ((2315, 2341), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2332, 2341), True, 'import numpy as np\n'), ((2408, 2434), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2425, 2434), True, 'import numpy as np\n'), ((2502, 2528), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2519, 2528), True, 'import numpy as np\n'), ((2592, 2618), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2609, 2618), True, 'import numpy as np\n'), ((2711, 2737), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2728, 2737), True, 'import numpy as np\n'), ((2779, 2805), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2796, 2805), True, 'import numpy as np\n'), ((2869, 2895), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2886, 2895), True, 'import numpy as np\n'), ((2962, 2988), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2979, 2988), True, 'import numpy as np\n'), ((3077, 3103), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3094, 3103), True, 'import numpy as np\n'), ((3168, 3194), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3185, 3194), True, 'import numpy as np\n'), ((3255, 3281), 
'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3272, 3281), True, 'import numpy as np\n'), ((3371, 3397), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3388, 3397), True, 'import numpy as np\n'), ((3458, 3484), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3475, 3484), True, 'import numpy as np\n'), ((3570, 3596), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3587, 3596), True, 'import numpy as np\n'), ((3708, 3734), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3725, 3734), True, 'import numpy as np\n'), ((3817, 3843), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3834, 3843), True, 'import numpy as np\n'), ((3955, 3981), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3972, 3981), True, 'import numpy as np\n'), ((5976, 5992), 'numpy.max', 'np.max', (['ages[-1]'], {}), '(ages[-1])\n', (5982, 5992), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 18:56:30 2018
@author: <NAME>
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn.decomposition import PCA
from random import seed
from random import random
from random import gauss
import random
import copy
from scipy import stats
def data_rand(N, M, sigma, Groups=2):
    """
    Draw an M x N table of random gaussian samples plus one random group
    label per sample.

    :param N: number of samples per variable
    :param M: number of variables (outer lists of the returned table)
    :param sigma: lower bound of the per-variable noise spread; the actual
        spread is drawn uniformly from [sigma, 2*sigma]
    :param Groups: number of distinct group labels to draw from
    :return: (data, labels) -- data is a list of M lists of N floats,
        labels is a list of N ints in [0, Groups - 1]
    """
    table = []
    for _ in range(M):
        # one random center per variable, then N gaussian draws around it;
        # the draw order (randint, then randint inside gauss) matches the
        # original so seeded runs stay reproducible
        center = random.randint(50, 150)
        column = [
            gauss(center, random.randint(sigma, 2 * sigma)) for _ in range(N)
        ]
        table.append(column)
    labels = [random.randint(0, Groups - 1) for _ in range(N)]
    return table, labels
def add_signifficance(data, Labels, Groups, averageSig, sigma, sigvars):
    """
    Shift the chosen variables of every non-reference group by a random
    offset, making those variables statistically "significant".

    :param data: (N, M) numpy array of samples x variables; modified in place
    :param Labels: group id per row (list or ndarray); group 0 is the
        untouched reference group
    :param Groups: iterable of group ids to consider (e.g. ``np.arange``)
    :param averageSig: mean magnitude of the added offset
    :param sigma: spread used for both the offset draw and its noise term
    :param sigvars: indices of the variables that receive the offset
    :return: (data, sig) where sig lists every offset that was applied
    """
    # Coerce to an ndarray: with a plain Python list, ``Labels == j`` would
    # evaluate to the scalar False and silently index row 0 instead of
    # selecting the group's rows.
    labels = np.asarray(Labels)
    sig = []
    for group in Groups:
        if group <= 0:
            continue  # leave the reference group untouched
        mask = labels == group
        for var in sigvars:
            # draw order (randint, then randint inside gauss) mirrors the
            # original so seeded runs stay reproducible
            offset = random.randint(
                averageSig - 2 * sigma, averageSig + 2 * sigma
            ) + gauss(0, random.randint(sigma, 2 * sigma))
            sig.append(offset)
            data[mask, var] = data[mask, var] + offset
    return data, sig
def JSDe(X,Y,w,k):
    """Estimate the Jensen-Shannon divergence of the data projected onto
    direction ``k`` using nearest-neighbour distance ratios (a
    Kozachenko-Leonenko style entropy estimator).

    :param X: (N, M) data matrix
    :param Y: class label per row of X
    :param w: per-class weights; NOTE(review): indexed here by class ORDER
        (``w[j]``), while ``JSD`` below indexes by class VALUE (``w[c]``) --
        confirm the two conventions agree for the labels in use
    :param k: projection direction of length M
    :return: scalar divergence estimate
    """
    #project the data to k
    N,M = np.shape(X)
    T = np.repeat([k],N,0)
    # scalar projection of every sample onto k, sorted ascending (mixture)
    xp = np.sort(np.sum(X*T,1)/np.linalg.norm(k)**2)
    j = 0
    # NOTE: accumulator intentionally shadows the function name inside the body
    JSDe = 0
    C = np.unique(Y)
    for c in C:
        Xc = X[Y==c,:]
        n,m = np.shape(Xc)
        T = np.repeat([k],n,0)
        # sorted projections of this class only
        xc = np.sort(np.sum(Xc*T,1)/np.linalg.norm(k)**2)
        nc = np.shape(xc)[0]
        jsd = 0
        # interior points only: each term needs both neighbours xc[i-1], xc[i+1]
        for i in np.arange(1,nc-1,1):
            # sx = np.min([xp[i]-xp[i-1],xp[i+1]-xp[i]])
            # rx = np.min(np.abs(xc[xc!=xp[i]]-xp[i]))
            # sx: distance from xc[i] to its nearest distinct mixture point
            sx = np.min(np.abs(xp[xp!=xc[i]]-xc[i]))
            # rx: distance to the nearest same-class neighbour
            rx = np.min(np.abs([xc[i]-xc[i-1],xc[i+1]-xc[i]]))
            if rx == 0 or sx == 0:
                # duplicate projection: drop the log-ratio, keep the bias term
                jsd += (0 + np.log2(N/(nc-1)))
            else:
                jsd += (np.log2(sx/rx) + np.log2(N/(nc-1)))
        JSDe += (w[j]/nc)*jsd
        j += 1
    return JSDe
def JSD(X,Y,w,k,nr_pts,hist = False):
    """Estimate the Jensen-Shannon divergence of the data projected onto
    direction ``k`` as H(mixture) - sum_c w_c * H(class c), with the
    per-class densities estimated either by a gaussian KDE (default) or
    by plain histograms (``hist=True``).

    :param X: (N, M) data matrix
    :param Y: class label per row of X
    :param w: per-class weights; NOTE(review): indexed by class VALUE
        (``w[c]``), unlike ``JSDe`` which uses class order -- confirm
        labels are 0..len(w)-1
    :param k: projection direction of length M
    :param nr_pts: number of evaluation points / histogram bins
    :param hist: True to use histogram densities instead of the KDE
    :return: scalar divergence estimate (printed warning if negative)
    """
    N,M = np.shape(X)
    T = np.repeat([k],N,0)
    # sorted scalar projections of all samples (the mixture)
    xp = np.sort(np.sum(X*T,1)/np.linalg.norm(k)**2)
    # print(np.log2(Slack*nr_pts))
    # common evaluation grid spanning the projected range
    pts = np.linspace(np.min(xp),np.max(xp),nr_pts)
    C = np.unique(Y)
    j = 0
    Hc = 0
    jsd = 0
    fmix = np.zeros(np.shape(pts)[0])
    # plt.figure()
    if hist == False:
        # --- kernel-density branch ---
        for c in C:
            Xc = X[Y==c,:]
            n,m = np.shape(Xc)
            T = np.repeat([k],n,0)
            xc = np.sort(np.sum(Xc*T,1)/np.linalg.norm(k)**2)
            KernelD = stats.gaussian_kde(xc,bw_method='scott')
            f = KernelD.evaluate(pts)
            # accumulate the weighted mixture density on the same grid
            fmix = fmix + w[c]*f
            # plt.plot(pts,f)
            # trapezoid-style midpoint masses, then normalize to a pmf
            Fx = (f[0:-1] + 0.5*np.diff(f))
            Fx = Fx/np.sum(Fx)
            # subtract the weighted per-class entropy
            Hc = Hc - w[c]*(np.sum(Fx[Fx>0]*np.log2(Fx[Fx>0])))
            # Hc = Hc - w[c]*np.trapz(f[f>0]*np.log2(f[f>0]),pts[f>0])
            j+=1
        # plt.plot(pts,fmix)
        # entropy of the mixture density, discretized the same way
        Fmix = (fmix[0:-1] + 0.5*np.diff(fmix))
        H = Fmix/np.sum(Fmix)
        Hmix = -(np.sum(H[H>0]*np.log2(H[H>0])))
    else:
        # --- histogram branch ---
        for c in C:
            Xc = X[Y==c,:]
            n,m = np.shape(Xc)
            T = np.repeat([k],n,0)
            xc = np.sort(np.sum(Xc*T,1)/np.linalg.norm(k)**2)
            f,_ = np.histogram(xc,bins = nr_pts,range=[pts[0],pts[-1]])
            # plt.plot(pts,f)
            f = f/np.sum(f)
            Hc = Hc - w[c]*(np.sum(f[f>0]*np.log2(f[f>0])))
            # Hc = Hc - w[c]*np.trapz(f[f>0]*np.log2(f[f>0]),pts[f>0])
            j+=1
        # plt.plot(pts,fmix)
        # mixture entropy from a histogram of ALL projections on the same bins
        n,m = np.shape(X)
        T = np.repeat([k],n,0)
        Xc = np.sort(np.sum(X*T,1)/np.linalg.norm(k)**2)
        Fmix,_ = np.histogram(Xc,bins = nr_pts,range=[pts[0],pts[-1]])
        H = Fmix/np.sum(Fmix)
        Hmix = -(np.sum(H[H>0]*np.log2(H[H>0])))
    # JSD = mixture entropy minus expected per-class entropy
    jsd = Hmix - Hc
    if jsd<0:
        # theoretically impossible; flags estimation/discretization error
        print('negative')
    return jsd
def gradJSDe(X,Y,w,k):
    """Analytic gradient of the nearest-neighbour JSD estimate ``JSDe``
    with respect to the projection direction ``k``.

    :param X: (N, M) data matrix
    :param Y: class label per row of X
    :param w: per-class weights (indexed by class order)
    :param k: projection direction of length M
    :return: (dx, dy, dG) -- the first two gradient components plus the
        full gradient vector; assumes M >= 2 for the dx/dy unpacking
    """
    dG = None
    N,M = np.shape(X)
    T = np.repeat([k],N,0)
    # sorted projections of all samples (the mixture)
    xp = np.sort(np.sum(X*T,1)/np.linalg.norm(k)**2)
    dG = 0
    C = np.unique(Y)
    for j,c in enumerate(C):
        Xc = X[Y==c,:]
        n,m = np.shape(Xc)
        T = np.repeat([k],n,0)
        xc = np.sort(np.sum(Xc*T,1)/np.linalg.norm(k)**2)
        nc = np.shape(xc)[0]
        jsd = 0
        for i in np.arange(1,nc-1,1):
            # sx = np.min([xp[i]-xp[i-1],xp[i+1]-xp[i]])
            # rx = np.min(np.abs(xc[xc!=xp[i]]-xp[i]))
            # sx: distance to the nearest distinct mixture projection
            sx = np.min(np.abs(xp[xp!=xc[i]]-xc[i]))
            dvc = [xc[i]-xc[i-1],xc[i+1]-xc[i]]
            # rx: distance to the nearest same-class neighbour
            rx = np.min(np.abs(dvc))
            if np.abs(dvc[0])<np.abs(dvc[1]):
                id_xc = i-1
            else:
                id_xc = i+1
            # index of the nearest mixture point among the reduced set
            id_xp = np.where(np.abs(xp[xp!=xc[i]]-xc[i]) == np.min(np.abs(xp[xp!=xc[i]]-xc[i])))
            # NOTE(review): xp is SORTED while the rows of X are not, so
            # masking X with ``xp != xc[i]`` may pair the wrong sample with
            # the wrong projection -- confirm against the derivation
            redX = X[xp!=xc[i],:]
            red_xp = xp[xp!=xc[i]]
            aq = np.squeeze(redX[id_xp,:])
            ai = Xc[id_xc,:]
            aj = Xc[i]
            delta_xi = xc[i] - xc[id_xc]
            delta_xp = xc[i] - red_xp[id_xp]
            if rx == 0 or sx == 0:
                # duplicate projections contribute no gradient term
                jsd += 0
            else:
                # jsd += (xc[i]*ai + red_xp[id_xp]*aq)/sx**2 - (xc[i]*ai + xc[id_xc]*aj)/rx**2
                jsd += ((np.abs(delta_xi)*delta_xp*[ai-aq] - np.abs(delta_xp)*delta_xi*[ai-aj])/(np.abs(delta_xp)*np.abs(delta_xi)))
        # 1/log(2) converts the natural-log derivative to the log2 estimator
        dG += (w[j]/nc/np.log(2))*jsd
    dG = np.squeeze(dG)
    dx = dG[0]
    dy = dG[1]
    return dx,dy,dG
if __name__ == '__main__':
__spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
N = 200
M = 2
CLASSES = 2
random.seed
data_rand,Labels = data_rand(N,M,20,Groups=CLASSES)
data_rand = np.array(data_rand, dtype='float32').T
Labels = np.array(Labels,dtype='int32')
j = 0
varSig = []
while j< M:
v = random.randint(0,M-1)
if v not in varSig:
varSig.append(v)
j += 1
averageSig = 350
sigma = 10
data_sig,sig = add_signifficance(copy.deepcopy(data_rand),Labels,np.arange(0,CLASSES,1),averageSig,sigma,varSig)
w = (1/CLASSES)*np.ones(CLASSES)
# #######PCA####
# pca = PCA(n_components=1)#create an instance of PCA
# pca.fit(data_sig)
# L_pca = pca.components_
# Xp_train_pca = np.dot(np.dot(data_sig,L_pca.T),np.linalg.pinv(np.dot(L_pca,L_pca.T)))
Span = np.arange(-1,1,0.01)
JS_es1 = np.zeros([np.shape(Span)[0],np.shape(Span)[0]])
JS_es2 = copy.deepcopy(JS_es1)
JS_es3 = copy.deepcopy(JS_es1)
dX = np.zeros([np.shape(Span)[0],np.shape(Span)[0]])
dY = np.zeros([np.shape(Span)[0],np.shape(Span)[0]])
p =0
for p,i in enumerate(Span):
q = 0
for q,j in enumerate(Span):
k = np.array([i,j])/np.linalg.norm([i,j])
# k = [0.01*random.randint(1,100) for _ in range(M)]
# k = k-np.mean(k)
# JS_estim = JSDe(data_rand,Labels,w,k)
# JS_kdens = JSD(data_rand,Labels,w,k,100)
dx,dy,_ = gradJSDe(data_sig,Labels,w,k)
JS1 = JSDe(data_sig,Labels,w,k)
JS2 = JSD(data_sig,Labels,w,k,100)
JS3 = JSD(data_sig,Labels,w,k,100,hist=True)
JS_es1[p,q] = JS1
JS_es2[p,q] = JS2
JS_es3[p,q] = JS3
dX[p,q] = dx
dY[p,q] = dy
q = q+1
X, Y = np.meshgrid(Span, Span)
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, JS_es1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.title('Eucliden dist JSd')
fig.colorbar(surf, shrink=0.5, aspect=5)
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, JS_es2, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.title('Gauss kernel JSd')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, JS_es3, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.title('Histogram JSd')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# np.delete(data_sig)
# quiver from the smoothed version
dy, dx = np.gradient(JS_es2, X[0,:], Y[:,0])
# fig, ax = plt.subplots()
# ax.quiver(X, Y, dx, dy, JS_es2)
# ax.set(aspect=1, title='Quiver Plot of the kernel density')
# plt.show()
skip = (slice(None, None, 3), slice(None, None, 3))
fig, ax = plt.subplots()
ax.quiver(X[skip], Y[skip], dx[skip], dy[skip], JS_es2[skip])
ax.set(aspect=1, title='Quiver Plot of the kernel')
plt.show()
fig, ax = plt.subplots()
im = ax.imshow(JS_es2, extent=[X.min(), X.max(), Y.min(), Y.max()],origin='lower')
ax.quiver(X[skip], Y[skip], dx[skip], dy[skip])
fig.colorbar(im)
ax.set(aspect=1, title='Quiver Plot of the kernel')
plt.show()
fig, ax = plt.subplots()
ax.streamplot(X, Y, dx, dy, color=JS_es2, density=0.5, cmap='gist_earth')
cont = ax.contour(X, Y, JS_es2, cmap='gist_earth')
ax.clabel(cont)
ax.set(aspect=1, title='Streamplot with contours kernel density')
plt.show()
#theoretical gradient from the k-nn
# fig, ax = plt.subplots()
# ax.quiver(X, Y, dX, dY, JS_es1)
# ax.set(aspect=1, title='Quiver Plot from the theoretical k-nn')
# plt.show()
skip = (slice(None, None, 3), slice(None, None, 3))
fig, ax = plt.subplots()
ax.quiver(X[skip], Y[skip], dX[skip], dY[skip], JS_es1[skip])
ax.set(aspect=1, title='Quiver Plot from theoretical k-nn')
plt.show()
fig, ax = plt.subplots()
im = ax.imshow(JS_es1, extent=[X.min(), X.max(), Y.min(), Y.max()],origin='lower')
ax.quiver(X[skip], Y[skip], dX[skip], dY[skip])
fig.colorbar(im)
ax.set(aspect=1, title='Quiver Plot from theoretical k-nn')
plt.show()
fig, ax = plt.subplots()
ax.streamplot(X, Y, dX, dY, color=JS_es1, density=0.5, cmap='gist_earth')
cont = ax.contour(X, Y, JS_es1, cmap='gist_earth')
ax.clabel(cont)
ax.set(aspect=1, title='Streamplot with contours from theoretical k-nn')
plt.show()
#gradient from the k-nn surface
dYe, dXe = np.gradient(JS_es1, X[0,:], Y[:,0])
# fig, ax = plt.subplots()
# ax.quiver(X, Y, dX, dY, JS_es1)
# ax.set(aspect=1, title='Quiver Plot from k-nn surface')
# plt.show()
skip = (slice(None, None, 3), slice(None, None, 3))
fig, ax = plt.subplots()
ax.quiver(X[skip], Y[skip], dXe[skip], dYe[skip], JS_es1[skip])
ax.set(aspect=1, title='Quiver Plot from k-nn surface')
plt.show()
fig, ax = plt.subplots()
im = ax.imshow(JS_es1, extent=[X.min(), X.max(), Y.min(), Y.max()],origin='lower')
ax.quiver(X[skip], Y[skip], dXe[skip], dYe[skip])
fig.colorbar(im)
ax.set(aspect=1, title='Quiver Plot')
plt.show()
ax.set(title='K-nn surface quiver')
fig, ax = plt.subplots()
ax.streamplot(X, Y, dXe, dYe, color=JS_es1, density=0.5, cmap='gist_earth')
cont = ax.contour(X, Y, JS_es1, cmap='gist_earth')
ax.clabel(cont)
ax.set(aspect=1, title='Streamplot with contours from k-nn surface')
plt.show()
#gradient estimation from histogram entropy
dYh, dXh = np.gradient(JS_es3, X[0,:], Y[:,0])
# fig, ax = plt.subplots()
# ax.quiver(X, Y, dXe, dYe, JS_es3)
# ax.set(aspect=1, title='Quiver Plot')
# plt.show()
skip = (slice(None, None, 3), slice(None, None, 3))
fig, ax = plt.subplots()
ax.quiver(X[skip], Y[skip], dXh[skip], dYh[skip], JS_es3[skip])
ax.set(aspect=1, title='Quiver Plot histogram entropy')
plt.show()
fig, ax = plt.subplots()
im = ax.imshow(JS_es3, extent=[X.min(), X.max(), Y.min(), Y.max()],origin='lower')
ax.quiver(X[skip], Y[skip], dXh[skip], dYh[skip])
fig.colorbar(im)
ax.set(aspect=1, title='Quiver Plot histogram entropy')
plt.show()
fig, ax = plt.subplots()
ax.streamplot(X, Y, dXh, dYh, color=JS_es3, density=0.5, cmap='gist_earth') | [
"numpy.log",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.gradient",
"numpy.arange",
"numpy.histogram",
"scipy.stats.gaussian_kde",
"numpy.repeat",
"numpy.diff",
"numpy.max",
"numpy.min",
"numpy.meshgrid",
"random.randint",
"numpy.abs",
"numpy.ones",
"numpy.squeeze",
... | [((1453, 1464), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1461, 1464), True, 'import numpy as np\n'), ((1473, 1493), 'numpy.repeat', 'np.repeat', (['[k]', 'N', '(0)'], {}), '([k], N, 0)\n', (1482, 1493), True, 'import numpy as np\n'), ((1576, 1588), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (1585, 1588), True, 'import numpy as np\n'), ((2370, 2381), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2378, 2381), True, 'import numpy as np\n'), ((2390, 2410), 'numpy.repeat', 'np.repeat', (['[k]', 'N', '(0)'], {}), '([k], N, 0)\n', (2399, 2410), True, 'import numpy as np\n'), ((2561, 2573), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (2570, 2573), True, 'import numpy as np\n'), ((4412, 4423), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (4420, 4423), True, 'import numpy as np\n'), ((4432, 4452), 'numpy.repeat', 'np.repeat', (['[k]', 'N', '(0)'], {}), '([k], N, 0)\n', (4441, 4452), True, 'import numpy as np\n'), ((4528, 4540), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (4537, 4540), True, 'import numpy as np\n'), ((5911, 5925), 'numpy.squeeze', 'np.squeeze', (['dG'], {}), '(dG)\n', (5921, 5925), True, 'import numpy as np\n'), ((6293, 6324), 'numpy.array', 'np.array', (['Labels'], {'dtype': '"""int32"""'}), "(Labels, dtype='int32')\n", (6301, 6324), True, 'import numpy as np\n'), ((6914, 6936), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.01)'], {}), '(-1, 1, 0.01)\n', (6923, 6936), True, 'import numpy as np\n'), ((7012, 7033), 'copy.deepcopy', 'copy.deepcopy', (['JS_es1'], {}), '(JS_es1)\n', (7025, 7033), False, 'import copy\n'), ((7047, 7068), 'copy.deepcopy', 'copy.deepcopy', (['JS_es1'], {}), '(JS_es1)\n', (7060, 7068), False, 'import copy\n'), ((7929, 7952), 'numpy.meshgrid', 'np.meshgrid', (['Span', 'Span'], {}), '(Span, Span)\n', (7940, 7952), True, 'import numpy as np\n'), ((7964, 7976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7974, 7976), True, 'import matplotlib.pyplot as plt\n'), ((8136, 
8166), 'matplotlib.pyplot.title', 'plt.title', (['"""Eucliden dist JSd"""'], {}), "('Eucliden dist JSd')\n", (8145, 8166), True, 'import matplotlib.pyplot as plt\n'), ((8227, 8239), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8237, 8239), True, 'import matplotlib.pyplot as plt\n'), ((8399, 8428), 'matplotlib.pyplot.title', 'plt.title', (['"""Gauss kernel JSd"""'], {}), "('Gauss kernel JSd')\n", (8408, 8428), True, 'import matplotlib.pyplot as plt\n'), ((8478, 8488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8486, 8488), True, 'import matplotlib.pyplot as plt\n'), ((8504, 8516), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8514, 8516), True, 'import matplotlib.pyplot as plt\n'), ((8676, 8702), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram JSd"""'], {}), "('Histogram JSd')\n", (8685, 8702), True, 'import matplotlib.pyplot as plt\n'), ((8752, 8762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8760, 8762), True, 'import matplotlib.pyplot as plt\n'), ((8840, 8877), 'numpy.gradient', 'np.gradient', (['JS_es2', 'X[0, :]', 'Y[:, 0]'], {}), '(JS_es2, X[0, :], Y[:, 0])\n', (8851, 8877), True, 'import numpy as np\n'), ((9104, 9118), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9116, 9118), True, 'import matplotlib.pyplot as plt\n'), ((9245, 9255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9253, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9275, 9289), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9287, 9289), True, 'import matplotlib.pyplot as plt\n'), ((9515, 9525), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9523, 9525), True, 'import matplotlib.pyplot as plt\n'), ((9550, 9564), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9562, 9564), True, 'import matplotlib.pyplot as plt\n'), ((9804, 9814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9812, 9814), True, 'import matplotlib.pyplot as plt\n'), 
((10100, 10114), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10112, 10114), True, 'import matplotlib.pyplot as plt\n'), ((10249, 10259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10257, 10259), True, 'import matplotlib.pyplot as plt\n'), ((10283, 10297), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10295, 10297), True, 'import matplotlib.pyplot as plt\n'), ((10535, 10545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10543, 10545), True, 'import matplotlib.pyplot as plt\n'), ((10577, 10591), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10589, 10591), True, 'import matplotlib.pyplot as plt\n'), ((10850, 10860), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10858, 10860), True, 'import matplotlib.pyplot as plt\n'), ((10917, 10954), 'numpy.gradient', 'np.gradient', (['JS_es1', 'X[0, :]', 'Y[:, 0]'], {}), '(JS_es1, X[0, :], Y[:, 0])\n', (10928, 10954), True, 'import numpy as np\n'), ((11185, 11199), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11197, 11199), True, 'import matplotlib.pyplot as plt\n'), ((11332, 11342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11340, 11342), True, 'import matplotlib.pyplot as plt\n'), ((11366, 11380), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11378, 11380), True, 'import matplotlib.pyplot as plt\n'), ((11598, 11608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11606, 11608), True, 'import matplotlib.pyplot as plt\n'), ((11676, 11690), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11688, 11690), True, 'import matplotlib.pyplot as plt\n'), ((11947, 11957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11955, 11957), True, 'import matplotlib.pyplot as plt\n'), ((12021, 12058), 'numpy.gradient', 'np.gradient', (['JS_es3', 'X[0, :]', 'Y[:, 0]'], {}), '(JS_es3, X[0, :], Y[:, 0])\n', (12032, 12058), True, 'import numpy as np\n'), ((12273, 12287), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12285, 12287), True, 'import matplotlib.pyplot as plt\n'), ((12420, 12430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12428, 12430), True, 'import matplotlib.pyplot as plt\n'), ((12449, 12463), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12461, 12463), True, 'import matplotlib.pyplot as plt\n'), ((12699, 12709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12707, 12709), True, 'import matplotlib.pyplot as plt\n'), ((12741, 12755), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12753, 12755), True, 'import matplotlib.pyplot as plt\n'), ((674, 697), 'random.randint', 'random.randint', (['(50)', '(150)'], {}), '(50, 150)\n', (688, 697), False, 'import random\n'), ((1642, 1654), 'numpy.shape', 'np.shape', (['Xc'], {}), '(Xc)\n', (1650, 1654), True, 'import numpy as np\n'), ((1667, 1687), 'numpy.repeat', 'np.repeat', (['[k]', 'n', '(0)'], {}), '([k], n, 0)\n', (1676, 1687), True, 'import numpy as np\n'), ((1814, 1837), 'numpy.arange', 'np.arange', (['(1)', '(nc - 1)', '(1)'], {}), '(1, nc - 1, 1)\n', (1823, 1837), True, 'import numpy as np\n'), ((2523, 2533), 'numpy.min', 'np.min', (['xp'], {}), '(xp)\n', (2529, 2533), True, 'import numpy as np\n'), ((2534, 2544), 'numpy.max', 'np.max', (['xp'], {}), '(xp)\n', (2540, 2544), True, 'import numpy as np\n'), ((4018, 4029), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (4026, 4029), True, 'import numpy as np\n'), ((4042, 4062), 'numpy.repeat', 'np.repeat', (['[k]', 'n', '(0)'], {}), '([k], n, 0)\n', (4051, 4062), True, 'import numpy as np\n'), ((4148, 4202), 'numpy.histogram', 'np.histogram', (['Xc'], {'bins': 'nr_pts', 'range': '[pts[0], pts[-1]]'}), '(Xc, bins=nr_pts, range=[pts[0], pts[-1]])\n', (4160, 4202), True, 'import numpy as np\n'), ((4607, 4619), 'numpy.shape', 'np.shape', (['Xc'], {}), '(Xc)\n', (4615, 4619), True, 'import numpy as np\n'), ((4632, 4652), 'numpy.repeat', 
'np.repeat', (['[k]', 'n', '(0)'], {}), '([k], n, 0)\n', (4641, 4652), True, 'import numpy as np\n'), ((4779, 4802), 'numpy.arange', 'np.arange', (['(1)', '(nc - 1)', '(1)'], {}), '(1, nc - 1, 1)\n', (4788, 4802), True, 'import numpy as np\n'), ((6241, 6277), 'numpy.array', 'np.array', (['data_rand'], {'dtype': '"""float32"""'}), "(data_rand, dtype='float32')\n", (6249, 6277), True, 'import numpy as np\n'), ((6382, 6406), 'random.randint', 'random.randint', (['(0)', '(M - 1)'], {}), '(0, M - 1)\n', (6396, 6406), False, 'import random\n'), ((6561, 6585), 'copy.deepcopy', 'copy.deepcopy', (['data_rand'], {}), '(data_rand)\n', (6574, 6585), False, 'import copy\n'), ((6593, 6617), 'numpy.arange', 'np.arange', (['(0)', 'CLASSES', '(1)'], {}), '(0, CLASSES, 1)\n', (6602, 6617), True, 'import numpy as np\n'), ((6661, 6677), 'numpy.ones', 'np.ones', (['CLASSES'], {}), '(CLASSES)\n', (6668, 6677), True, 'import numpy as np\n'), ((949, 978), 'random.randint', 'random.randint', (['(0)', '(Groups - 1)'], {}), '(0, Groups - 1)\n', (963, 978), False, 'import random\n'), ((1509, 1525), 'numpy.sum', 'np.sum', (['(X * T)', '(1)'], {}), '(X * T, 1)\n', (1515, 1525), True, 'import numpy as np\n'), ((1765, 1777), 'numpy.shape', 'np.shape', (['xc'], {}), '(xc)\n', (1773, 1777), True, 'import numpy as np\n'), ((2426, 2442), 'numpy.sum', 'np.sum', (['(X * T)', '(1)'], {}), '(X * T, 1)\n', (2432, 2442), True, 'import numpy as np\n'), ((2631, 2644), 'numpy.shape', 'np.shape', (['pts'], {}), '(pts)\n', (2639, 2644), True, 'import numpy as np\n'), ((2754, 2766), 'numpy.shape', 'np.shape', (['Xc'], {}), '(Xc)\n', (2762, 2766), True, 'import numpy as np\n'), ((2783, 2803), 'numpy.repeat', 'np.repeat', (['[k]', 'n', '(0)'], {}), '([k], n, 0)\n', (2792, 2803), True, 'import numpy as np\n'), ((2894, 2935), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['xc'], {'bw_method': '"""scott"""'}), "(xc, bw_method='scott')\n", (2912, 2935), False, 'from scipy import stats\n'), ((3379, 3391), 
'numpy.sum', 'np.sum', (['Fmix'], {}), '(Fmix)\n', (3385, 3391), True, 'import numpy as np\n'), ((3520, 3532), 'numpy.shape', 'np.shape', (['Xc'], {}), '(Xc)\n', (3528, 3532), True, 'import numpy as np\n'), ((3549, 3569), 'numpy.repeat', 'np.repeat', (['[k]', 'n', '(0)'], {}), '([k], n, 0)\n', (3558, 3569), True, 'import numpy as np\n'), ((3668, 3722), 'numpy.histogram', 'np.histogram', (['xc'], {'bins': 'nr_pts', 'range': '[pts[0], pts[-1]]'}), '(xc, bins=nr_pts, range=[pts[0], pts[-1]])\n', (3680, 3722), True, 'import numpy as np\n'), ((4219, 4231), 'numpy.sum', 'np.sum', (['Fmix'], {}), '(Fmix)\n', (4225, 4231), True, 'import numpy as np\n'), ((4468, 4484), 'numpy.sum', 'np.sum', (['(X * T)', '(1)'], {}), '(X * T, 1)\n', (4474, 4484), True, 'import numpy as np\n'), ((4730, 4742), 'numpy.shape', 'np.shape', (['xc'], {}), '(xc)\n', (4738, 4742), True, 'import numpy as np\n'), ((5377, 5403), 'numpy.squeeze', 'np.squeeze', (['redX[id_xp, :]'], {}), '(redX[id_xp, :])\n', (5387, 5403), True, 'import numpy as np\n'), ((1523, 1540), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (1537, 1540), True, 'import numpy as np\n'), ((1715, 1732), 'numpy.sum', 'np.sum', (['(Xc * T)', '(1)'], {}), '(Xc * T, 1)\n', (1721, 1732), True, 'import numpy as np\n'), ((1969, 2000), 'numpy.abs', 'np.abs', (['(xp[xp != xc[i]] - xc[i])'], {}), '(xp[xp != xc[i]] - xc[i])\n', (1975, 2000), True, 'import numpy as np\n'), ((2022, 2068), 'numpy.abs', 'np.abs', (['[xc[i] - xc[i - 1], xc[i + 1] - xc[i]]'], {}), '([xc[i] - xc[i - 1], xc[i + 1] - xc[i]])\n', (2028, 2068), True, 'import numpy as np\n'), ((2440, 2457), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (2454, 2457), True, 'import numpy as np\n'), ((3099, 3109), 'numpy.sum', 'np.sum', (['Fx'], {}), '(Fx)\n', (3105, 3109), True, 'import numpy as np\n'), ((3347, 3360), 'numpy.diff', 'np.diff', (['fmix'], {}), '(fmix)\n', (3354, 3360), True, 'import numpy as np\n'), ((3794, 3803), 'numpy.sum', 'np.sum', (['f'], {}), 
'(f)\n', (3800, 3803), True, 'import numpy as np\n'), ((4086, 4102), 'numpy.sum', 'np.sum', (['(X * T)', '(1)'], {}), '(X * T, 1)\n', (4092, 4102), True, 'import numpy as np\n'), ((4482, 4499), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (4496, 4499), True, 'import numpy as np\n'), ((4680, 4697), 'numpy.sum', 'np.sum', (['(Xc * T)', '(1)'], {}), '(Xc * T, 1)\n', (4686, 4697), True, 'import numpy as np\n'), ((4947, 4978), 'numpy.abs', 'np.abs', (['(xp[xp != xc[i]] - xc[i])'], {}), '(xp[xp != xc[i]] - xc[i])\n', (4953, 4978), True, 'import numpy as np\n'), ((5061, 5072), 'numpy.abs', 'np.abs', (['dvc'], {}), '(dvc)\n', (5067, 5072), True, 'import numpy as np\n'), ((5089, 5103), 'numpy.abs', 'np.abs', (['dvc[0]'], {}), '(dvc[0])\n', (5095, 5103), True, 'import numpy as np\n'), ((5104, 5118), 'numpy.abs', 'np.abs', (['dvc[1]'], {}), '(dvc[1])\n', (5110, 5118), True, 'import numpy as np\n'), ((5887, 5896), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (5893, 5896), True, 'import numpy as np\n'), ((6961, 6975), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (6969, 6975), True, 'import numpy as np\n'), ((6979, 6993), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (6987, 6993), True, 'import numpy as np\n'), ((7088, 7102), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (7096, 7102), True, 'import numpy as np\n'), ((7106, 7120), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (7114, 7120), True, 'import numpy as np\n'), ((7145, 7159), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (7153, 7159), True, 'import numpy as np\n'), ((7163, 7177), 'numpy.shape', 'np.shape', (['Span'], {}), '(Span)\n', (7171, 7177), True, 'import numpy as np\n'), ((7295, 7311), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (7303, 7311), True, 'import numpy as np\n'), ((7311, 7333), 'numpy.linalg.norm', 'np.linalg.norm', (['[i, j]'], {}), '([i, j])\n', (7325, 7333), True, 'import numpy as np\n'), ((844, 876), 'random.randint', 
'random.randint', (['sigma', '(2 * sigma)'], {}), '(sigma, 2 * sigma)\n', (858, 876), False, 'import random\n'), ((1178, 1240), 'random.randint', 'random.randint', (['(averageSig - 2 * sigma)', '(averageSig + 2 * sigma)'], {}), '(averageSig - 2 * sigma, averageSig + 2 * sigma)\n', (1192, 1240), False, 'import random\n'), ((1730, 1747), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (1744, 1747), True, 'import numpy as np\n'), ((2140, 2161), 'numpy.log2', 'np.log2', (['(N / (nc - 1))'], {}), '(N / (nc - 1))\n', (2147, 2161), True, 'import numpy as np\n'), ((2201, 2217), 'numpy.log2', 'np.log2', (['(sx / rx)'], {}), '(sx / rx)\n', (2208, 2217), True, 'import numpy as np\n'), ((2218, 2239), 'numpy.log2', 'np.log2', (['(N / (nc - 1))'], {}), '(N / (nc - 1))\n', (2225, 2239), True, 'import numpy as np\n'), ((2835, 2852), 'numpy.sum', 'np.sum', (['(Xc * T)', '(1)'], {}), '(Xc * T, 1)\n', (2841, 2852), True, 'import numpy as np\n'), ((3067, 3077), 'numpy.diff', 'np.diff', (['f'], {}), '(f)\n', (3074, 3077), True, 'import numpy as np\n'), ((3427, 3444), 'numpy.log2', 'np.log2', (['H[H > 0]'], {}), '(H[H > 0])\n', (3434, 3444), True, 'import numpy as np\n'), ((3601, 3618), 'numpy.sum', 'np.sum', (['(Xc * T)', '(1)'], {}), '(Xc * T, 1)\n', (3607, 3618), True, 'import numpy as np\n'), ((4100, 4117), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (4114, 4117), True, 'import numpy as np\n'), ((4267, 4284), 'numpy.log2', 'np.log2', (['H[H > 0]'], {}), '(H[H > 0])\n', (4274, 4284), True, 'import numpy as np\n'), ((4695, 4712), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (4709, 4712), True, 'import numpy as np\n'), ((5223, 5254), 'numpy.abs', 'np.abs', (['(xp[xp != xc[i]] - xc[i])'], {}), '(xp[xp != xc[i]] - xc[i])\n', (5229, 5254), True, 'import numpy as np\n'), ((1243, 1275), 'random.randint', 'random.randint', (['sigma', '(2 * sigma)'], {}), '(sigma, 2 * sigma)\n', (1257, 1275), False, 'import random\n'), ((2850, 2867), 
'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (2864, 2867), True, 'import numpy as np\n'), ((3616, 3633), 'numpy.linalg.norm', 'np.linalg.norm', (['k'], {}), '(k)\n', (3630, 3633), True, 'import numpy as np\n'), ((5261, 5292), 'numpy.abs', 'np.abs', (['(xp[xp != xc[i]] - xc[i])'], {}), '(xp[xp != xc[i]] - xc[i])\n', (5267, 5292), True, 'import numpy as np\n'), ((5828, 5844), 'numpy.abs', 'np.abs', (['delta_xp'], {}), '(delta_xp)\n', (5834, 5844), True, 'import numpy as np\n'), ((5845, 5861), 'numpy.abs', 'np.abs', (['delta_xi'], {}), '(delta_xi)\n', (5851, 5861), True, 'import numpy as np\n'), ((3159, 3178), 'numpy.log2', 'np.log2', (['Fx[Fx > 0]'], {}), '(Fx[Fx > 0])\n', (3166, 3178), True, 'import numpy as np\n'), ((3851, 3868), 'numpy.log2', 'np.log2', (['f[f > 0]'], {}), '(f[f > 0])\n', (3858, 3868), True, 'import numpy as np\n'), ((5756, 5772), 'numpy.abs', 'np.abs', (['delta_xi'], {}), '(delta_xi)\n', (5762, 5772), True, 'import numpy as np\n'), ((5792, 5808), 'numpy.abs', 'np.abs', (['delta_xp'], {}), '(delta_xp)\n', (5798, 5808), True, 'import numpy as np\n')] |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to monitoring, analyzing, and reporting info for Modules in PyTorch.
Records things like FLOPS, input and output shapes, kernel shapes, etc.
"""
from typing import List, Tuple, Union
import numpy
from torch import Tensor
from torch.nn import (
CELU,
ELU,
GLU,
SELU,
Hardtanh,
LeakyReLU,
Linear,
LogSigmoid,
Module,
PReLU,
ReLU,
ReLU6,
RReLU,
Sigmoid,
Softmax,
Softmax2d,
Tanh,
Threshold,
)
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.pooling import (
_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd,
_AvgPoolNd,
_MaxPoolNd,
)
from torch.utils.hooks import RemovableHandle
from sparseml.optim import AnalyzedLayerDesc
from sparseml.pytorch.utils import get_layer, get_prunable_layers
__all__ = ["ModuleAnalyzer"]
class ModuleAnalyzer(object):
"""
An analyzer implementation for monitoring the execution profile and graph of
a Module in PyTorch.
:param module: the module to analyze
:param enabled: True to enable the hooks for analyzing and actively track,
False to disable and not track
"""
def __init__(self, module: Module, enabled: bool = False):
super(ModuleAnalyzer, self).__init__()
self._module = module
self._hooks = None # type: List[RemovableHandle]
self._forward_called = False
self._enabled = False
self._call_count = -1
self.enabled = enabled
    def __del__(self):
        # detach any registered hooks on garbage collection so the
        # analyzed module is left clean
        self._delete_hooks()
    @property
    def enabled(self) -> bool:
        """
        :return: True if enabled and the hooks for analyzing are active
            (registered on the module), False otherwise
        """
        return self._enabled
@enabled.setter
def enabled(self, value: bool):
"""
:param value: True to enable the hooks for analyzing, False to disable
"""
if value and not self._enabled:
self._create_hooks()
self._param_grad = None
elif not value and self._enabled:
self._delete_hooks()
self._enabled = value
    @property
    def module(self) -> Module:
        """
        :return: The module that is being actively analyzed
            (the one passed at construction time)
        """
        return self._module
def layer_desc(self, name: Union[str, None] = None) -> AnalyzedLayerDesc:
"""
Get a specific layer's description within the Module.
Set to None to get the overall Module's description.
:param name: name of the layer to get a description for,
None for an overall description
:return: the analyzed layer description for the given name
"""
if not self._forward_called:
raise RuntimeError(
"module must have forward called with sample input "
"before getting a layer desc"
)
mod = get_layer(name, self._module) if name is not None else self._module
return ModuleAnalyzer._mod_desc(mod)
def ks_layer_descs(self) -> List[AnalyzedLayerDesc]:
"""
Get the descriptions for all layers in the module that support kernel sparsity
(model pruning). Ex: all convolutions and linear layers.
:return: a list of descriptions for all layers in the module that support ks
"""
descs = []
for (name, _) in get_prunable_layers(self._module):
desc = self.layer_desc(name)
if desc is None:
print("analyzer: no description found for {}".format(name))
else:
descs.append(desc)
descs.sort(key=lambda val: val.execution_order)
return descs
def _create_hooks(self):
self._delete_hooks()
self._forward_called = False
self._call_count = -1
self._hooks = []
for name, mod in self._module.named_modules():
self._hooks.extend(
self._create_mod_hooks(mod, name if mod != self._module else None)
)
def _delete_hooks(self):
if self._hooks is not None:
for hook in self._hooks:
hook.remove()
self._hooks.clear()
def _create_mod_hooks(self, mod: Module, name: str) -> List[RemovableHandle]:
mod._analyzed_layer_desc = None
mod._analyzed_layer_name = name
forward_pre_hook = mod.register_forward_pre_hook(self._forward_pre_hook)
if isinstance(mod, _ConvNd):
forward_hook = mod.register_forward_hook(self._conv_hook)
elif isinstance(mod, Linear):
forward_hook = mod.register_forward_hook(self._linear_hook)
elif isinstance(mod, _BatchNorm):
forward_hook = mod.register_forward_hook(self._bn_hook)
elif isinstance(mod, _MaxPoolNd) or isinstance(mod, _AvgPoolNd):
forward_hook = mod.register_forward_hook(self._pool_hook)
elif isinstance(mod, _AdaptiveAvgPoolNd) or isinstance(mod, _AdaptiveMaxPoolNd):
forward_hook = mod.register_forward_hook(self._adaptive_pool_hook)
elif (
isinstance(mod, Threshold)
or isinstance(mod, ReLU)
or isinstance(mod, ReLU6)
or isinstance(mod, RReLU)
or isinstance(mod, LeakyReLU)
or isinstance(mod, PReLU)
or isinstance(mod, ELU)
or isinstance(mod, CELU)
or isinstance(mod, SELU)
or isinstance(mod, GLU)
or isinstance(mod, Hardtanh)
or isinstance(mod, Tanh)
or isinstance(mod, Sigmoid)
or isinstance(mod, LogSigmoid)
):
forward_hook = mod.register_forward_hook(self._activation_hook)
elif isinstance(mod, Softmax) or isinstance(mod, Softmax2d):
forward_hook = mod.register_forward_hook(self._softmax_hook)
else:
forward_hook = mod.register_forward_hook(self._module_hook)
return [forward_pre_hook, forward_hook]
    def _forward_pre_hook(
        self,
        mod: Module,
        inp: Union[Tuple[Tensor, ...], Tensor],
    ):
        # Runs before every instrumented module's forward(): bump the global
        # call counter and seed a fresh description so the forward hook can
        # fill in shapes/params/flops afterwards.
        self._call_count += 1
        mod._analyzed_layer_desc = AnalyzedLayerDesc(
            name=mod._analyzed_layer_name,
            type_=mod.__class__.__name__,
            execution_order=self._call_count,
        )
def _init_forward_hook(
self,
mod: Module,
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
) -> Tuple[AnalyzedLayerDesc, Tuple[Tensor, ...], Tuple[Tensor, ...]]:
self._forward_called = True
if isinstance(inp, Tensor):
inp = (inp,)
if isinstance(out, Tensor):
out = (out,)
desc = mod._analyzed_layer_desc
desc.input_shape = tuple(
tuple(ii for ii in i.shape) for i in inp if isinstance(i, Tensor)
)
desc.output_shape = tuple(
tuple(oo for oo in o.shape) for o in out if isinstance(o, Tensor)
)
return desc, inp, out
    def _module_hook(
        self,
        mod: Union[_MaxPoolNd, _AvgPoolNd],
        inp: Union[Tuple[Tensor, ...], Tensor],
        out: Union[Tuple[Tensor, ...], Tensor],
    ):
        # Fallback hook for module types without a specialized handler:
        # only the input/output shapes are recorded (no param/flop counts).
        desc, inp, out = self._init_forward_hook(mod, inp, out)
def _conv_hook(
self,
mod: _ConvNd,
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = (
{"weight": mod.weight}
if mod.bias is None
else {"weight": mod.weight, "bias": mod.bias}
)
prunable_params = {"weight": mod.weight}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
desc.stride = mod.stride
mult_per_out_pix = float(numpy.prod(mod.kernel_size)) * mod.in_channels
add_per_out_pix = 1 if mod.bias is not None else 0
out_pix = float(numpy.prod(out[0].shape[1:]))
# total flops counts the cost of summing the
# multiplications together as well
# most implementations and papers do not include this cost
desc.flops = (mult_per_out_pix + add_per_out_pix) * out_pix
desc.total_flops = (mult_per_out_pix * 2 + add_per_out_pix) * out_pix
def _linear_hook(
self,
mod: Linear,
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = (
{"weight": mod.weight}
if mod.bias is None
else {"weight": mod.weight, "bias": mod.bias}
)
prunable_params = {"weight": mod.weight}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
mult_per_out_pix = mod.in_features
add_per_out_pix = 1 if mod.bias is not None else 0
out_pix = float(numpy.prod(out[0].shape[1:]))
# total flops counts the cost of summing the
# multiplications together as well
# most implementations and papers do not include this cost
desc.flops = (mult_per_out_pix + add_per_out_pix) * out_pix
desc.total_flops = (mult_per_out_pix * 2 + add_per_out_pix) * out_pix
def _bn_hook(
self,
mod: Linear,
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = (
{"weight": mod.weight}
if mod.bias is None
else {"weight": mod.weight, "bias": mod.bias}
)
prunable_params = {}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
# 4 elementwise operations on the output space, just need to add all of them up
desc.flops = 4 * float(numpy.prod(out[0].shape[1:]))
desc.total_flops = desc.flops
def _pool_hook(
self,
mod: Union[_MaxPoolNd, _AvgPoolNd],
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = {key: val for key, val in mod.named_parameters()}
prunable_params = {}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
desc.stride = mod.stride
flops_per_out_pix = float(numpy.prod(mod.kernel_size) + 1)
out_pix = float(numpy.prod(out[0].shape[1:]))
desc.flops = flops_per_out_pix * out_pix
desc.total_flops = desc.flops
def _adaptive_pool_hook(
self,
mod: Union[_MaxPoolNd, _AvgPoolNd],
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = {key: val for key, val in mod.named_parameters()}
prunable_params = {}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
desc.stride = 1
stride = tuple(
inp[0].shape[i] // out[0].shape[i] for i in range(2, len(inp[0].shape))
)
kernel_size = tuple(
inp[0].shape[i] - (out[0].shape[i] - 1) * stride[i - 2]
for i in range(2, len(inp[0].shape))
)
flops_per_out_pix = float(numpy.prod(kernel_size))
out_pix = float(numpy.prod(out[0].shape[1:]))
desc.flops = flops_per_out_pix * out_pix
desc.total_flops = desc.flops
def _activation_hook(
self,
mod: Union[_MaxPoolNd, _AvgPoolNd],
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = {key: val for key, val in mod.named_parameters()}
prunable_params = {}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
# making assumption that flops spent is one per element
# (so swish is counted the same activation ReLU)
desc.flops = float(numpy.prod(out[0].shape[1:]))
desc.total_flops = desc.flops
def _softmax_hook(
self,
mod: Union[_MaxPoolNd, _AvgPoolNd],
inp: Union[Tuple[Tensor, ...], Tensor],
out: Union[Tuple[Tensor, ...], Tensor],
):
desc, inp, out = self._init_forward_hook(mod, inp, out)
params = {key: val for key, val in mod.named_parameters()}
prunable_params = {}
desc.params = sum([val.numel() for val in params.values()])
desc.prunable_params = sum([val.numel() for val in prunable_params.values()])
desc.zeroed_params = sum(
[(val == 0).sum().item() for val in prunable_params.values()]
)
desc.params_dims = {
key: tuple(s for s in val.shape) for key, val in params.items()
}
desc.prunable_params_dims = {
key: tuple(s for s in val.shape) for key, val in prunable_params.items()
}
flops_per_channel = (
2 if len(out[0].shape) < 3 else float(numpy.prod(out[0].shape[2:]))
)
desc.flops = flops_per_channel * out[0].shape[1]
desc.total_flops = desc.flops
@staticmethod
def _mod_desc(mod: Module) -> AnalyzedLayerDesc:
child_descs = []
for _, child in mod.named_children():
if child != mod:
child_desc = ModuleAnalyzer._mod_desc(child)
if child_desc:
child_descs.append(child_desc)
if not mod._analyzed_layer_desc:
return None
return AnalyzedLayerDesc.merge_descs(mod._analyzed_layer_desc, child_descs)
| [
"numpy.prod",
"sparseml.optim.AnalyzedLayerDesc.merge_descs",
"sparseml.pytorch.utils.get_layer",
"sparseml.pytorch.utils.get_prunable_layers",
"sparseml.optim.AnalyzedLayerDesc"
] | [((4012, 4045), 'sparseml.pytorch.utils.get_prunable_layers', 'get_prunable_layers', (['self._module'], {}), '(self._module)\n', (4031, 4045), False, 'from sparseml.pytorch.utils import get_layer, get_prunable_layers\n'), ((6814, 6931), 'sparseml.optim.AnalyzedLayerDesc', 'AnalyzedLayerDesc', ([], {'name': 'mod._analyzed_layer_name', 'type_': 'mod.__class__.__name__', 'execution_order': 'self._call_count'}), '(name=mod._analyzed_layer_name, type_=mod.__class__.\n __name__, execution_order=self._call_count)\n', (6831, 6931), False, 'from sparseml.optim import AnalyzedLayerDesc\n'), ((16999, 17067), 'sparseml.optim.AnalyzedLayerDesc.merge_descs', 'AnalyzedLayerDesc.merge_descs', (['mod._analyzed_layer_desc', 'child_descs'], {}), '(mod._analyzed_layer_desc, child_descs)\n', (17028, 17067), False, 'from sparseml.optim import AnalyzedLayerDesc\n'), ((3533, 3562), 'sparseml.pytorch.utils.get_layer', 'get_layer', (['name', 'self._module'], {}), '(name, self._module)\n', (3542, 3562), False, 'from sparseml.pytorch.utils import get_layer, get_prunable_layers\n'), ((9080, 9108), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (9090, 9108), False, 'import numpy\n'), ((10497, 10525), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (10507, 10525), False, 'import numpy\n'), ((12940, 12968), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (12950, 12968), False, 'import numpy\n'), ((14264, 14287), 'numpy.prod', 'numpy.prod', (['kernel_size'], {}), '(kernel_size)\n', (14274, 14287), False, 'import numpy\n'), ((14313, 14341), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (14323, 14341), False, 'import numpy\n'), ((15450, 15478), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (15460, 15478), False, 'import numpy\n'), ((8950, 8977), 'numpy.prod', 'numpy.prod', (['mod.kernel_size'], {}), '(mod.kernel_size)\n', (8960, 
8977), False, 'import numpy\n'), ((11883, 11911), 'numpy.prod', 'numpy.prod', (['out[0].shape[1:]'], {}), '(out[0].shape[1:])\n', (11893, 11911), False, 'import numpy\n'), ((12883, 12910), 'numpy.prod', 'numpy.prod', (['mod.kernel_size'], {}), '(mod.kernel_size)\n', (12893, 12910), False, 'import numpy\n'), ((16466, 16494), 'numpy.prod', 'numpy.prod', (['out[0].shape[2:]'], {}), '(out[0].shape[2:])\n', (16476, 16494), False, 'import numpy\n')] |
###############################################################################
#
# Sale14: extinction model from Sale et al. 2014 2014MNRAS.443.2907S
#
###############################################################################
import os, os.path
import sys
import numpy
from scipy import interpolate
import asciitable
from mwdust.util.extCurves import aebv
from mwdust.util.tools import cos_sphere_dist
from mwdust.DustMap3D import DustMap3D
_DEGTORAD= numpy.pi/180.
_saledir= os.path.join(os.getenv('DUST_DIR'),'sale14')
_ERASESTR= " "
class Sale14(DustMap3D):
    """extinction model from Sale et al. 2014 2014MNRAS.443.2907S"""
    def __init__(self,filter=None,sf10=True):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize the Sale14 dust map
        INPUT:
           filter= filter to return the extinction in
           sf10= (True) if True, use the Schlafly & Finkbeiner calibrations
        OUTPUT:
           object
        HISTORY:
           2015-03-08 - Started - Bovy (IAS)
        """
        DustMap3D.__init__(self,filter=filter)
        self._sf10= sf10
        #Read the maps
        sys.stdout.write('\r'+"Reading Sale et al. (2014) data file ...\r")
        sys.stdout.flush()
        self._saledata= asciitable.read(os.path.join(_saledir,
                                             'Amap.dat'),
                                readme=os.path.join(_saledir,
                                                    'ReadMe'),
                                Reader=asciitable.cds.Cds,
                                guess=False,
                                fill_values=[('', '-999')])
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
        # Some summaries
        # per-pixel angular extent and the overall footprint of the map
        self._dl= self._saledata['lmax']-self._saledata['lmin']
        self._db= self._saledata['b_max']-self._saledata['b_min']
        self._lmin= numpy.amin(self._saledata['lmin'])
        self._lmax= numpy.amax(self._saledata['lmax'])
        self._bmin= numpy.amin(self._saledata['b_min'])
        self._bmax= numpy.amax(self._saledata['b_max'])
        # 150 distance bins at 0.05..14.95 kpc (bin centers)
        self._ndistbin= 150
        self._ds= numpy.linspace(0.05,14.95,self._ndistbin)
        # For dust_vals: unit vectors of every pixel center, precomputed for
        # the spherical-distance test in dust_vals_disk
        self._sintheta= numpy.sin((90.-self._saledata['GLAT'])*_DEGTORAD)
        self._costheta= numpy.cos((90.-self._saledata['GLAT'])*_DEGTORAD)
        self._sinphi= numpy.sin(self._saledata['GLON']*_DEGTORAD)
        self._cosphi= numpy.cos(self._saledata['GLON']*_DEGTORAD)
        self._intps= numpy.zeros(len(self._saledata),dtype='object') #array to cache interpolated extinctions
        return None
    def _evaluate(self,l,b,d,_lbIndx=None):
        """
        NAME:
           _evaluate
        PURPOSE:
           evaluate the dust-map
        INPUT:
           l- Galactic longitude (deg)
           b- Galactic latitude (deg)
           d- distance (kpc) can be array
        OUTPUT:
           extinction
        HISTORY:
           2015-03-08 - Started - Bovy (IAS)
        """
        if isinstance(l,numpy.ndarray) or isinstance(b,numpy.ndarray):
            raise NotImplementedError("array input for l and b for Sale et al. (2014) dust map not implemented")
        if _lbIndx is None: lbIndx= self._lbIndx(l,b)
        else: lbIndx= _lbIndx
        # one linear spline in distance per line of sight, cached in _intps
        if self._intps[lbIndx] != 0:
            out= self._intps[lbIndx](d)
        else:
            tlbData= self.lbData(l,b)
            interpData=\
                interpolate.InterpolatedUnivariateSpline(self._ds,
                                                         tlbData['a0'],
                                                         k=1)
            out= interpData(d)
            self._intps[lbIndx]= interpData
        if self._filter is None: # Sale et al. say A0/Aks = 11
            return out/11./aebv('2MASS Ks',sf10=self._sf10)
        else: # if sf10, first put ebv on SFD scale
            return out/11./aebv('2MASS Ks',sf10=self._sf10)\
                *aebv(self._filter,sf10=self._sf10)
    def dust_vals_disk(self,lcen,bcen,dist,radius):
        """
        NAME:
           dust_vals_disk
        PURPOSE:
           return the distribution of extinction within a small disk as samples
        INPUT:
           lcen, bcen - Galactic longitude and latitude of the center of the disk (deg)
           dist - distance in kpc
           radius - radius of the disk (deg)
        OUTPUT:
           (pixarea,extinction) - arrays of pixel-area in sq rad and extinction value
        HISTORY:
           2015-03-07 - Written - Bovy (IAS)
        """
        # Find all of the (l,b) of the pixels within radius of (lcen,bcen)
        # (cosine of the angular separation >= cos(radius) selects the disk)
        indx= cos_sphere_dist(self._sintheta,self._costheta,
                              self._sinphi,self._cosphi,
                              numpy.sin((90.-bcen)*_DEGTORAD),
                              numpy.cos((90.-bcen)*_DEGTORAD),
                              numpy.sin(lcen*_DEGTORAD),
                              numpy.cos(lcen*_DEGTORAD)) \
                              >= numpy.cos(radius*_DEGTORAD)
        ll= self._saledata['GLON'][indx]
        bb= self._saledata['GLAT'][indx]
        # Now get the extinctions for these pixels
        pixarea= []
        extinction= []
        for l,b in zip(ll,bb):
            lbIndx= self._lbIndx(l,b)
            extinction.append(self._evaluate(l,b,dist,_lbIndx=lbIndx))
            pixarea.append(self._dl[lbIndx]*self._db[lbIndx]*_DEGTORAD**2.)
        pixarea= numpy.array(pixarea)
        extinction= numpy.array(extinction)
        return (pixarea,extinction)
    def dmax(self,l,b):
        """
        NAME:
           dmax
        PURPOSE:
           return the maximum distance for which to trust the Sale et al. (2014) data
        INPUT:
           l- Galactic longitude (deg)
           b- Galactic latitude (deg)
        OUTPUT:
           maximum distance in kpc
        HISTORY:
           2015-03-08 - Written - Bovy (IAS)
        """
        lbIndx= self._lbIndx(l,b)
        # 'trust' is presumably stored in pc (converted to kpc here) -- confirm
        return self._saledata['trust'][lbIndx]/1000.
    def lbData(self,l,b):
        """
        NAME:
           lbData
        PURPOSE:
           return the Sale et al. (2014) data corresponding to a given
           line of sight
        INPUT:
           l- Galactic longitude (deg)
           b- Galactic latitude (deg)
        OUTPUT:
        HISTORY:
           2015-03-08 - Written - Bovy (IAS)
        """
        #Find correct entry
        lbIndx= self._lbIndx(l,b)
        #Build output array
        out= numpy.recarray((self._ndistbin,),
                            dtype=[('a0', 'f8'),
                                   ('e_a0','f8')])
        for ii in range(self._ndistbin):
            out[ii]['a0']= self._saledata[lbIndx]['meanA%i' % (ii+1)]
            # NOTE(review): 'e_a0' is filled from the same 'meanA%i' column as
            # 'a0'; presumably it should read the uncertainty column of the
            # CDS table instead -- confirm against the ReadMe.
            out[ii]['e_a0']= self._saledata[lbIndx]['meanA%i' % (ii+1)]
        return out
    def _lbIndx(self,l,b):
        """Return the index in the _saledata array corresponding to this (l,b)"""
        if l <= self._lmin or l >= self._lmax \
                or b <= self._bmin or b >= self._bmax:
            raise IndexError("Given (l,b) pair not within the region covered by the Sale et al. (2014) dust map")
        return numpy.argmin((l-self._saledata['GLON'])**2./self._dl**2.\
                                +(b-self._saledata['GLAT'])**2./self._db**2.)
| [
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.amin",
"os.getenv",
"mwdust.util.extCurves.aebv",
"os.path.join",
"numpy.array",
"numpy.linspace",
"mwdust.DustMap3D.DustMap3D.__init__",
"numpy.cos",
"numpy.recarray",
"numpy.argmin",
"numpy.sin",
"sys.stdout.flush",
"numpy.amax",
... | [((498, 519), 'os.getenv', 'os.getenv', (['"""DUST_DIR"""'], {}), "('DUST_DIR')\n", (507, 519), False, 'import os, os.path\n'), ((1130, 1169), 'mwdust.DustMap3D.DustMap3D.__init__', 'DustMap3D.__init__', (['self'], {'filter': 'filter'}), '(self, filter=filter)\n', (1148, 1169), False, 'from mwdust.DustMap3D import DustMap3D\n'), ((1225, 1294), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + 'Reading Sale et al. (2014) data file ...\\r')"], {}), "('\\r' + 'Reading Sale et al. (2014) data file ...\\r')\n", (1241, 1294), False, 'import sys\n'), ((1301, 1319), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1317, 1319), False, 'import sys\n'), ((1786, 1827), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + _ERASESTR + '\\r')"], {}), "('\\r' + _ERASESTR + '\\r')\n", (1802, 1827), False, 'import sys\n'), ((1832, 1850), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1848, 1850), False, 'import sys\n'), ((2026, 2060), 'numpy.amin', 'numpy.amin', (["self._saledata['lmin']"], {}), "(self._saledata['lmin'])\n", (2036, 2060), False, 'import numpy\n'), ((2081, 2115), 'numpy.amax', 'numpy.amax', (["self._saledata['lmax']"], {}), "(self._saledata['lmax'])\n", (2091, 2115), False, 'import numpy\n'), ((2136, 2171), 'numpy.amin', 'numpy.amin', (["self._saledata['b_min']"], {}), "(self._saledata['b_min'])\n", (2146, 2171), False, 'import numpy\n'), ((2192, 2227), 'numpy.amax', 'numpy.amax', (["self._saledata['b_max']"], {}), "(self._saledata['b_max'])\n", (2202, 2227), False, 'import numpy\n'), ((2274, 2317), 'numpy.linspace', 'numpy.linspace', (['(0.05)', '(14.95)', 'self._ndistbin'], {}), '(0.05, 14.95, self._ndistbin)\n', (2288, 2317), False, 'import numpy\n'), ((2364, 2418), 'numpy.sin', 'numpy.sin', (["((90.0 - self._saledata['GLAT']) * _DEGTORAD)"], {}), "((90.0 - self._saledata['GLAT']) * _DEGTORAD)\n", (2373, 2418), False, 'import numpy\n'), ((2438, 2492), 'numpy.cos', 'numpy.cos', (["((90.0 - self._saledata['GLAT']) * _DEGTORAD)"], {}), 
"((90.0 - self._saledata['GLAT']) * _DEGTORAD)\n", (2447, 2492), False, 'import numpy\n'), ((2510, 2555), 'numpy.sin', 'numpy.sin', (["(self._saledata['GLON'] * _DEGTORAD)"], {}), "(self._saledata['GLON'] * _DEGTORAD)\n", (2519, 2555), False, 'import numpy\n'), ((2576, 2621), 'numpy.cos', 'numpy.cos', (["(self._saledata['GLON'] * _DEGTORAD)"], {}), "(self._saledata['GLON'] * _DEGTORAD)\n", (2585, 2621), False, 'import numpy\n'), ((5589, 5609), 'numpy.array', 'numpy.array', (['pixarea'], {}), '(pixarea)\n', (5600, 5609), False, 'import numpy\n'), ((5630, 5653), 'numpy.array', 'numpy.array', (['extinction'], {}), '(extinction)\n', (5641, 5653), False, 'import numpy\n'), ((6648, 6719), 'numpy.recarray', 'numpy.recarray', (['(self._ndistbin,)'], {'dtype': "[('a0', 'f8'), ('e_a0', 'f8')]"}), "((self._ndistbin,), dtype=[('a0', 'f8'), ('e_a0', 'f8')])\n", (6662, 6719), False, 'import numpy\n'), ((7326, 7453), 'numpy.argmin', 'numpy.argmin', (["((l - self._saledata['GLON']) ** 2.0 / self._dl ** 2.0 + (b - self.\n _saledata['GLAT']) ** 2.0 / self._db ** 2.0)"], {}), "((l - self._saledata['GLON']) ** 2.0 / self._dl ** 2.0 + (b -\n self._saledata['GLAT']) ** 2.0 / self._db ** 2.0)\n", (7338, 7453), False, 'import numpy\n'), ((1360, 1394), 'os.path.join', 'os.path.join', (['_saledir', '"""Amap.dat"""'], {}), "(_saledir, 'Amap.dat')\n", (1372, 1394), False, 'import os, os.path\n'), ((3576, 3646), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interpolate.InterpolatedUnivariateSpline', (['self._ds', "tlbData['a0']"], {'k': '(1)'}), "(self._ds, tlbData['a0'], k=1)\n", (3616, 3646), False, 'from scipy import interpolate\n'), ((5152, 5181), 'numpy.cos', 'numpy.cos', (['(radius * _DEGTORAD)'], {}), '(radius * _DEGTORAD)\n', (5161, 5181), False, 'import numpy\n'), ((1496, 1528), 'os.path.join', 'os.path.join', (['_saledir', '"""ReadMe"""'], {}), "(_saledir, 'ReadMe')\n", (1508, 1528), False, 'import os, os.path\n'), ((3926, 3959), 'mwdust.util.extCurves.aebv', 'aebv', (['"""2MASS 
Ks"""'], {'sf10': 'self._sf10'}), "('2MASS Ks', sf10=self._sf10)\n", (3930, 3959), False, 'from mwdust.util.extCurves import aebv\n'), ((4089, 4124), 'mwdust.util.extCurves.aebv', 'aebv', (['self._filter'], {'sf10': 'self._sf10'}), '(self._filter, sf10=self._sf10)\n', (4093, 4124), False, 'from mwdust.util.extCurves import aebv\n'), ((4907, 4943), 'numpy.sin', 'numpy.sin', (['((90.0 - bcen) * _DEGTORAD)'], {}), '((90.0 - bcen) * _DEGTORAD)\n', (4916, 4943), False, 'import numpy\n'), ((4970, 5006), 'numpy.cos', 'numpy.cos', (['((90.0 - bcen) * _DEGTORAD)'], {}), '((90.0 - bcen) * _DEGTORAD)\n', (4979, 5006), False, 'import numpy\n'), ((5033, 5060), 'numpy.sin', 'numpy.sin', (['(lcen * _DEGTORAD)'], {}), '(lcen * _DEGTORAD)\n', (5042, 5060), False, 'import numpy\n'), ((5090, 5117), 'numpy.cos', 'numpy.cos', (['(lcen * _DEGTORAD)'], {}), '(lcen * _DEGTORAD)\n', (5099, 5117), False, 'import numpy\n'), ((4038, 4071), 'mwdust.util.extCurves.aebv', 'aebv', (['"""2MASS Ks"""'], {'sf10': 'self._sf10'}), "('2MASS Ks', sf10=self._sf10)\n", (4042, 4071), False, 'from mwdust.util.extCurves import aebv\n')] |
"""
In this file, we implement the basic pipeline for tasti
Method will be:
1. Extract features using vgg16
2. Cluster based on FPF (furthest point first) algorithm
3. Use KNN-neighbors on all other datapoints to perform label propagation
"""
from torchvision import models
import random
from sklearn.neighbors import KNeighborsClassifier
import cv2
import numpy as np
import torch
from sklearn.metrics import pairwise_distances
class Tasti:
    """
    Basic TASTI pipeline:

    1. Extract features with a pretrained vgg16.
    2. Select representative key frames via FPF (furthest point first).
    3. Propagate labels to all other frames via nearest-key-frame search.
    """

    def __init__(self, num_clusters = 100, k = 1):
        """
        :param num_clusters: number of FPF representatives to select
        :param k: number of neighbors for the KNN classifier
        """
        self.vgg16 = models.vgg16(pretrained = True).cuda() ## it is cuda by default
        self.knn = KNeighborsClassifier(n_neighbors = k)
        self.num_clusters = num_clusters
        self.batch_size = 10

    @staticmethod
    def _euclidean_distances(points, others=None):
        """
        Euclidean distance matrix computed with numpy (replaces the sklearn
        pairwise_distances call; same "||a||^2 + ||b||^2 - 2ab" formulation).

        :param points: (n, d) array
        :param others: optional (m, d) array; defaults to ``points`` so the
            full (n, n) matrix is returned
        :return: (n, m) distance matrix
        """
        if others is None:
            others = points
        sq_a = (points ** 2).sum(axis=1)[:, None]
        sq_b = (others ** 2).sum(axis=1)[None, :]
        sq = sq_a + sq_b - 2.0 * points @ others.T
        # guard against tiny negative values from floating-point cancellation
        np.maximum(sq, 0.0, out=sq)
        return np.sqrt(sq)

    def resize_input(self, X):
        """Resize every image to the 300x300 input size used for vgg16."""
        resized = [cv2.resize(image, (300, 300)) for image in X]
        return np.stack(resized, axis=0)

    def preprocess_input(self, X):
        """
        Resize the input images, convert to a float tensor, and permute to
        channel-first layout so that vgg16 can consume them.
        """
        X = self.resize_input(X)
        tensor = torch.tensor(X, device='cpu').float()
        return tensor.permute(0, 3, 1, 2)

    def run_vgg(self, X):
        """Run the vgg16 feature extractor in batches; returns numpy features."""
        X = self.preprocess_input(X)
        results = []
        for start in range(0, len(X), self.batch_size):
            batch = X[start:start + self.batch_size].cuda()
            features = self.vgg16.features(batch)
            results.append(features.detach().permute(0, 2, 3, 1).cpu().numpy())
        return np.vstack(results)

    def run(self, X, num_clusters = None):
        """
        :param X: original data (array of images)
        :param num_clusters: optional override for the number of key frames
        :return: indices of the key frames w.r.t. the original data array,
                 nearest-key-frame label (original-array index) per frame,
                 and the same mapping w.r.t. the sampled key frames
        """
        x_features = self.run_vgg(X)
        ### flatten the features to 2d
        n, h, w, channels = x_features.shape
        x_features = x_features.reshape(n, h * w * channels)
        if num_clusters is not None:
            self.num_clusters = num_clusters
        points, indices = self.run_fpf(x_features, self.num_clusters)
        labels, mappings = self.run_neighbor_search(x_features, indices)
        return indices, labels, mappings

    def run_neighbor_search(self, points, key_frame_indices):
        """
        Determine the nearest key frame for every point.

        Only the (n, k) distances to the key frames are computed instead of
        the full (n, n) pairwise matrix used previously.

        :param points: (n, d) feature array
        :param key_frame_indices: indices of the key frames within ``points``
        :return: (labels, mappings) -- labels[i] is the index of the nearest
            key frame in the original array; mappings[i] is its position
            within ``key_frame_indices``
        """
        key_frame_indices = np.asarray(key_frame_indices)
        distances = self._euclidean_distances(points, points[key_frame_indices])
        mappings = np.argmin(distances, axis=1)
        labels = key_frame_indices[mappings]
        return labels, mappings

    def run_fpf(self, points, k):
        """
        Furthest point first (greedy k-center) selection.

        Fixes an off-by-one in the original seeding: random.randint(0,
        len(points)) is inclusive on both ends and could return len(points),
        raising an IndexError; random.randrange excludes the upper bound.

        :param points: (n, d) data array
        :param k: number of representatives to choose
        :return: (selected point values, their indices into ``points``)
        """
        distance_matrix = self._euclidean_distances(points)
        solution_set_indices = [random.randrange(len(points))]
        # minimum distance from each point to the current solution set
        min_distances = distance_matrix[solution_set_indices[0]].copy()
        for _ in range(k - 1):
            farthest = int(np.argmax(min_distances))
            solution_set_indices.append(farthest)
            np.minimum(min_distances, distance_matrix[farthest],
                       out=min_distances)
        solution_set_indices = np.array(solution_set_indices)
        return points[solution_set_indices], solution_set_indices
| [
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.pairwise_distances",
"numpy.argmax",
"numpy.stack",
"numpy.array",
"torch.tensor",
"numpy.vstack",
"numpy.argmin",
"cv2.resize",
"torchvision.models.vgg16"
] | [((603, 638), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (623, 638), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((913, 936), 'numpy.stack', 'np.stack', (['X_new'], {'axis': '(0)'}), '(X_new, axis=0)\n', (921, 936), True, 'import numpy as np\n'), ((1676, 1694), 'numpy.vstack', 'np.vstack', (['results'], {}), '(results)\n', (1685, 1694), True, 'import numpy as np\n'), ((2787, 2813), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['points'], {}), '(points)\n', (2805, 2813), False, 'from sklearn.metrics import pairwise_distances\n'), ((3775, 3801), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['points'], {}), '(points)\n', (3793, 3801), False, 'from sklearn.metrics import pairwise_distances\n'), ((4385, 4415), 'numpy.array', 'np.array', (['solution_set_indices'], {}), '(solution_set_indices)\n', (4393, 4415), True, 'import numpy as np\n'), ((3129, 3156), 'numpy.argmin', 'np.argmin', (['my_key_distances'], {}), '(my_key_distances)\n', (3138, 3156), True, 'import numpy as np\n'), ((3335, 3351), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3343, 3351), True, 'import numpy as np\n'), ((3353, 3371), 'numpy.array', 'np.array', (['mappings'], {}), '(mappings)\n', (3361, 3371), True, 'import numpy as np\n'), ((4096, 4130), 'numpy.array', 'np.array', (['relevant_distance_arrays'], {}), '(relevant_distance_arrays)\n', (4104, 4130), True, 'import numpy as np\n'), ((520, 549), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (532, 549), False, 'from torchvision import models\n'), ((868, 896), 'cv2.resize', 'cv2.resize', (['X[i]', '(300, 300)'], {}), '(X[i], (300, 300))\n', (878, 896), False, 'import cv2\n'), ((1231, 1260), 'torch.tensor', 'torch.tensor', (['X'], {'device': '"""cpu"""'}), "(X, device='cpu')\n", (1243, 1260), False, 'import torch\n'), ((4323, 4351), 'numpy.argmax', 'np.argmax', 
(['updated_distances'], {}), '(updated_distances)\n', (4332, 4351), True, 'import numpy as np\n')] |
"""
To utilize the full capability of SEAS,
the user will need to generate their own cross section database.
Run this code to generate.
rewriting the old cross section generation methods to directly output into hdf5
Also clean up all old code with relation to cross section generation
Citation for Hapi:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, HITRAN Application Programming Interface (HAPI): A comprehensive approach to working with spectroscopic data, J. Quant. Spectrosc. Radiat. Transfer 177, 15-30 (2016) [Link to article].
hdf5+gzip can compress cross section data as compared to raw npy outputs.
can further compress the data if less significant digits are used.
But how much is enough? certainly we don't need 10 digits but is 3 or 4 good enough?
Note that cross sections is usually exponentially increasing/decreasing
Take the log10 of the data further shrinks the datasize
log10 + 4 digit significantly shrinks the datasize.
This approximation will yield a ~1% error on the cross section,
which is small compare to the errorbar on the data and errorbar of the abundance retrieved ... but for JWST?
further shrink using gzip compression with compressor_opt set to 9
--- What resolution /step should be chosen?
the difference between resolution is significant and higher compare to rounding error
but once it's binned down they're the same. So there's no benefit for going ultra-high resolution.
Just enough for what you're doing.
For JWST ~ 1000 dp, the xsec has ~10000 is fine enough.
which is why the wn_bin is different for each wavelength
wn_bin = [[200,400,0.1],[400,2000,0.4],[2000,10000,2],[10000,30000,10]]
since JWST follows by wavelength and xsec is initially by wavenumber
hitran nu is different from np.arange(numin,numax,step).
For standardization, normalize to np.arange().
This doesn't matter much for low res using JWST.
Known issue: using higher resolution hitran nu is not the same as np.arange(numin,numax,step).
The Hierarchical Data Format version 5 (HDF5), is an open source file format that supports large, complex, heterogeneous data.
https://www.neonscience.org/about-hdf5
The use of hdf5 is for its balance between space, read time and compatibility
"""
import os
import sys
import h5py
from tqdm import trange
import numpy as np
import matplotlib.pyplot as plt
import warnings
# silence noisy runtime warnings from the numeric routines below
warnings.filterwarnings("ignore")
DIR = os.path.abspath(os.path.dirname(__file__))
# make the SEAS package root importable when running this file as a script
sys.path.insert(0, os.path.join(DIR, '../..'))
import SEAS_Utils.Common_Utils.configurable as config
import SEAS_Main.Cross_Section.Cross_Section_Calculator as csc
from SEAS_Main.Cross_Section.HITRAN_Match import HITRAN_Match
def inspect_generated_database(input_file,
                               tablename="results"):
    """
    Print (and return) the shape of one table in a generated cross section
    hdf5 database.

    Parameters
    ----------
    input_file : str
        Path to the hdf5 database file.
    tablename : str
        Name of the dataset within the hdf5 file. Default "results".

    Returns
    -------
    tuple
        Shape of the stored dataset (previously the function returned None;
        callers that ignore the return value are unaffected).
    """
    # use a context manager so the file handle is always closed
    # (the original opened the file and never closed it)
    with h5py.File(input_file, "r") as input:
        shape = input[tablename].shape
        print(shape)
    return shape
def generate_cross_section_molecule(molecule,
                                    d_path = "",r_path="",
                                    T_Grid = [200.,300.,400.],
                                    P_Grid = [1.,0.1,0.01],
                                    wn_bin = [[400.,2000.,0.4],[2000.,10000.,2],[10000.,30000.,5]],
                                    SL_Flag=False):
    """
    generate cross section for a given molecule

    Results are written to "<r_path>/nu.hdf5" (wavenumber axis) and
    "<r_path>/<molecule>.hdf5" (log-free cross-section grid), gzip level 9.

    Parameters
    ----------
    molecule : str
        Name of the molecule. Has to be one which HITRAN recognize
    d_path : str
        filepath for where the linelist data will be stored
    r_path : str
        filepath for where the calculated cross section will be stored
    P_Grid : array
        Pressure grid (forwarded to hapi_calculator, which expects atm --
        see inline note below)
    T_Grid : array
        Temperature grid [K]
    wn_bin : list of lists
        list of wavenumber bins. Each sublist contains [numin, numax, step]
    SL_Flag : bool
        Flag for whether the downloaded hitran linelist will be saved or not.

    Returns
    -------
    None
    """
    try:
        # [HITRAN molecule id, isotopologue, abundance] triple for hapi.
        component = [HITRAN_Match[molecule],1,1]
    except:
        # NOTE(review): `component` stays undefined here; the calculator
        # call below will then raise and be caught per-bin.
        print("molecule: %s not recognized, not in HITRAN?"%molecule)
    #nu_ = np.concatenate([np.arange(x[0],x[1],x[2]) for x in wn_bin])
    # np.zeros maybe more adequate?
    # xsec_grid[t][p] accumulates each (T, P) spectrum across all bins.
    xsec_grid = [[[] for y in range(len(P_Grid))] for x in range(len(T_Grid))]
    #sys.stdout = open('redirect.txt', 'w')
    for i in trange(len(wn_bin), desc='wn bin', leave=True):
        # NOTE(review): the nested loops below reuse the name `i`; harmless
        # only because trange refreshes it each outer pass, but fragile.
        numin,numax,step = wn_bin[i]
        # Standardized wavenumber axis for this bin (see module notes).
        nu_std = np.arange(numin,numax,step)
        data = [[[] for y in range(len(P_Grid))] for x in range(len(T_Grid))]
        try:
            xsec_calc = csc.cross_section_calculator(d_path,molecule,component,numin,numax,step)
            for i in trange(len(T_Grid),desc="Temperature"):
                T = T_Grid[i]
                for j in trange(len(P_Grid),desc="Pressure   "):
                    P = P_Grid[j]
                    #print("Generated %s xsec from %s to %s at T: %s and P: %s"%(molecule,numin,numax,P,T))
                    nu,sigma = xsec_calc.hapi_calculator(P,T) # note that P for hapi_calculator is in atm unit.
                    data[i][j] = sigma
            if not SL_Flag:
                # Discard the downloaded linelist once the xsec is computed.
                os.remove(os.path.join(d_path,"%s.header"%molecule))
                os.remove(os.path.join(d_path,"%s.data"%molecule))
        except Exception as e:
            # No linelist for this bin: substitute a zero spectrum on nu_std.
            print("Missing HITRAN Data for %s from %s to %s"%(molecule,numin,numax))
            print(e)
            for i in trange(len(T_Grid),desc="Temperature"):
                T = T_Grid[i]
                for j in trange(len(P_Grid),desc="Pressure   "):
                    P = P_Grid[j]
                    data[i][j] = np.zeros(len(nu_std))
        except:
            # NOTE(review): effectively unreachable for ordinary errors --
            # the clause above already catches Exception; only
            # BaseException (e.g. KeyboardInterrupt) lands here.
            print("unknown error, terminate generation")
            sys.exit()
        # Append this bin's spectra along the wavenumber axis (axis 2).
        xsec_grid = np.concatenate([xsec_grid,data],2)
    #[float("%.4g"%x) for x in np.log10(data["results"][j][i])]
    # NOTE(review): `nu` is whatever the *last* hapi call produced, and is
    # undefined if every bin failed -- verify before relying on nu.hdf5.
    hdf5_store = h5py.File(os.path.join(r_path,"nu.hdf5"), "w")
    hdf5_store.create_dataset("results", data=nu,compression="gzip",compression_opts=9)
    hdf5_store.close()
    hdf5_store = h5py.File(os.path.join(r_path,"%s.hdf5"%molecule), "w")
    hdf5_store.create_dataset("results", data=xsec_grid,compression="gzip",compression_opts=9)
    hdf5_store.close()
if __name__ == "__main__":
    # Test Execution. Always advised to run some test first before execution to avoid losing hours of progress
    #generate_cross_section_molecule("CO2")
    inspect_generated_database("CO2.hdf5")
    sys.exit()
    # Everything below is deliberately unreachable scaffolding for a full
    # production run: delete the sys.exit() above to execute it.
    molecule = "HNO3"
    # generate cross section database that can be used by SEAS
    d_path = "../../SEAS_Input/Line_List/HITRAN_Line_List/%s"%molecule
    r_path = "../../SEAS_Input/Cross_Section/HDF5_New"
    # calculate T_Grid and P_Grid
    T_Grid = csc.calculate_temperature_layers(T_Min=100, T_Max=800, Step=25)
    P_Grid = csc.calculate_pressure_layers(P_surface = 1e5, P_Cutoff = 1e-5)/101300 # /101300 presumably converts Pa -> atm; confirm
    # load predefined T_Grid and P_Grid
    """
    user_input = config.Configuration("../../config/user_input.cfg")
    T_Grid = np.array(user_input["Xsec"]["Molecule"]["T_Grid"],dtype=float)
    P_Grid = np.array(user_input["Xsec"]["Molecule"]["P_Grid"],dtype=float)/101300
    """
    #generate_cross_section_molecule("HNO3",d_path,r_path,T_Grid,P_Grid)
    # wn_bin for new database. Need to fix compatibility issues before this can be used
    # wn_bin = [[200,400,0.1],[400,2000,0.4],[2000,10000,2],[10000,30000,10]]
| [
"numpy.arange",
"SEAS_Main.Cross_Section.Cross_Section_Calculator.calculate_pressure_layers",
"os.path.join",
"h5py.File",
"os.path.dirname",
"SEAS_Main.Cross_Section.Cross_Section_Calculator.cross_section_calculator",
"numpy.concatenate",
"sys.exit",
"warnings.filterwarnings",
"SEAS_Main.Cross_Se... | [((2350, 2383), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2373, 2383), False, 'import warnings\n'), ((2407, 2432), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2422, 2432), False, 'import os\n'), ((2453, 2479), 'os.path.join', 'os.path.join', (['DIR', '"""../.."""'], {}), "(DIR, '../..')\n", (2465, 2479), False, 'import os\n'), ((2776, 2802), 'h5py.File', 'h5py.File', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (2785, 2802), False, 'import h5py\n'), ((6599, 6609), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6607, 6609), False, 'import sys\n'), ((6888, 6951), 'SEAS_Main.Cross_Section.Cross_Section_Calculator.calculate_temperature_layers', 'csc.calculate_temperature_layers', ([], {'T_Min': '(100)', 'T_Max': '(800)', 'Step': '(25)'}), '(T_Min=100, T_Max=800, Step=25)\n', (6920, 6951), True, 'import SEAS_Main.Cross_Section.Cross_Section_Calculator as csc\n'), ((4461, 4490), 'numpy.arange', 'np.arange', (['numin', 'numax', 'step'], {}), '(numin, numax, step)\n', (4470, 4490), True, 'import numpy as np\n'), ((5844, 5880), 'numpy.concatenate', 'np.concatenate', (['[xsec_grid, data]', '(2)'], {}), '([xsec_grid, data], 2)\n', (5858, 5880), True, 'import numpy as np\n'), ((6001, 6032), 'os.path.join', 'os.path.join', (['r_path', '"""nu.hdf5"""'], {}), "(r_path, 'nu.hdf5')\n", (6013, 6032), False, 'import os\n'), ((6181, 6223), 'os.path.join', 'os.path.join', (['r_path', "('%s.hdf5' % molecule)"], {}), "(r_path, '%s.hdf5' % molecule)\n", (6193, 6223), False, 'import os\n'), ((6965, 7030), 'SEAS_Main.Cross_Section.Cross_Section_Calculator.calculate_pressure_layers', 'csc.calculate_pressure_layers', ([], {'P_surface': '(100000.0)', 'P_Cutoff': '(1e-05)'}), '(P_surface=100000.0, P_Cutoff=1e-05)\n', (6994, 7030), True, 'import SEAS_Main.Cross_Section.Cross_Section_Calculator as csc\n'), ((4604, 4681), 
'SEAS_Main.Cross_Section.Cross_Section_Calculator.cross_section_calculator', 'csc.cross_section_calculator', (['d_path', 'molecule', 'component', 'numin', 'numax', 'step'], {}), '(d_path, molecule, component, numin, numax, step)\n', (4632, 4681), True, 'import SEAS_Main.Cross_Section.Cross_Section_Calculator as csc\n'), ((5803, 5813), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5811, 5813), False, 'import sys\n'), ((5201, 5245), 'os.path.join', 'os.path.join', (['d_path', "('%s.header' % molecule)"], {}), "(d_path, '%s.header' % molecule)\n", (5213, 5245), False, 'import os\n'), ((5270, 5312), 'os.path.join', 'os.path.join', (['d_path', "('%s.data' % molecule)"], {}), "(d_path, '%s.data' % molecule)\n", (5282, 5312), False, 'import os\n')] |
import os
import sys
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
class ABF2(swhlab.ABF):
    def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
                    histResolution=.5,plotToo=False):
        """Quantify phasic current as a histogram of sweep deviations.

        Builds a histogram of the sweep (centered on its histogram peak),
        fits a gaussian to the quietest (lowest-variance) chunks as the
        baseline, zeroes a fixed region around 0, and returns the
        remaining distribution scaled to charge/sec units.

        let's keep the chunkMs as high as we reasonably can. 50ms is good.
        Things get flakey at lower numbers like 10ms.

        IMPORTANT! for this to work, prevent 0s from averaging in, so keep
        bin sizes well above the data resolution.
        """
        # prepare sectioning values to be used later (m1/m2 given in seconds)
        m1=0 if m1 is None else m1*self.pointsPerSec
        # BUG FIX: previously read the module-level global `abf` instead of
        # `self`, so the method only worked inside this demo script.
        m2=len(self.sweepY) if m2 is None else m2*self.pointsPerSec
        m1,m2=int(m1),int(m2)
        # prepare histogram values to be used later
        padding=200 # pA or mV of maximum expected deviation
        chunkPoints=int(chunkMs*self.pointsPerMs)
        histBins=int((padding*2)/histResolution)
        # center the data at 0 using peak histogram, not the mean
        Y=self.sweepY[m1:m2]
        hist,bins=np.histogram(Y,bins=2*padding)
        Yoffset=bins[np.where(hist==max(hist))[0][0]]
        Y=Y-Yoffset # we don't have to, but PDF math is easier
        # calculate all histogram
        nChunks=int(len(Y)/chunkPoints)
        hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
        hist=hist/len(Y) # count as a fraction of total
        Xs=bins[1:]
        # get baseline data from chunks with smallest variance
        chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
        variances=np.var(chunks,axis=1)
        percentiles=np.empty(len(variances))
        # PERF FIX: sort once instead of re-sorting on every iteration.
        sortedVariances=sorted(variances)
        for i,variance in enumerate(variances):
            percentiles[i]=sortedVariances.index(variance)/len(variances)*100
        blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
        # generate the standard curve and pull it to the histogram height
        sigma=np.sqrt(np.var(blData))
        center=np.average(blData)+histResolution/2
        # NOTE(review): mlab.normpdf was removed in matplotlib >= 3.1;
        # switch to scipy.stats.norm.pdf if matplotlib is upgraded.
        blCurve=mlab.normpdf(Xs,center,sigma)
        blCurve=blCurve*max(hist)/max(blCurve)
        # determine the phasic current by subtracting-out the baseline
        #diff=hist-blCurve
        diff=hist
        # zero out a fixed window around 0 so baseline noise doesn't count
        IGNORE_DISTANCE=5 # KEEP THIS FIXED, NOT A FUNCTION OF VARIANCE
        ignrCenter=len(Xs)/2
        ignrPad=IGNORE_DISTANCE/histResolution
        ignr1,ignr2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)
        diff[ignr1:ignr2]=0
        # optionally graph all this
        if plotToo:
            plt.figure(figsize=(15,5))
            plt.plot(Y)
            plt.figure(figsize=(7,7))
            ax1=plt.subplot(211)
            plt.title(self.ID+" phasic analysis")
            plt.ylabel("fraction")
            plt.plot(Xs,hist,'-',alpha=.8,color='b',lw=3)
            plt.plot(Xs,blCurve,lw=3,alpha=.5,color='r')
            plt.margins(0,.1)
            plt.subplot(212,sharex=ax1)
            plt.title("baseline subtracted")
            plt.ylabel("fraction")
            plt.xlabel("data points (%s)"%self.units)
            plt.plot(Xs,diff,'-',alpha=.8,color='b',lw=3)
            plt.axhline(0,lw=3,alpha=.5,color='r')
            plt.axvline(0,lw=3,alpha=.5,color='k')
            plt.margins(0,.1)
            plt.axis([-50,50,None,None])
            plt.tight_layout()
            plt.show()
            print(np.sum(np.split(diff,2),1))
        return diff/len(Y)*self.pointsPerSec # charge/sec
if __name__=="__main__":
    # Demo: load one ABF file and plot the averaged phasic-current histogram
    # for a few groups of consecutive sweeps.
    #abfPath=r"X:\Data\2P01\2016\2016-09-01 PIR TGOT"
    abfPath=r"C:\Users\scott\Documents\important\demodata"
    abf=ABF2(os.path.join(abfPath,"16d16007.abf"))
    histPoints=len(abf.phasicTonic(.75)) # probe once to size the arrays
    nSweeps=25 # sweeps averaged per group
    plt.figure(figsize=(10,5))
    plt.grid()
    for title,sweep1 in [["baseline",200],["baseline",240],["baseline",350]]:
        hists=np.zeros((nSweeps,histPoints))
        for i in range(nSweeps):
            sweep=sweep1+i
            abf.setsweep(sweep)
            hists[i]=abf.phasicTonic(.75)
        AV=np.average(hists,axis=0)
        plt.plot(AV,lw=5,alpha=.5,label=title)
    plt.legend()
    # for sweep in abf.setsweeps():
    #     phasic=abf.phasicTonic(.75)
    #     neg[sweep],pos[sweep]=np.sum(np.split(phasic,2),1)
    #
    # plt.plot(pos,'.',color='b',alpha=.5)
    # plt.plot(swhlab.common.lowpass(pos),'-',color='b',alpha=.5,lw=5,label="upward")
    # plt.plot(neg,'.',color='r',alpha=.5)
    # plt.plot(swhlab.common.lowpass(neg),'-',color='r',alpha=.5,lw=5,label="downward")
    # for sweep in abf.comment_sweeps:
    #     plt.axvline(sweep,lw=5,alpha=.5,color='g',ls='--')
    # plt.axhline(0,color='k',lw=3,alpha=.5)
    plt.xlabel("sweep number")
    plt.ylabel("ms * pA / sec")
    # plt.legend(loc='upper left',shadow=True)
    plt.show()
    print("DONE")
print("DONE") | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"sys.path.append",
"matplotlib.pyplot.margins",
"numpy.histogram",
"matplotlib.mlab.normpdf",
"numpy.reshape",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axis",
"nu... | [((21, 52), 'sys.path.append', 'sys.path.append', (['"""../../../../"""'], {}), "('../../../../')\n", (36, 52), False, 'import sys\n'), ((3878, 3905), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (3888, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3919), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3917, 3919), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4276), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4274, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4826, 4852), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sweep number"""'], {}), "('sweep number')\n", (4836, 4852), True, 'import matplotlib.pyplot as plt\n'), ((4857, 4884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ms * pA / sec"""'], {}), "('ms * pA / sec')\n", (4867, 4884), True, 'import matplotlib.pyplot as plt\n'), ((4935, 4945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4943, 4945), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1167), 'numpy.histogram', 'np.histogram', (['Y'], {'bins': '(2 * padding)'}), '(Y, bins=2 * padding)\n', (1146, 1167), True, 'import numpy as np\n'), ((1383, 1440), 'numpy.histogram', 'np.histogram', (['Y'], {'bins': 'histBins', 'range': '(-padding, padding)'}), '(Y, bins=histBins, range=(-padding, padding))\n', (1395, 1440), True, 'import numpy as np\n'), ((1593, 1654), 'numpy.reshape', 'np.reshape', (['Y[:nChunks * chunkPoints]', '(nChunks, chunkPoints)'], {}), '(Y[:nChunks * chunkPoints], (nChunks, chunkPoints))\n', (1603, 1654), True, 'import numpy as np\n'), ((1669, 1691), 'numpy.var', 'np.var', (['chunks'], {'axis': '(1)'}), '(chunks, axis=1)\n', (1675, 1691), True, 'import numpy as np\n'), ((2127, 2158), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['Xs', 'center', 'sigma'], {}), '(Xs, center, sigma)\n', (2139, 2158), True, 'import matplotlib.mlab as mlab\n'), ((3767, 3804), 'os.path.join', 'os.path.join', (['abfPath', 
'"""16d16007.abf"""'], {}), "(abfPath, '16d16007.abf')\n", (3779, 3804), False, 'import os\n'), ((4012, 4043), 'numpy.zeros', 'np.zeros', (['(nSweeps, histPoints)'], {}), '((nSweeps, histPoints))\n', (4020, 4043), True, 'import numpy as np\n'), ((4188, 4213), 'numpy.average', 'np.average', (['hists'], {'axis': '(0)'}), '(hists, axis=0)\n', (4198, 4213), True, 'import numpy as np\n'), ((4221, 4263), 'matplotlib.pyplot.plot', 'plt.plot', (['AV'], {'lw': '(5)', 'alpha': '(0.5)', 'label': 'title'}), '(AV, lw=5, alpha=0.5, label=title)\n', (4229, 4263), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2058), 'numpy.var', 'np.var', (['blData'], {}), '(blData)\n', (2050, 2058), True, 'import numpy as np\n'), ((2075, 2093), 'numpy.average', 'np.average', (['blData'], {}), '(blData)\n', (2085, 2093), True, 'import numpy as np\n'), ((2674, 2701), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (2684, 2701), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2724), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (2721, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2737, 2763), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (2747, 2763), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2795), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2790, 2795), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2846), 'matplotlib.pyplot.title', 'plt.title', (["(abf.ID + ' phasic analysis')"], {}), "(abf.ID + ' phasic analysis')\n", (2817, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2857, 2879), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fraction"""'], {}), "('fraction')\n", (2867, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2943), 'matplotlib.pyplot.plot', 'plt.plot', (['Xs', 'hist', '"""-"""'], {'alpha': '(0.8)', 'color': '"""b"""', 'lw': '(3)'}), "(Xs, hist, '-', alpha=0.8, color='b', lw=3)\n", (2900, 2943), 
True, 'import matplotlib.pyplot as plt\n'), ((2950, 2999), 'matplotlib.pyplot.plot', 'plt.plot', (['Xs', 'blCurve'], {'lw': '(3)', 'alpha': '(0.5)', 'color': '"""r"""'}), "(Xs, blCurve, lw=3, alpha=0.5, color='r')\n", (2958, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3026), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (3018, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3037, 3065), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (3048, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3077, 3109), 'matplotlib.pyplot.title', 'plt.title', (['"""baseline subtracted"""'], {}), "('baseline subtracted')\n", (3086, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3122, 3144), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fraction"""'], {}), "('fraction')\n", (3132, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('data points (%s)' % abf.units)"], {}), "('data points (%s)' % abf.units)\n", (3167, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3261), 'matplotlib.pyplot.plot', 'plt.plot', (['Xs', 'diff', '"""-"""'], {'alpha': '(0.8)', 'color': '"""b"""', 'lw': '(3)'}), "(Xs, diff, '-', alpha=0.8, color='b', lw=3)\n", (3218, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3310), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'lw': '(3)', 'alpha': '(0.5)', 'color': '"""r"""'}), "(0, lw=3, alpha=0.5, color='r')\n", (3279, 3310), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3361), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'lw': '(3)', 'alpha': '(0.5)', 'color': '"""k"""'}), "(0, lw=3, alpha=0.5, color='k')\n", (3330, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3389), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (3381, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3431), 
'matplotlib.pyplot.axis', 'plt.axis', (['[-50, 50, None, None]'], {}), '([-50, 50, None, None])\n', (3408, 3431), True, 'import matplotlib.pyplot as plt\n'), ((3441, 3459), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3457, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3472, 3482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3480, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3521, 3538), 'numpy.split', 'np.split', (['diff', '(2)'], {}), '(diff, 2)\n', (3529, 3538), True, 'import numpy as np\n'), ((1886, 1926), 'numpy.where', 'np.where', (['(percentiles <= quietPercentile)'], {}), '(percentiles <= quietPercentile)\n', (1894, 1926), True, 'import numpy as np\n')] |
from PIL import Image
import cv2
import numpy as np
from PySide2.QtGui import QImage
# https://github.com/Mugichoko445/Fast-Digital-Image-Inpainting/blob/master/sources/FastDigitalImageInpainting.hpp
def convertPIL2CV(img:Image):
    """Convert a PIL image (RGB channel order) to an OpenCV BGR array."""
    rgb_array = np.array(img)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
def convertCV2PIL(img)->Image:
    """Convert an OpenCV BGR array to a PIL image (RGB channel order)."""
    rgb_array = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb_array)
def convertQImageToCV2(img):
    """Convert a QImage into an OpenCV-compatible (height, width, 4) array."""
    rgb32 = img.convertToFormat(QImage.Format_RGB32)
    w = rgb32.width()
    h = rgb32.height()
    raw = rgb32.bits()
    return np.array(raw).reshape(h, w, 4)
# Convolution weights for the fast-inpainting kernel (see the reference URL
# at the top of this file): _a for diagonal neighbours, _b for orthogonal
# neighbours, and 0 for the centre pixel.
_a = 0.073235
_b = 0.176765
# NOTE(review): the double brackets make _K a (1, 9) row vector rather than
# a 3x3 matrix; fastInpaint() derives its border size and roi shape from
# _K.shape, so confirm this layout is intended.
_K = np.array([[_a, _b, _a,
           _b, 0, _b,
           _a, _b, _a]])
def fastInpaint(src, mask=None, kernel=None, maxIter=100):
    """Fast digital image inpainting by iterative neighbourhood averaging.

    Port of the approach referenced at the top of this file: the working
    image is initialized with the mean colour outside the mask, then pixels
    whose mask value is 255 are repeatedly replaced by a kernel-weighted
    average of their neighbourhood.

    Args:
        src: input image as a BGR numpy array, shape (H, W, 3).
        mask: single-channel mask; pixels whose value is 255 are diffused.
        kernel: optional convolution weights; defaults to the module _K.
        maxIter: number of diffusion passes over the masked region.

    Returns:
        The inpainted image as uint8, rescaled to the full 0-255 range.
    """
    # BUG FIX: `if not kernel:` raises ValueError ("the truth value of an
    # array with more than one element is ambiguous") whenever a numpy
    # kernel is actually passed in; test identity against None instead.
    if kernel is None:
        kernel = _K
    # Make mask BGR
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    # Fill masked region with average color
    avgColor = cv2.sumElems(src) // (np.prod(src.shape[:2]))
    avgColorMat = np.ones((1,1,3))
    avgColorMat[0,0] = np.asarray([avgColor[0], avgColor[1], avgColor[2]])
    avgColorMat = cv2.resize(avgColorMat, (src.shape[1], src.shape[0]), 0.0, 0.0, cv2.INTER_NEAREST)
    print(mask)  # NOTE(review): debug print left in place on purpose
    result = np.multiply(mask//255, src) + np.multiply((1 - mask//255), avgColorMat)
    # Convolution
    # NOTE(review): _K is a (1, 9) row vector, so bSize is 0 and the roi
    # below is 9x1 -- verify against the 3x3 kernel of the reference code.
    bSize = _K.shape[0] // 2
    # Normalize the working image to [0, 1] floats for the averaging passes.
    result = (np.float32(result)-np.min(result))
    result /= np.max(result)
    # Expand the kernel weights to 3 channels.
    kernel3ch = np.zeros((kernel.shape[0], kernel.shape[1], 3))
    for i in range(kernel.shape[0]):
        for j in range(kernel.shape[1]):
            kernel3ch[i,j,:] = 3*[kernel[i,j]]
    inWithBorder = cv2.copyMakeBorder(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)
    resInWithBorder = np.copy(inWithBorder[bSize:bSize+result.shape[0], bSize:bSize+result.shape[1]])
    for itr in range(maxIter):
        # Re-pad each pass so border pixels see replicated neighbours.
        inWithBorder = cv2.copyMakeBorder(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)
        for r in range(result.shape[1]):
            for c in range(result.shape[0]):
                if np.all(mask[c,r,:] == 255):
                    # Weighted neighbourhood average, per channel.
                    roi = inWithBorder[c:c+_K.shape[1], r:r+_K.shape[0]]
                    s = cv2.sumElems(np.multiply(kernel3ch, roi))
                    result[c,r,0] = s[0]
                    result[c,r,1] = s[1]
                    result[c,r,2] = s[2]
        # cv2.imshow("Inpainting...", result)
        # cv2.waitKey(1)
    # Rescale back to the full 0-255 range and return as uint8.
    result -= np.min(result)
    result *= 255/np.max(result)
    return np.uint8(result)
# print(avgColor)
# print(src.shape) | [
"numpy.uint8",
"numpy.copy",
"numpy.prod",
"numpy.multiply",
"numpy.all",
"numpy.ones",
"numpy.float32",
"cv2.copyMakeBorder",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"numpy.min",
"cv2.resize",
"cv2.sumElems"
] | [((690, 737), 'numpy.array', 'np.array', (['[[_a, _b, _a, _b, 0, _b, _a, _b, _a]]'], {}), '([[_a, _b, _a, _b, 0, _b, _a, _b, _a]])\n', (698, 737), True, 'import numpy as np\n'), ((902, 940), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_GRAY2BGR'], {}), '(mask, cv2.COLOR_GRAY2BGR)\n', (914, 940), False, 'import cv2\n'), ((1065, 1083), 'numpy.ones', 'np.ones', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (1072, 1083), True, 'import numpy as np\n'), ((1105, 1156), 'numpy.asarray', 'np.asarray', (['[avgColor[0], avgColor[1], avgColor[2]]'], {}), '([avgColor[0], avgColor[1], avgColor[2]])\n', (1115, 1156), True, 'import numpy as np\n'), ((1175, 1262), 'cv2.resize', 'cv2.resize', (['avgColorMat', '(src.shape[1], src.shape[0])', '(0.0)', '(0.0)', 'cv2.INTER_NEAREST'], {}), '(avgColorMat, (src.shape[1], src.shape[0]), 0.0, 0.0, cv2.\n INTER_NEAREST)\n', (1185, 1262), False, 'import cv2\n'), ((1511, 1525), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1517, 1525), True, 'import numpy as np\n'), ((1602, 1649), 'numpy.zeros', 'np.zeros', (['(kernel.shape[0], kernel.shape[1], 3)'], {}), '((kernel.shape[0], kernel.shape[1], 3))\n', (1610, 1649), True, 'import numpy as np\n'), ((1795, 1871), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['result', 'bSize', 'bSize', 'bSize', 'bSize', 'cv2.BORDER_REPLICATE'], {}), '(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)\n', (1813, 1871), False, 'import cv2\n'), ((1894, 1982), 'numpy.copy', 'np.copy', (['inWithBorder[bSize:bSize + result.shape[0], bSize:bSize + result.shape[1]]'], {}), '(inWithBorder[bSize:bSize + result.shape[0], bSize:bSize + result.\n shape[1]])\n', (1901, 1982), True, 'import numpy as np\n'), ((2638, 2652), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (2644, 2652), True, 'import numpy as np\n'), ((2698, 2714), 'numpy.uint8', 'np.uint8', (['result'], {}), '(result)\n', (2706, 2714), True, 'import numpy as np\n'), ((255, 268), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (263, 
268), True, 'import numpy as np\n'), ((352, 388), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (364, 388), False, 'import cv2\n'), ((1001, 1018), 'cv2.sumElems', 'cv2.sumElems', (['src'], {}), '(src)\n', (1013, 1018), False, 'import cv2\n'), ((1023, 1045), 'numpy.prod', 'np.prod', (['src.shape[:2]'], {}), '(src.shape[:2])\n', (1030, 1045), True, 'import numpy as np\n'), ((1287, 1316), 'numpy.multiply', 'np.multiply', (['(mask // 255)', 'src'], {}), '(mask // 255, src)\n', (1298, 1316), True, 'import numpy as np\n'), ((1317, 1358), 'numpy.multiply', 'np.multiply', (['(1 - mask // 255)', 'avgColorMat'], {}), '(1 - mask // 255, avgColorMat)\n', (1328, 1358), True, 'import numpy as np\n'), ((1462, 1480), 'numpy.float32', 'np.float32', (['result'], {}), '(result)\n', (1472, 1480), True, 'import numpy as np\n'), ((1481, 1495), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (1487, 1495), True, 'import numpy as np\n'), ((2057, 2133), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['result', 'bSize', 'bSize', 'bSize', 'bSize', 'cv2.BORDER_REPLICATE'], {}), '(result, bSize, bSize, bSize, bSize, cv2.BORDER_REPLICATE)\n', (2075, 2133), False, 'import cv2\n'), ((2671, 2685), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (2677, 2685), True, 'import numpy as np\n'), ((600, 613), 'numpy.array', 'np.array', (['ptr'], {}), '(ptr)\n', (608, 613), True, 'import numpy as np\n'), ((2239, 2267), 'numpy.all', 'np.all', (['(mask[c, r, :] == 255)'], {}), '(mask[c, r, :] == 255)\n', (2245, 2267), True, 'import numpy as np\n'), ((2378, 2405), 'numpy.multiply', 'np.multiply', (['kernel3ch', 'roi'], {}), '(kernel3ch, roi)\n', (2389, 2405), True, 'import numpy as np\n')] |
import os
from multiagent.scenarios.my_Scenario import MyScenario
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
from multiagent.simple_agents import StayAgent
class Scenario(MyScenario):
    """Pursuit scenario on an open (unbounded) plane with a custom physics
    step: actions are treated as directions and agents move at their max
    speed each step (force/velocity integration is bypassed).
    A large symmetric reward/penalty (REWARD_FOR_COLISION) is exchanged
    when an adversary collides with a good agent; caught good agents are
    tracked in self.caught_agents.
    """
    def make_world(self):
        """Build the world, install the velocity-free step function and
        reset the caught-agent bookkeeping."""
        self.REWARD_FOR_COLISION = 505
        name = 'open_1_2_without_vel_REWARD_FOR_COLISION_' + str(self.REWARD_FOR_COLISION)
        # NOTE(review): the (2, 1) positional arguments are forwarded to
        # MyScenario.make_world -- confirm there which count is good agents
        # and which is adversaries.
        world = super(Scenario, self).make_world(name, 2, 1, is_random_states_for_new_agent=False, bounds=False, REWARD_FOR_COLISION = self.REWARD_FOR_COLISION)
        world.step = self.step_without_velocity(world)
        self.caught_agents = set()
        return world
    def step_without_velocity(self, world):
        """Return a zero-argument closure that replaces world.step: each
        agent's action vector is normalized to its max_speed and integrated
        directly into its position."""
        def step_without_velocity():
            for agent in world.scripted_agents:
                agent.action = agent.action_callback(agent, self)
            # gather forces applied to entities
            for agent in world.agents:
                speed = agent.action.u
                # Normalize so the agent moves at exactly max_speed,
                # unless the action vector is zero.
                if np.sqrt(np.square(speed[0]) + np.square(speed[1])) != 0:
                    speed = speed / np.sqrt(np.square(speed[0]) + np.square(speed[1])) * agent.max_speed
                else:
                    print(('Speed equal: ' + str(speed[0]) + ' and ' + str(speed[1])))
                agent.state.p_pos += speed * world.dt
                agent.state.p_vel = speed
            for agent in world.agents:
                world.update_agent_state(agent)
            # for good_agent in self.good_agents(world):
            #     for advertisal_agent in self.adversaries(world):
            #         if good_agent in self.caught_agents:
            #             continue
            #         if self.is_collision(advertisal_agent, good_agent):
            #             self.caught_agents.add(good_agent)
        return step_without_velocity
    def reset_world(self, world):
        """Reset agent/landmark colors, clear the caught-agent set, and set
        initial entity states (random or fixed depending on config)."""
        # random properties for agents
        self.caught_agents = set()
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.35, 0.85, 0.35]) if not agent.adversary else np.array([0.85, 0.35, 0.35])
        # random properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        # set random initial states
        if self.is_random_states_for_new_agent and not self.evaluate:
            for agent in world.agents:
                agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
                agent.state.p_vel = np.zeros(world.dim_p)
                agent.state.c = np.zeros(world.dim_c)
        else:
            # Deterministic start positions (used e.g. for evaluation).
            self.set_states_for_good_agent_and_adversary(world)
        for i, landmark in enumerate(world.landmarks):
            if not landmark.boundary:
                landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)
                landmark.state.p_vel = np.zeros(world.dim_p)
    def agent_reward(self, agent, world):
        """Reward for a good agent: shaped to grow with distance from the
        adversaries, with a large penalty on collision. Returns 0 once the
        agent has already been caught."""
        # Agents are negatively rewarded if caught by adversaries
        if agent in self.caught_agents:
            return 0
        rew = 0
        shape = True
        adversaries = self.adversaries(world)
        if shape: # reward can optionally be shaped (increased reward for increased distance from adversary)
            for adv in adversaries:
                rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))
        if agent.collide:
            for a in adversaries:
                if self.is_collision(a, agent):
                    rew -= self.REWARD_FOR_COLISION
                    break
        # agents are penalized for exiting the screen, so that they can be caught by the adversaries
        def bound(x):
            # Piecewise penalty: free below 0.9, linear ramp up to 1.0,
            # then exponential, capped at 10.
            if x < 0.9:
                return 0
            if x < 1.0:
                return (x - 0.9) * 10
            return min(np.exp(2 * x - 2), 10)
        if self.bounds:
            for p in range(world.dim_p):
                x = abs(agent.state.p_pos[p])
                rew -= bound(x)
        return rew
    def adversary_reward(self, agent, world):
        """Reward for an adversary: shaped to shrink with distance to the
        nearest free (not yet caught) good agent, plus a large bonus per
        collision with a free good agent."""
        # Adversaries are rewarded for collisions with agents
        rew = 0
        shape = True
        good_agents = self.good_agents(world)
        agents = []
        # Only good agents that have not been caught yet count.
        for ag in good_agents:
            if ag not in self.caught_agents:
                agents.append(ag)
        if len(agents) == 0:
            return 0
        adversaries = self.adversaries(world)
        if shape: # reward can optionally be shaped (decreased reward for increased distance from agents)
            for adv in adversaries:
                rew -= 0.1 * min([np.sqrt(np.sum(np.square(a.state.p_pos - adv.state.p_pos))) for a in agents])
        if agent.collide:
            for ag in agents:
                for adv in adversaries:
                    delta_pos = ag.state.p_pos - adv.state.p_pos
                    # NOTE(review): `dist` is computed but never used below.
                    dist = np.sqrt(np.sum(np.square(delta_pos)))
                    if self.is_collision(ag, adv):
                        rew += self.REWARD_FOR_COLISION
                        break
        return rew
    def set_states_for_good_agent_and_adversary(self, world):
        """Place the three agents at fixed start positions with zero
        velocity/communication (no-op unless there are exactly 3 agents)."""
        if len(world.agents) != 3: return
        world.agents[0].state.p_pos = np.array([-0.5, -0.5])
        world.agents[1].state.p_pos = np.array([0.5, 0.5])
        world.agents[2].state.p_pos = np.array([-0.3, 0.5])
        for agent in world.agents:
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
    def done(self, agent, world):
        """Dispatch the episode-termination check by agent role."""
        return self.adversary_done(agent, world) if agent.adversary else self.good_done(agent, world)
    def good_done(self, agent, world):
        """A good agent is done once caught by any adversary. Side effect:
        registers a fresh catch in self.caught_agents."""
        if agent in self.caught_agents:
            return True
        for agent2 in world.agents:
            if agent2.adversary:
                if self.is_collision(agent, agent2):
                    self.caught_agents.add(agent)
                    return True
        return False
    def adversary_done(self, agent, world):
        """Adversaries are done when every good agent has been caught."""
        return len(self.good_agents(world)) == len(self.caught_agents)
    def observation(self, agent, world):
        """Observation for `agent`: relative positions of all other agents
        (flattened) concatenated with a 1./0. caught-flag per good agent.
        Landmark positions, communication and velocities are collected but
        deliberately excluded from the returned vector."""
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            if not entity.boundary:
                entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # communication of all other agents
        comm = []
        other_pos = []
        other_vel = []
        for other in world.agents:
            if other is agent: continue
            comm.append(other.state.c)
            other_pos.append(other.state.p_pos - agent.state.p_pos)
            if not other.adversary:
                other_vel.append(other.state.p_vel)
        #normalize
        other_pos = np.array(other_pos)
        # if np.sqrt(np.sum(np.square(other_pos))) != 0:
        #     other_pos = other_pos / np.sqrt(np.sum(np.square(other_pos)))
        #other_pos = other_pos.tolist()
        is_caught = []
        for good_agent in self.good_agents(world):
            if good_agent in self.caught_agents:
                is_caught.append([1.])
            else:
                is_caught.append([0.])
        entity_pos = np.array(entity_pos).flatten()
        other_pos = other_pos.flatten()
        # if np.sqrt(np.sum(np.square(other_pos))) != 0:
        #     other_pos = other_pos / np.sqrt(np.sum(np.square(other_pos)))
        is_caught = np.array(is_caught).flatten()
        return np.concatenate([other_pos, is_caught])
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.uniform"
] | [((5236, 5258), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (5244, 5258), True, 'import numpy as np\n'), ((5297, 5317), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (5305, 5317), True, 'import numpy as np\n'), ((5356, 5377), 'numpy.array', 'np.array', (['[-0.3, 0.5]'], {}), '([-0.3, 0.5])\n', (5364, 5377), True, 'import numpy as np\n'), ((6804, 6823), 'numpy.array', 'np.array', (['other_pos'], {}), '(other_pos)\n', (6812, 6823), True, 'import numpy as np\n'), ((7506, 7544), 'numpy.concatenate', 'np.concatenate', (['[other_pos, is_caught]'], {}), '([other_pos, is_caught])\n', (7520, 7544), True, 'import numpy as np\n'), ((2240, 2268), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (2248, 2268), True, 'import numpy as np\n'), ((5446, 5467), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (5454, 5467), True, 'import numpy as np\n'), ((5496, 5517), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (5504, 5517), True, 'import numpy as np\n'), ((2024, 2052), 'numpy.array', 'np.array', (['[0.35, 0.85, 0.35]'], {}), '([0.35, 0.85, 0.35])\n', (2032, 2052), True, 'import numpy as np\n'), ((2081, 2109), 'numpy.array', 'np.array', (['[0.85, 0.35, 0.35]'], {}), '([0.85, 0.35, 0.35])\n', (2089, 2109), True, 'import numpy as np\n'), ((2450, 2488), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (2467, 2488), True, 'import numpy as np\n'), ((2525, 2546), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2533, 2546), True, 'import numpy as np\n'), ((2579, 2600), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (2587, 2600), True, 'import numpy as np\n'), ((2811, 2853), 'numpy.random.uniform', 'np.random.uniform', (['(-0.9)', '(+0.9)', 'world.dim_p'], {}), '(-0.9, +0.9, world.dim_p)\n', (2828, 2853), True, 'import numpy as np\n'), ((2893, 2914), 
'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2901, 2914), True, 'import numpy as np\n'), ((3851, 3868), 'numpy.exp', 'np.exp', (['(2 * x - 2)'], {}), '(2 * x - 2)\n', (3857, 3868), True, 'import numpy as np\n'), ((7237, 7257), 'numpy.array', 'np.array', (['entity_pos'], {}), '(entity_pos)\n', (7245, 7257), True, 'import numpy as np\n'), ((7461, 7480), 'numpy.array', 'np.array', (['is_caught'], {}), '(is_caught)\n', (7469, 7480), True, 'import numpy as np\n'), ((1003, 1022), 'numpy.square', 'np.square', (['speed[0]'], {}), '(speed[0])\n', (1012, 1022), True, 'import numpy as np\n'), ((1025, 1044), 'numpy.square', 'np.square', (['speed[1]'], {}), '(speed[1])\n', (1034, 1044), True, 'import numpy as np\n'), ((3358, 3404), 'numpy.square', 'np.square', (['(agent.state.p_pos - adv.state.p_pos)'], {}), '(agent.state.p_pos - adv.state.p_pos)\n', (3367, 3404), True, 'import numpy as np\n'), ((4914, 4934), 'numpy.square', 'np.square', (['delta_pos'], {}), '(delta_pos)\n', (4923, 4934), True, 'import numpy as np\n'), ((1096, 1115), 'numpy.square', 'np.square', (['speed[0]'], {}), '(speed[0])\n', (1105, 1115), True, 'import numpy as np\n'), ((1118, 1137), 'numpy.square', 'np.square', (['speed[1]'], {}), '(speed[1])\n', (1127, 1137), True, 'import numpy as np\n'), ((4648, 4690), 'numpy.square', 'np.square', (['(a.state.p_pos - adv.state.p_pos)'], {}), '(a.state.p_pos - adv.state.p_pos)\n', (4657, 4690), True, 'import numpy as np\n')] |
"""
Reference: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
"Treatment Effect Estimation using Invariant Risk Minimization,"
IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021
Last updated: March 12, 2021
Code author: <NAME>
File name: metrics.py
Note: metric functions
(1) PEHE: Precision in Estimation of Heterogeneous Effect
(2) ATE: Average Treatment Effect
"""
# necessary packages
import numpy as np
def PEHE(test_potential_outcome, pred_potential_outcome):
    """Precision in Estimation of Heterogeneous Effect (PEHE).

    PEHE is the root-mean-squared error between the true and the estimated
    individual treatment effects (ITE), where an ITE is the difference
    between the treated (column 1) and control (column 0) potential outcomes.

    Args:
      - test_potential_outcome: true potential outcomes, shape (n, 2)
      - pred_potential_outcome: estimated potential outcomes, shape (n, 2)

    Returns:
      - pehe: square root of the mean squared ITE error
    """
    true_ite = test_potential_outcome[:, 1] - test_potential_outcome[:, 0]
    estimated_ite = pred_potential_outcome[:, 1] - pred_potential_outcome[:, 0]
    squared_errors = np.square(true_ite - estimated_ite)
    return np.sqrt(np.mean(squared_errors, axis=0))
def ate_error(test_ate, pred_ate):
  """Compute the absolute error in Average Treatment Effect.
  Args:
    - test_ate: true average treatment effect
    - pred_ate: estimated average treatment effect
  Returns:
    - ate_error: computed error in average treatment effect
  """
  # Absolute difference between the true and the estimated ATE.
  ate_error = np.abs(test_ate - pred_ate)
  # NOTE(review): the trailing '| [' below looks like corruption from a
  # concatenation of this file with external data — confirm against the
  # original metrics.py; the intended statement is 'return ate_error'.
  return ate_error | [
"numpy.abs",
"numpy.sqrt",
"numpy.square"
] | [((956, 969), 'numpy.sqrt', 'np.sqrt', (['pehe'], {}), '(pehe)\n', (963, 969), True, 'import numpy as np\n'), ((1271, 1298), 'numpy.abs', 'np.abs', (['(test_ate - pred_ate)'], {}), '(test_ate - pred_ate)\n', (1277, 1298), True, 'import numpy as np\n'), ((905, 935), 'numpy.square', 'np.square', (['(ite_test - ite_pred)'], {}), '(ite_test - ite_pred)\n', (914, 935), True, 'import numpy as np\n')] |
from kosudoku.grid import SudokuWell, SudokuGenomicCoord
# ------------------------------------------------------------------------------------------------ #
def FindFeaturesWithDisruptionsInProgenitorButNotInQC(featureArray):
	"""Partition features by which collections hold their representative mutants.

	Scans each feature's sudoku grid entries and classifies the feature by
	whether it has at least one mutant addressed in the progenitor collection
	and at least one in the colony-purified (QC) collection.

	Returns:
		[featuresWithDisruptionsInQCAndProgenitor,
		 featuresWithDisruptionsInProgenitorOnly]
	"""
	featuresInBoth = []
	featuresInProgenitorOnly = []
	for feature in featureArray:
		gridEntries = feature.sudokuGridEntries
		if len(gridEntries) == 0:
			# Features with no grid entries belong to neither output list.
			continue
		hasQCRep = any('colonyPurified' in entry['addressDict'] for entry in gridEntries)
		hasProgenitorRep = any('progenitor' in entry['addressDict'] for entry in gridEntries)
		if hasProgenitorRep and hasQCRep:
			featuresInBoth.append(feature)
		elif hasProgenitorRep and not hasQCRep:
			featuresInProgenitorOnly.append(feature)
	return [featuresInBoth, featuresInProgenitorOnly]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignProgenitorWellsToCondensedCollection(wellsToBeAssignedInput, startPlateNumber, \
startRowNumber, startColNumber, rows, columns, fillOrder='columns', fillPattern='checkerboard', \
plateRowForCondensedWells=1, fillLastPlateWithBlanks=True):
	"""Lay progenitor-collection wells out onto condensed-collection plates.

	Walks plate positions starting at (startPlateNumber, startRowNumber,
	startColNumber), creating one SudokuCondensedWell per position. Positions
	that fillPattern marks as blank receive the module-level blank sentinels;
	every other position consumes the next entry of wellsToBeAssignedInput.
	Optionally pads the remainder of the final plate with blank wells.

	Returns:
		[condensedWellArray, lastPlateNumber, lastRowNumber, lastColNumber]
	"""
	# Note on input parameters: rows and columns are ordered listings of all of the rows and columns
	# on a plate.
	from copy import deepcopy
	import pdb
	# Work on a copy; reversing lets wells be consumed cheaply with pop().
	wellsToBeAssigned = deepcopy(wellsToBeAssignedInput)
	wellsToBeAssigned.reverse()
	totalWellsForRearray = len(wellsToBeAssigned)
	currentRowNumber = startRowNumber
	currentColNumber = startColNumber
	currentPlateNumber = startPlateNumber
	wellsAssignedWithMutants = 0
	wellsAssignedOnCurrentPlate = 0
	condensedWellArray = []
	while wellsAssignedWithMutants < totalWellsForRearray:
		currentRow = rows[currentRowNumber - 1]
		currentCol = columns[currentColNumber - 1]
		# NOTE(review): currentPlateNumber is passed both as the plate name and
		# as the plate-column argument — confirm this is intentional.
		newWell = SudokuCondensedWell(currentPlateNumber, plateRowForCondensedWells, \
		currentPlateNumber, currentRow, currentCol, addressSystem='condensed')
		# Test if this is supposed to be a blank well
		blank = TestIfCondensedCollectionPlateWellIsBlank(currentPlateNumber, currentRowNumber, \
		currentColNumber, fillPattern)
		if blank:
			# If the well is supposed to be blank, set readalignment coords to blank
			newWell.readAlignmentCoords.append(deepcopy(blankSudokuCoord))
			newWell.condensationData = blankSudokuGridEntrySummary
			newWell.addressDict['progenitor'] = \
			blankSudokuGridEntrySummary['addressDict']['progenitor']
		else:
			# If the well isn't supposed to be blank, copy over information from wells to be
			# assigned
			wellToBeAssigned = wellsToBeAssigned.pop()
			addressDictData = deepcopy(wellToBeAssigned[1]['well'].addressDict['progenitor'])
			readAlignmentCoords = deepcopy(wellToBeAssigned[1]['well'].readAlignmentCoords)
			newWell.addressDict['progenitor'] = addressDictData
			newWell.readAlignmentCoords = readAlignmentCoords
			newWell.condensationData = wellToBeAssigned[1]['sudokuGridEntry']
			wellsAssignedWithMutants += 1
		# Add the new well to the growing list of condensed collection wells
		condensedWellArray.append(newWell)
		wellsAssignedOnCurrentPlate += 1
		# Increment the row, column and plate numbers
		[nextPlateNumber, nextRowNumber, nextColNumber] = \
		IncrementWell(currentPlateNumber, currentRowNumber, currentColNumber, rows, columns, \
		fillOrder)
		if nextPlateNumber != currentPlateNumber:
			wellsAssignedOnCurrentPlate = 0
		currentPlateNumber = nextPlateNumber
		currentRowNumber = nextRowNumber
		currentColNumber = nextColNumber
	# If desired, fill out the remainder of the last plate with blank entries
	if fillLastPlateWithBlanks == True:
		lastPlateNumberWithEntries = deepcopy(currentPlateNumber)
		# Keep adding blank wells until the plate number rolls over.
		while lastPlateNumberWithEntries == currentPlateNumber:
			currentRow = rows[currentRowNumber - 1]
			currentCol = columns[currentColNumber - 1]
			newWell = SudokuCondensedWell(currentPlateNumber, plateRowForCondensedWells, \
			currentPlateNumber, currentRow, currentCol, addressSystem='condensed')
			newWell.addressDict['progenitor'] = \
			blankSudokuGridEntrySummary['addressDict']['progenitor']
			newWell.condensationData = blankSudokuGridEntrySummary
			newWell.readAlignmentCoords.append(deepcopy(blankSudokuCoord))
			condensedWellArray.append(newWell)
			# Increment the row, column and plate numbers
			[nextPlateNumber, nextRowNumber, nextColNumber] = \
			IncrementWell(currentPlateNumber, currentRowNumber, currentColNumber, rows, columns, \
			fillOrder)
			if nextPlateNumber != currentPlateNumber:
				wellsAssignedOnCurrentPlate = 0
			currentPlateNumber = nextPlateNumber
			currentRowNumber = nextRowNumber
			currentColNumber = nextColNumber
	return [condensedWellArray, currentPlateNumber, currentRowNumber, currentColNumber]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SortBestSudokuWellsIntoSinglyAndMultiplyOccupiedSets(featuresAndBestWellArray, \
rearrayPickOrder='columns'):
	"""Split (feature, best-well) picks into re-array vs colony-purification sets.

	Singly occupied wells only need a simple re-array; multiply occupied wells
	are duplicated once per colony to pick and tagged 'Colony Purification'.
	Both output lists are sorted by plate name, then by column/row (order set
	by rearrayPickOrder, defaulting to column-major).

	Returns:
		[singlyOccupiedRepresenativeWells, multiplyOccupiedRepresentativeWells]
	"""
	# NOTE(review): 'Represenative' (sic) is misspelled throughout; kept as-is.
	from copy import deepcopy
	featuresAndBestWellArrayCopy = deepcopy(featuresAndBestWellArray)
	singlyOccupiedRepresenativeWells = []
	multiplyOccupiedRepresentativeWells = []
	totalWellsForRearrayOfSinglyOccupiedWells = 0
	totalWellsForRearrayOfMultiplyOccupiedWells = 0
	for featureAndBestWell in featuresAndBestWellArrayCopy:
		locationSummary = featureAndBestWell[1]['sudokuGridEntry']
		multipleOccupancy = locationSummary['multipleOccupancy']
		coloniesToPick = featureAndBestWell[1]['minimumColoniesToPick']
		if multipleOccupancy:
			# One entry per colony that must be picked from this well.
			j = 0
			while j < coloniesToPick:
				multiplyOccupiedRepresentativeWells.append(deepcopy(featureAndBestWell))
				multiplyOccupiedRepresentativeWells[-1][1]['sudokuGridEntry']['condensationType'] \
				= 'Colony Purification'
				j += 1
			totalWellsForRearrayOfMultiplyOccupiedWells += coloniesToPick
		else:
			singlyOccupiedRepresenativeWells.append(deepcopy(featureAndBestWell))
			singlyOccupiedRepresenativeWells[-1][1]['sudokuGridEntry']['condensationType'] = \
			'Rearray'
			totalWellsForRearrayOfSinglyOccupiedWells += coloniesToPick
	totalWellsForRearrayOfSinglyOccupiedWells = int(totalWellsForRearrayOfSinglyOccupiedWells)
	totalWellsForRearrayOfMultiplyOccupiedWells = int(totalWellsForRearrayOfMultiplyOccupiedWells)
	# Map the requested pick order onto secondary/tertiary sort keys.
	if rearrayPickOrder == 'columns':
		secondSortIndex = 'col'
		thirdSortIndex = 'row'
	elif rearrayPickOrder == 'rows':
		secondSortIndex = 'row'
		thirdSortIndex = 'col'
	else:
		# Unrecognized pick order falls back to column-major.
		secondSortIndex = 'col'
		thirdSortIndex = 'row'
	# Sort the singly occupied wells array
	singlyOccupiedRepresenativeWells = \
	sorted(singlyOccupiedRepresenativeWells, key = lambda x: \
	(x[1]['sudokuGridEntry']['plateName'], x[1]['sudokuGridEntry'][secondSortIndex], \
	x[1]['sudokuGridEntry'][thirdSortIndex]))
	# Sort the multiply occupied wells array
	multiplyOccupiedRepresentativeWells = \
	sorted(multiplyOccupiedRepresentativeWells, key = lambda x: \
	(x[1]['sudokuGridEntry']['plateName'], x[1]['sudokuGridEntry'][secondSortIndex], \
	x[1]['sudokuGridEntry'][thirdSortIndex]))
	return [singlyOccupiedRepresenativeWells, multiplyOccupiedRepresentativeWells]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# Pick Best Mutant To Disrupt Feature
def PickBestSudokuWellToDisruptFeature(featureArray, sudokuGridLookupDict, pickProbability=0.95):
	"""For each feature, pick the progenitor well most likely to yield its mutant.

	Every progenitor-collection entry that disrupts the feature is scored by a
	'purifiability' heuristic (closer to the translation start, unambiguously
	located, and fewer well occupants is better), and the number of colonies
	needed to recover the mutant with probability >= pickProbability is
	recorded.

	Returns:
		List of [feature, bestWellDict] pairs, where bestWellDict has keys
		'well', 'purifiability', 'sudokuGridEntry', 'minimumColoniesToPick'.
	"""
	import pdb
	from operator import itemgetter
	from math import floor, ceil
	from numpy import log
	featuresAndBestWellArray = []
	i = 0
	while i < len(featureArray):
		feature = featureArray[i]
		featureLocusTag = feature.tagDict['locus_tag']
		sudokuGridEntries = feature.sudokuGridEntries
		sudokuWellObjectsToScore = []
		j = 0
		while j < len(sudokuGridEntries) and len(sudokuGridEntries) > 0:
			addressDictKeys = list(sudokuGridEntries[j]['addressDict'].keys())
			if 'progenitor' in addressDictKeys:
				plateCol = sudokuGridEntries[j]['addressDict']['progenitor']['plateCol']
				plateRow = sudokuGridEntries[j]['addressDict']['progenitor']['plateRow']
				row = sudokuGridEntries[j]['addressDict']['progenitor']['row']
				col = sudokuGridEntries[j]['addressDict']['progenitor']['col']
				wellOccupants = sudokuGridEntries[j]['wellOccupants']
				fracDistFromTranslationStart = sudokuGridEntries[j]['fracDistFromTranslationStart']
				locatability = sudokuGridEntries[j]['locatability']
				sudokuWell = sudokuGridLookupDict[plateRow][plateCol].wellGrid[row][col]
				# Ambiguously located wells are scored half as purifiable.
				if locatability == 'unambiguous':
					locatabilityDiscountFactor = 1.0
				else:
					locatabilityDiscountFactor = 0.5
				purifiability = \
				(1.0 - fracDistFromTranslationStart)*locatabilityDiscountFactor*(1.0/wellOccupants)
				# pA: chance a randomly picked colony from this well is the desired mutant.
				pA = (1.0/(float(wellOccupants)))
				if pA == 1.0:
					minimumColoniesToPick = 1.0
				else:
					# Colonies so that P(at least one success) reaches pickProbability.
					# NOTE(review): floor() can undershoot the target probability by
					# one colony; ceil() would guarantee it — confirm which is intended.
					minimumColoniesToPick = floor(log(1-pickProbability)/log(1-pA))
				sudokuWellObjectsToScore.append({'well':sudokuWell, 'purifiability':purifiability,\
				'sudokuGridEntry':sudokuGridEntries[j], \
				'minimumColoniesToPick':minimumColoniesToPick})
			j += 1
		# NOTE(review): if a feature has no 'progenitor' entries this list is
		# empty and the [0] below raises IndexError — confirm inputs are
		# pre-filtered to features with progenitor representatives.
		sudokuWellObjectsToScore = sorted(sudokuWellObjectsToScore, reverse=True, \
		key=itemgetter('purifiability'))
		bestWell = sudokuWellObjectsToScore[0]
		featuresAndBestWellArray.append([feature, bestWell])
		i += 1
	return featuresAndBestWellArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignPlateGridCoordinatesToCondensedCollectionPlates(condensedWellArray):
	"""Assign plate-grid (plate row/column) coordinates to condensed-collection wells.

	Arranges the distinct plate names onto a roughly square grid of
	'PRxx'/'PCxx' coordinates, then stamps each well object (and its
	addressDict['condensed'] entry) with the coordinates of its plate.

	Args:
		condensedWellArray: list of well objects, each with a plateName
			attribute and an addressDict containing a 'condensed' dict.

	Returns:
		A deep copy of condensedWellArray with plateRow/plateCol filled in.
	"""
	from copy import deepcopy
	# Bug fix: 'unique' used to be imported from scipy, but the deprecated
	# scipy aliases of NumPy functions have been removed from the scipy
	# namespace; import it from numpy, its actual home.
	from numpy import sqrt, unique
	from math import ceil, floor
	wellList = deepcopy(condensedWellArray)
	# Collect the distinct plate names. They don't need to be continuous or
	# even numbers, just sortable.
	plateNames = []
	for well in wellList:
		plateNames.append(well.plateName)
	uniquePlateNames = unique(plateNames)
	lengthPlateNames = len(uniquePlateNames)
	# Pick grid dimensions close to a square that can hold every plate.
	sqrtLenPlates = sqrt(lengthPlateNames)
	plateCols = ceil(sqrtLenPlates)
	plateRows = floor(sqrtLenPlates)
	while plateRows*plateCols < lengthPlateNames:
		plateRows += 1
	# Generate the keys for the plate rows and plate columns (PR01, PC01, ...).
	plateRowKeys = ['PR' + str(i).zfill(2) for i in range(1, plateRows + 1)]
	plateColKeys = ['PC' + str(i).zfill(2) for i in range(1, plateCols + 1)]
	# Assign the plate names to positions in the new plate grid.
	uniquePlateNames = sorted(uniquePlateNames, reverse=True)
	uniquePlateNamesCopy = deepcopy(uniquePlateNames)
	plateGridLookupDict = {}
	i = 0
	while i < len(plateRowKeys):
		plateGridLookupDict[plateRowKeys[i]] = {}
		j = 0
		while j < len(plateColKeys) and len(uniquePlateNamesCopy) > 0:
			plateGridLookupDict[plateRowKeys[i]][plateColKeys[j]] = uniquePlateNamesCopy.pop()
			j += 1
		i += 1
	# Use the plate grid lookup table to stamp each well with the grid
	# coordinates of its plate.
	i = 0
	while i < len(wellList):
		plateName = wellList[i].plateName
		plateNameMatchFound = False
		j = 0
		while j < len(plateRowKeys) and plateNameMatchFound == False:
			k = 0
			tempPlateColKeys = sorted(list(plateGridLookupDict[plateRowKeys[j]].keys()))
			while k < len(tempPlateColKeys):
				testPlateName = str(plateGridLookupDict[plateRowKeys[j]][tempPlateColKeys[k]])
				if str(testPlateName) == str(plateName):
					wellList[i].plateCol = plateColKeys[k]
					wellList[i].plateRow = plateRowKeys[j]
					wellList[i].addressDict['condensed']['plateCol'] = plateColKeys[k]
					wellList[i].addressDict['condensed']['plateRow'] = plateRowKeys[j]
					plateNameMatchFound = True
					break
				k += 1
			j += 1
		if plateNameMatchFound == False:
			print('Plate name match not found for wellList entry ' + str(i))
		i += 1
	return wellList
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def WriteSudokuGridCondensationInstructions(condensationFileName, condensedWellArray):
	"""Write an instruction file mapping progenitor wells to condensed wells.

	One line per condensed-collection well, listing its condensed and
	progenitor addresses, the condensation type, and the transposon insertion
	the well is meant to carry.

	Raises:
		KeyError: if a well is missing its 'progenitor'/'condensed' address
			data or its condensationType (previously this dropped into pdb,
			which hangs non-interactive runs).
	"""
	from kosudoku.xml import ExportListForXML
	headers = [\
	'#Condensed Collection Plate', 'Condensed Collection Row', \
	'Condensed Collection Column', 'Condensed Collection Well', \
	'Condensed Collection Plate Row', 'Condensed Collection Plate Column', \
	'Progenitor Collection Plate', 'Progenitor Collection Row', \
	'Progenitor Collection Column', 'Progenitor Collection Well', \
	'Condensation Type', \
	'Desired Transposon Coordinate', \
	'Desired Feature', 'Desired Locus Tag']
	headerStr = ExportListForXML(headers) + '\n'
	outputStr = ''
	i = 0
	while i < len(condensedWellArray):
		condensedWell = condensedWellArray[i]
		addressDict = condensedWell.addressDict
		# Fail loudly (instead of dropping into the debugger, as earlier
		# versions did) when a well is missing its expected data.
		try:
			progAddressDict = addressDict['progenitor']
			condAddressDict = addressDict['condensed']
			condensationType = condensedWell.condensationData['condensationType']
		except KeyError as err:
			raise KeyError('Condensed well ' + str(i) + ' is missing expected data: ' + str(err))
		condCollectionPlate = condAddressDict['plateName']
		condCollectionRow = condAddressDict['row']
		condCollectionCol = condAddressDict['col']
		condCollectionWell = str(condCollectionRow) + str(condCollectionCol).zfill(2)
		condCollectionPR = condAddressDict['plateRow']
		condCollectionPC = condAddressDict['plateCol']
		progCollectionPlate = progAddressDict['plateName']
		progCollectionRow = progAddressDict['row']
		progCollectionCol = progAddressDict['col']
		# Blank wells carry no progenitor address.
		if progCollectionRow == 'NA' and progCollectionCol == 'NA':
			progCollectionWell = 'NA'
		else:
			progCollectionWell = str(progCollectionRow) + str(progCollectionCol).zfill(2)
		desiredTransposonCoord = condensedWell.condensationData['readAlignmentCoord']
		readAlignmentCoords = condensedWell.readAlignmentCoords
		desiredFeature = ''
		desiredLocusTag = ''
		# Find the feature annotation matching the hoped-for transposon coordinate.
		for coord in readAlignmentCoords:
			if desiredTransposonCoord == coord.coord:
				desiredFeature = coord.featureName[0]
				desiredLocusTag = coord.locusTag[0]
		outputLineArray = [condCollectionPlate, condCollectionRow, condCollectionCol, \
		condCollectionWell, condCollectionPR, condCollectionPC, \
		progCollectionPlate, progCollectionRow, progCollectionCol, progCollectionWell, \
		condensationType, desiredTransposonCoord, desiredFeature, \
		desiredLocusTag]
		lineStr = ExportListForXML(outputLineArray) + '\n'
		outputStr += lineStr
		i += 1
	# Context manager guarantees the handle is closed even if a write fails.
	with open(condensationFileName, 'w') as outputHandle:
		outputHandle.write(headerStr)
		outputHandle.write(outputStr)
	return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportPredictedCondensedCollectionCatalog(filename):
	"""Import the kosudoku-condense output catalog for use in kosudoku-isitinthere.

	Each non-comment line describes one condensed-collection well, the
	progenitor well it was picked from, and the hoped-for transposon insertion.

	Returns:
		dict keyed by '<condensed plate>_<condensed well>'; each value is a
		dict of the parsed columns plus an empty 'progenitorContents' list,
		filled in later by UpdateColonyPurificationDictWithProgenitorCollection.
	"""
	from kosudoku.utils import ParseCSVLine
	# Context manager ensures the file handle is closed (previously leaked).
	with open(filename, 'r') as fileHandle:
		data = fileHandle.readlines()
	sudokuCatalog = {}
	for line in data:
		# Skip blank lines (which used to crash on line[0]) and comment lines.
		if line.strip() == '' or line[0] == '#':
			continue
		columns = ParseCSVLine(line)
		entryDict = {}
		entryDict['cpPlate'] = columns[0]
		entryDict['cpRow'] = columns[1]
		entryDict['cpCol'] = columns[2]
		entryDict['cpWell'] = columns[3]
		entryDict['cpPlateRow'] = columns[4]
		entryDict['cpPlateCol'] = columns[5]
		entryDict['progenitorPlate'] = columns[6]
		entryDict['progenitorRow'] = columns[7]
		entryDict['progenitorCol'] = columns[8]
		entryDict['progenitorWell'] = columns[9]
		entryDict['condensationType'] = columns[10]
		entryDict['hopedForTransposonCoord'] = columns[11]
		entryDict['hopedForFeature'] = columns[12]
		entryDict['hopedForLocusTag'] = columns[13]
		# Placeholder; populated later from the progenitor collection.
		entryDict['progenitorContents'] = []
		cpKey = entryDict['cpPlate'] + '_' + entryDict['cpWell']
		sudokuCatalog[cpKey] = entryDict
	return sudokuCatalog
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
class SudokuCondensedWell(SudokuWell):
	"""A SudokuWell destined for the condensed collection.

	Carries a condensationData dict describing the progenitor-collection well
	(and hoped-for transposon insertion) this well was condensed from.
	"""
	def __init__(self, sourcePlateName, sourcePlateRow, sourcePlateCol, sourceRow, \
	sourceCol, addressSystem='condensed', OD=1.0):
		# Bug fix: the OD argument was previously ignored (OD=1.0 was
		# hard-coded in the superclass call); forward it so callers can set a
		# real optical density. Default behavior is unchanged.
		SudokuWell.__init__(self, sourcePlateName, sourcePlateRow, sourcePlateCol, sourceRow, \
		sourceCol, addressSystem=addressSystem, OD=OD)
		# Summary of why/how this well was condensed; filled in by the caller.
		self.condensationData = {}
# ------------------------------------------------------------------------------------------------ #
# ----------------------------------------------------------------------------------------------- #
def TestIfCondensedCollectionPlateWellIsBlank(currentPlate, currentRowNumber, currentColNumber, \
pattern):
	"""Return True if the well at (row, col) should be left empty.

	With the 'checkerboard' pattern, a well is blank exactly when its row and
	column numbers share parity — odd/odd (e.g. A1) or even/even (e.g. B2) —
	i.e. when row + col is even; all other wells are filled. With 'all',
	every well is filled. The plate number is currently unused. An
	unrecognized pattern prints a warning and returns None.
	"""
	if pattern == 'checkerboard':
		# Same-parity row/col means (row + col) is even -> blank.
		return (currentRowNumber + currentColNumber) % 2 == 0
	if pattern == 'all':
		# Every well is filled, so none is blank.
		return False
	print('Currently only doing re-array into all wells or checkerboard.')
	return
# ----------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------ #
def IncrementWellInColumnsOrder(currentPlateNumber, currentRowNumber, currentColNumber, rows, cols):
	"""Advance one well position, filling down columns (A1, B1, ..., H1, A2, ...).

	Wraps to the top of the next column after the last row, and to column 1 of
	the next plate after the last well of a plate. All numbers are 1-based.

	Returns:
		[nextPlateNumber, nextRowNumber, nextColNumber]
	"""
	nextPlateNumber = currentPlateNumber
	nextRowNumber = currentRowNumber + 1
	nextColNumber = currentColNumber
	if nextRowNumber > len(rows):
		# Bottom of the column: wrap to the top of the next column.
		nextRowNumber = 1
		nextColNumber += 1
	if nextColNumber > len(cols):
		# Past the last column: start at the first column of the next plate.
		nextColNumber = 1
		nextPlateNumber += 1
	return [nextPlateNumber, nextRowNumber, nextColNumber]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def IncrementWellInRowsOrder(currentPlateNumber, currentRowNumber, currentColNumber, rows, cols):
	"""Advance one well position, filling across rows (A1, A2, ..., A12, B1, ...).

	Wraps to column 1 of the next row after the last column, and to row 1 of
	the next plate after the last well of a plate. All numbers are 1-based.

	Returns:
		[nextPlateNumber, nextRowNumber, nextColNumber]
	"""
	nextPlateNumber = currentPlateNumber
	nextRowNumber = currentRowNumber
	nextColNumber = currentColNumber + 1
	if nextColNumber > len(cols):
		# End of the row: wrap to the first column of the next row.
		nextColNumber = 1
		nextRowNumber += 1
	if nextRowNumber > len(rows):
		# Past the last row: start at the first row of the next plate.
		nextRowNumber = 1
		nextPlateNumber += 1
	return [nextPlateNumber, nextRowNumber, nextColNumber]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def IncrementWell(currentPlateNumber, currentRowNumber, currentColNumber, rows, cols, fillOrder):
	"""Advance one well position in either 'rows' or 'columns' fill order.

	Bug fix: this previously ignored its rows/cols arguments and always used
	the module-level 96-well globals (rows96/columns96), and raised a
	NameError for an unrecognized fillOrder. It now honors its arguments and
	raises a clear ValueError on a bad fillOrder.

	Returns:
		[nextPlateNumber, nextRowNumber, nextColNumber]
	"""
	if fillOrder == 'rows':
		return IncrementWellInRowsOrder(currentPlateNumber, currentRowNumber, \
		currentColNumber, rows, cols)
	if fillOrder == 'columns':
		return IncrementWellInColumnsOrder(currentPlateNumber, currentRowNumber, \
		currentColNumber, rows, cols)
	raise ValueError("fillOrder must be 'rows' or 'columns', got: " + str(fillOrder))
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# Some useful global variables for use in condensation
# Sentinel genomic coordinate used to mark deliberately-empty (blank) wells;
# -1 is an impossible read-alignment coordinate.
blankSudokuCoord = SudokuGenomicCoord(-1, 'NA', 'NA', 'NA')
blankSudokuCoord.locusTag.append('BLANK')
blankSudokuCoord.featureName.append('BLANK')
blankSudokuCoord.distanceFromFeatureTranslationStart.append('NA')
blankSudokuCoord.fracDistanceFromFeatureTranslationStart.append('NA')
# Grid-entry summary for a blank well: every address field is 'NA', the
# read-alignment coordinate is the sentinel -1, and the well has 0 occupants.
blankSudokuGridEntrySummary = {\
'addressDict': {'progenitor': {'col': 'NA','plateCol': 'NA','plateName': 'NA', 'plateRow': 'NA',\
'row': 'NA'}},\
'col': 'NA', 'distFromTranslationStart': 'NA', 'fracDistFromTranslationStart': 'NA', \
'locatability': 'NA', 'multipleOccupancy': 'NA','plateCol': 'NA', \
'plateName': 'NA','plateRow': 'NA','readAlignmentCoord': -1,'row': 'NA', 'wellOccupants': 0, \
'condensationType':'NA'}
# Standard 96-well plate layout: 12 columns (1-12) by 8 rows (A-H).
columns96 = [1,2,3,4,5,6,7,8,9,10,11,12]
rows96 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
wellsPer96WellPlate = len(columns96)*len(rows96)
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindSingleRepresentativeForColonyPurifiedMutantsInCondensedCollection(cdSudokuWellArray, \
rearrayPickOrder='columns'):
	# Used in the kosudoku-qc program
	# How is this supposed to work
	# NOTE(review): unimplemented stub — it currently ignores its arguments and
	# returns None unconditionally. TODO: implement or remove.
	return
# ------------------------------------------------------------------------------------------------ #
| [
"kosudoku.xml.ExportListForXML",
"scipy.unique",
"math.ceil",
"numpy.sqrt",
"kosudoku.grid.SudokuGenomicCoord",
"math.floor",
"numpy.log",
"kosudoku.utils.ParseCSVLine",
"kosudoku.grid.SudokuWell.__init__",
"pdb.set_trace",
"copy.deepcopy",
"operator.itemgetter"
] | [((22813, 22853), 'kosudoku.grid.SudokuGenomicCoord', 'SudokuGenomicCoord', (['(-1)', '"""NA"""', '"""NA"""', '"""NA"""'], {}), "(-1, 'NA', 'NA', 'NA')\n", (22831, 22853), False, 'from kosudoku.grid import SudokuWell, SudokuGenomicCoord\n'), ((2179, 2211), 'copy.deepcopy', 'deepcopy', (['wellsToBeAssignedInput'], {}), '(wellsToBeAssignedInput)\n', (2187, 2211), False, 'from copy import deepcopy\n'), ((6003, 6037), 'copy.deepcopy', 'deepcopy', (['featuresAndBestWellArray'], {}), '(featuresAndBestWellArray)\n', (6011, 6037), False, 'from copy import deepcopy\n'), ((11034, 11062), 'copy.deepcopy', 'deepcopy', (['condensedWellArray'], {}), '(condensedWellArray)\n', (11042, 11062), False, 'from copy import deepcopy\n'), ((11329, 11347), 'scipy.unique', 'unique', (['plateNames'], {}), '(plateNames)\n', (11335, 11347), False, 'from scipy import unique\n'), ((11455, 11477), 'numpy.sqrt', 'sqrt', (['lengthPlateNames'], {}), '(lengthPlateNames)\n', (11459, 11477), False, 'from numpy import sqrt\n'), ((11491, 11510), 'math.ceil', 'ceil', (['sqrtLenPlates'], {}), '(sqrtLenPlates)\n', (11495, 11510), False, 'from math import ceil, floor\n'), ((11524, 11544), 'math.floor', 'floor', (['sqrtLenPlates'], {}), '(sqrtLenPlates)\n', (11529, 11544), False, 'from math import ceil, floor\n'), ((12029, 12055), 'copy.deepcopy', 'deepcopy', (['uniquePlateNames'], {}), '(uniquePlateNames)\n', (12037, 12055), False, 'from copy import deepcopy\n'), ((4514, 4542), 'copy.deepcopy', 'deepcopy', (['currentPlateNumber'], {}), '(currentPlateNumber)\n', (4522, 4542), False, 'from copy import deepcopy\n'), ((14150, 14175), 'kosudoku.xml.ExportListForXML', 'ExportListForXML', (['headers'], {}), '(headers)\n', (14166, 14175), False, 'from kosudoku.xml import ExportListForXML\n'), ((18366, 18503), 'kosudoku.grid.SudokuWell.__init__', 'SudokuWell.__init__', (['self', 'sourcePlateName', 'sourcePlateRow', 'sourcePlateCol', 'sourceRow', 'sourceCol'], {'addressSystem': 'addressSystem', 'OD': '(1.0)'}), 
'(self, sourcePlateName, sourcePlateRow, sourcePlateCol,\n sourceRow, sourceCol, addressSystem=addressSystem, OD=1.0)\n', (18385, 18503), False, 'from kosudoku.grid import SudokuWell, SudokuGenomicCoord\n'), ((3453, 3516), 'copy.deepcopy', 'deepcopy', (["wellToBeAssigned[1]['well'].addressDict['progenitor']"], {}), "(wellToBeAssigned[1]['well'].addressDict['progenitor'])\n", (3461, 3516), False, 'from copy import deepcopy\n'), ((3542, 3599), 'copy.deepcopy', 'deepcopy', (["wellToBeAssigned[1]['well'].readAlignmentCoords"], {}), "(wellToBeAssigned[1]['well'].readAlignmentCoords)\n", (3550, 3599), False, 'from copy import deepcopy\n'), ((15945, 15978), 'kosudoku.xml.ExportListForXML', 'ExportListForXML', (['outputLineArray'], {}), '(outputLineArray)\n', (15961, 15978), False, 'from kosudoku.xml import ExportListForXML\n'), ((17166, 17187), 'kosudoku.utils.ParseCSVLine', 'ParseCSVLine', (['data[i]'], {}), '(data[i])\n', (17178, 17187), False, 'from kosudoku.utils import ParseCSVLine\n'), ((3094, 3120), 'copy.deepcopy', 'deepcopy', (['blankSudokuCoord'], {}), '(blankSudokuCoord)\n', (3102, 3120), False, 'from copy import deepcopy\n'), ((5060, 5086), 'copy.deepcopy', 'deepcopy', (['blankSudokuCoord'], {}), '(blankSudokuCoord)\n', (5068, 5086), False, 'from copy import deepcopy\n'), ((6844, 6872), 'copy.deepcopy', 'deepcopy', (['featureAndBestWell'], {}), '(featureAndBestWell)\n', (6852, 6872), False, 'from copy import deepcopy\n'), ((10351, 10378), 'operator.itemgetter', 'itemgetter', (['"""purifiability"""'], {}), "('purifiability')\n", (10361, 10378), False, 'from operator import itemgetter\n'), ((14399, 14414), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14412, 14414), False, 'import pdb\n'), ((14487, 14502), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14500, 14502), False, 'import pdb\n'), ((15253, 15268), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15266, 15268), False, 'import pdb\n'), ((6571, 6599), 'copy.deepcopy', 'deepcopy', 
(['featureAndBestWell'], {}), '(featureAndBestWell)\n', (6579, 6599), False, 'from copy import deepcopy\n'), ((10019, 10043), 'numpy.log', 'log', (['(1 - pickProbability)'], {}), '(1 - pickProbability)\n', (10022, 10043), False, 'from numpy import log\n'), ((10042, 10053), 'numpy.log', 'log', (['(1 - pA)'], {}), '(1 - pA)\n', (10045, 10053), False, 'from numpy import log\n')] |
import logging
import numpy as np
from gym.spaces import Discrete
from ray.rllib.utils.annotations import override
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import get_global_worker
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.utils.typing import EnvType
logger = logging.getLogger(__name__)
def model_vector_env(env: EnvType) -> BaseEnv:
    """Returns a VectorizedEnv wrapper around the given environment.

    To obtain worker configs, one can call get_global_worker().

    Args:
        env (EnvType): The input environment (of any supported environment
            type) to be convert to a _VectorizedModelGymEnv (wrapped as
            an RLlib BaseEnv).

    Returns:
        BaseEnv: The BaseEnv converted input `env`.
    """
    worker = get_global_worker()
    # Only workers with a non-zero index get the model-based vectorized
    # wrapper; the wrapper rolls out through the learned dynamics model.
    if worker.worker_index:
        env = _VectorizedModelGymEnv(
            make_env=worker.make_sub_env_fn,
            existing_envs=[env],
            num_envs=worker.num_envs,
            observation_space=env.observation_space,
            action_space=env.action_space)
    return BaseEnv.to_base_env(
        env,
        make_env=worker.make_sub_env_fn,
        num_envs=worker.num_envs,
        remote_envs=False,
        remote_env_batch_wait_ms=0)
class _VectorizedModelGymEnv(VectorEnv):
    """Vectorized Environment Wrapper for MB-MPO.

    Primary change is in the `vector_step` method, which calls the dynamics
    models for next_obs "calculation" (instead of the actual env). Also, the
    actual envs need to have two extra methods implemented: `reward(obs)` and
    (optionally) `done(obs)`. If `done` is not implemented, we will assume
    that episodes in the env do not terminate, ever.
    """

    def __init__(self,
                 make_env=None,
                 existing_envs=None,
                 num_envs=1,
                 *,
                 observation_space=None,
                 action_space=None,
                 env_config=None):
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
        # Lazily create any missing sub-envs with the provided env factory.
        while len(self.envs) < num_envs:
            self.envs.append(self.make_env(len(self.envs)))

        # Fall back to the first sub-env's spaces if none were given.
        super().__init__(
            observation_space=observation_space
            or self.envs[0].observation_space,
            action_space=action_space or self.envs[0].action_space,
            num_envs=num_envs)
        # Fetch the learned dynamics model and its device from the (first)
        # policy; vector_step uses them to predict next observations.
        worker = get_global_worker()
        self.model, self.device = worker.foreach_policy(
            lambda x, y: (x.dynamics_model, x.device))[0]

    @override(VectorEnv)
    def vector_reset(self):
        """Override parent to store actual env obs for upcoming predictions.
        """
        self.cur_obs = [e.reset() for e in self.envs]
        return self.cur_obs

    @override(VectorEnv)
    def reset_at(self, index):
        """Override parent to store actual env obs for upcoming predictions.
        """
        obs = self.envs[index].reset()
        self.cur_obs[index] = obs
        return obs

    @override(VectorEnv)
    def vector_step(self, actions):
        if self.cur_obs is None:
            raise ValueError("Need to reset env first")

        # If discrete, need to one-hot actions
        if isinstance(self.action_space, Discrete):
            act = np.array(actions)
            new_act = np.zeros((act.size, act.max() + 1))
            new_act[np.arange(act.size), act] = 1
            actions = new_act.astype("float32")

        # Batch the TD-model prediction.
        obs_batch = np.stack(self.cur_obs, axis=0)
        action_batch = np.stack(actions, axis=0)
        # Predict the next observation, given previous a) real obs
        # (after a reset), b) predicted obs (any other time).
        next_obs_batch = self.model.predict_model_batches(
            obs_batch, action_batch, device=self.device)
        # NOTE(review): +/-1000 clip presumably guards against model-rollout
        # blow-up — confirm these bounds suit the target envs.
        next_obs_batch = np.clip(next_obs_batch, -1000, 1000)

        # Call env's reward function.
        # Note: Each actual env must implement one to output exact rewards.
        rew_batch = self.envs[0].reward(obs_batch, action_batch,
                                        next_obs_batch)

        # If env has a `done` method, use it.
        if hasattr(self.envs[0], "done"):
            dones_batch = self.envs[0].done(next_obs_batch)
        # Otherwise, assume the episode does not end.
        else:
            dones_batch = np.asarray([False for _ in range(self.num_envs)])

        info_batch = [{} for _ in range(self.num_envs)]

        # Predicted observations become the inputs for the next step.
        self.cur_obs = next_obs_batch

        return list(next_obs_batch), list(rew_batch), list(
            dones_batch), info_batch

    @override(VectorEnv)
    def get_unwrapped(self):
        return self.envs
| [
"logging.getLogger",
"numpy.clip",
"ray.rllib.utils.annotations.override",
"numpy.stack",
"numpy.array",
"ray.rllib.env.base_env.BaseEnv.to_base_env",
"ray.rllib.evaluation.rollout_worker.get_global_worker",
"numpy.arange"
] | [((324, 351), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (341, 351), False, 'import logging\n'), ((811, 830), 'ray.rllib.evaluation.rollout_worker.get_global_worker', 'get_global_worker', ([], {}), '()\n', (828, 830), False, 'from ray.rllib.evaluation.rollout_worker import get_global_worker\n'), ((1162, 1297), 'ray.rllib.env.base_env.BaseEnv.to_base_env', 'BaseEnv.to_base_env', (['env'], {'make_env': 'worker.make_sub_env_fn', 'num_envs': 'worker.num_envs', 'remote_envs': '(False)', 'remote_env_batch_wait_ms': '(0)'}), '(env, make_env=worker.make_sub_env_fn, num_envs=worker.\n num_envs, remote_envs=False, remote_env_batch_wait_ms=0)\n', (1181, 1297), False, 'from ray.rllib.env.base_env import BaseEnv\n'), ((2629, 2648), 'ray.rllib.utils.annotations.override', 'override', (['VectorEnv'], {}), '(VectorEnv)\n', (2637, 2648), False, 'from ray.rllib.utils.annotations import override\n'), ((2854, 2873), 'ray.rllib.utils.annotations.override', 'override', (['VectorEnv'], {}), '(VectorEnv)\n', (2862, 2873), False, 'from ray.rllib.utils.annotations import override\n'), ((3092, 3111), 'ray.rllib.utils.annotations.override', 'override', (['VectorEnv'], {}), '(VectorEnv)\n', (3100, 3111), False, 'from ray.rllib.utils.annotations import override\n'), ((4707, 4726), 'ray.rllib.utils.annotations.override', 'override', (['VectorEnv'], {}), '(VectorEnv)\n', (4715, 4726), False, 'from ray.rllib.utils.annotations import override\n'), ((2488, 2507), 'ray.rllib.evaluation.rollout_worker.get_global_worker', 'get_global_worker', ([], {}), '()\n', (2505, 2507), False, 'from ray.rllib.evaluation.rollout_worker import get_global_worker\n'), ((3591, 3621), 'numpy.stack', 'np.stack', (['self.cur_obs'], {'axis': '(0)'}), '(self.cur_obs, axis=0)\n', (3599, 3621), True, 'import numpy as np\n'), ((3645, 3670), 'numpy.stack', 'np.stack', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (3653, 3670), True, 'import numpy as np\n'), ((3941, 3977), 
'numpy.clip', 'np.clip', (['next_obs_batch', '(-1000)', '(1000)'], {}), '(next_obs_batch, -1000, 1000)\n', (3948, 3977), True, 'import numpy as np\n'), ((3355, 3372), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (3363, 3372), True, 'import numpy as np\n'), ((3451, 3470), 'numpy.arange', 'np.arange', (['act.size'], {}), '(act.size)\n', (3460, 3470), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import json
"""
Adapted code from <NAME>
"""
def plot_together(infiles):
    """Overlay pore-radius profiles from one or more JSON result files.

    Args:
        infiles: paths to JSON files containing a ``pathwayProfile`` entry
            with ``s``, ``radiusMean`` and ``radiusSd`` arrays. Each curve is
            labelled with the name of the file's parent directory. Only the
            first four files get a distinct colour (zip truncates beyond
            that).

    Side effects:
        Writes ``output.png`` to the working directory and shows the figure.
    """
    sns.set_style("ticks")
    sns.set_context(font_scale=3, context='paper')
    sns.set_context({"figure.figsize": (20, 20)})
    # Load each profile and derive its legend label from the directory name.
    dict_list = []
    lab_list = []
    colors = ['black', 'red', 'blue', 'green']
    for f in infiles:
        with open(f) as json_file:
            fdict = json.load(json_file)
            dict_list.append(fdict)
            homedir = os.path.basename(os.path.dirname(f))
            lab_list.append(homedir)
    fig, ax = plt.subplots(figsize=(8, 14))
    for d, l, c in zip(dict_list, lab_list, colors):
        profile = d["pathwayProfile"]
        # Radii appear to be stored in nm and are converted to Angstrom
        # (x10); the pore coordinate is negated so the plot runs along -s.
        s = np.array(profile["s"])
        radius_mean = np.array(profile["radiusMean"]) * 10
        radius_sd = np.array(profile["radiusSd"]) * 10
        ax.plot(radius_mean, -s, color=c, linewidth=4, label=l)
        # Shade +/- one standard deviation around the mean radius.
        ax.fill_betweenx(-s,
                         radius_mean - radius_sd,
                         radius_mean + radius_sd,
                         facecolor="#000000",
                         alpha=0.2)
    ax.set_xlim(0, 15)
    ax.set_ylim(-6, 11)
    # Raw string: keeps the LaTeX \AA from being read as a (deprecated)
    # Python escape sequence. The rendered label is unchanged.
    ax.set_xlabel(r'Radius ($\AA$)')
    ax.set_ylabel('Distance along pore (nm)')
    # configure legend
    l = plt.legend()
    l.draw_frame(False)
    # configure spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_linewidth(4)
    ax.spines['bottom'].set_linewidth(4)
    # configure ticks
    ax.xaxis.set_tick_params(width=4)
    ax.yaxis.set_tick_params(width=4)
    plt.savefig('output.png', bbox_inches='tight')
    plt.show()
if __name__ == "__main__":
    import argparse

    # Parse the list of input JSON files and hand them to the plotter.
    cli = argparse.ArgumentParser()
    cli.add_argument("-i", "--infile", nargs='+',
                     help="Name of the input file(s).")
    parsed = cli.parse_args()
    plot_together(parsed.infile)
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"seaborn.set_context",
"seaborn.set_style",
"os.path.dirname",
"numpy.array",
"json.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((164, 186), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (177, 186), True, 'import seaborn as sns\n'), ((191, 237), 'seaborn.set_context', 'sns.set_context', ([], {'font_scale': '(3)', 'context': '"""paper"""'}), "(font_scale=3, context='paper')\n", (206, 237), True, 'import seaborn as sns\n'), ((241, 286), 'seaborn.set_context', 'sns.set_context', (["{'figure.figsize': (20, 20)}"], {}), "({'figure.figsize': (20, 20)})\n", (256, 286), True, 'import seaborn as sns\n'), ((628, 657), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 14)'}), '(figsize=(8, 14))\n', (640, 657), True, 'import matplotlib.pyplot as plt\n'), ((1388, 1400), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1398, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1761), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {'bbox_inches': '"""tight"""'}), "('output.png', bbox_inches='tight')\n", (1726, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1774, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1862, 1864), False, 'import argparse\n'), ((467, 487), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (476, 487), False, 'import json\n'), ((559, 577), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (574, 577), False, 'import os\n'), ((899, 940), 'numpy.array', 'np.array', (["d['pathwayProfile']['radiusSd']"], {}), "(d['pathwayProfile']['radiusSd'])\n", (907, 940), True, 'import numpy as np\n'), ((730, 773), 'numpy.array', 'np.array', (["d['pathwayProfile']['radiusMean']"], {}), "(d['pathwayProfile']['radiusMean'])\n", (738, 773), True, 'import numpy as np\n'), ((795, 829), 'numpy.array', 'np.array', (["d['pathwayProfile']['s']"], {}), "(d['pathwayProfile']['s'])\n", (803, 829), True, 'import numpy as np\n'), ((971, 
1005), 'numpy.array', 'np.array', (["d['pathwayProfile']['s']"], {}), "(d['pathwayProfile']['s'])\n", (979, 1005), True, 'import numpy as np\n'), ((1023, 1066), 'numpy.array', 'np.array', (["d['pathwayProfile']['radiusMean']"], {}), "(d['pathwayProfile']['radiusMean'])\n", (1031, 1066), True, 'import numpy as np\n'), ((1099, 1142), 'numpy.array', 'np.array', (["d['pathwayProfile']['radiusMean']"], {}), "(d['pathwayProfile']['radiusMean'])\n", (1107, 1142), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import softmax
def create_portfolio(num_assets, num_assets_in_portfolio):
    '''
    Build a random long-only portfolio over a universe of assets.

    Args:
        num_assets: the number of assets in our stock universe
        num_assets_in_portfolio: a number in the range [1, num_assets]
    Requires:
        1 <= num_assets_in_portfolio <= num_assets
    Returns:
        portfolio_weights:
            - has shape (num_assets, 1)
            - exactly num_assets_in_portfolio assets have non-zero weights;
              all other assets have weights == 0
            - sum(portfolio_weights) == 1
    '''
    # Draw raw weights for the chosen assets and normalize them to sum to 1.
    asset_weights = softmax(np.random.random(num_assets_in_portfolio))
    # Pick *distinct* assets. The previous code used the default
    # replace=True, so a repeated index overwrote its earlier weight: fewer
    # than num_assets_in_portfolio assets were weighted and the weights no
    # longer summed to 1. replace=False fixes both problems.
    asset_indices = np.random.choice(a=np.arange(num_assets),
                                     size=num_assets_in_portfolio,
                                     replace=False)
    # Scatter the normalized weights into the full-universe weight vector.
    portfolio_weights = np.zeros(shape=(num_assets, 1))
    portfolio_weights[asset_indices, 0] = asset_weights
    return portfolio_weights
def create_list_of_portfolios(num_assets):
    '''
    Returns:
        portfolios:
            - a list of portfolios
    '''
    # Ten random portfolios for every size from 2 up to num_assets - 1
    # (a portfolio needs at least two assets).
    portfolios = [
        create_portfolio(num_assets, size)
        for size in range(2, num_assets)
        for _ in range(10)
    ]
return portfolios | [
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"numpy.arange"
] | [((1097, 1128), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_assets, 1)'}), '(shape=(num_assets, 1))\n', (1105, 1128), True, 'import numpy as np\n'), ((764, 787), 'numpy.array', 'np.array', (['asset_weights'], {}), '(asset_weights)\n', (772, 787), True, 'import numpy as np\n'), ((681, 699), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (697, 699), True, 'import numpy as np\n'), ((966, 987), 'numpy.arange', 'np.arange', (['num_assets'], {}), '(num_assets)\n', (975, 987), True, 'import numpy as np\n')] |
import numpy as np
def matrix_minor(A, remove_row_idx, remove_col_idx):
    """
    Returns a minor matrix, cut down from A by removing
    one of its rows and one of its columns.

    The result is always a float array (matching the historical behaviour
    of filling a freshly allocated ``np.empty`` float buffer).

    >>> import numpy as np
    >>> A = np.array([[4, 3, 5], [3, 2, 6], [3, 2, 7]])
    >>> A_inv = matrix_minor(A, 2, 2)
    >>> A_inv
    array([[4., 3.],
           [3., 2.]])
    """
    m, n = A.shape
    assert m > 1 and n > 1
    # Also reject negative indices: the previous row-matching loop never
    # matched a negative index, which over-filled the output buffer and
    # raised an IndexError instead of removing a row.
    assert 0 <= remove_row_idx <= m - 1, 'Row index out of bounds'
    assert 0 <= remove_col_idx <= n - 1, 'Column index out of bounds'
    # Boolean masks select every row/column except the ones being removed;
    # np.ix_ combines them into a single fancy-index selection.
    keep_rows = np.arange(m) != remove_row_idx
    keep_cols = np.arange(n) != remove_col_idx
    return A[np.ix_(keep_rows, keep_cols)].astype(float)
if __name__ == '__main__':
    # Run this file's tests with pytest when executed as a script (the
    # doctest above is collected only if pytest is configured for doctests).
    import pytest
    pytest.main([__file__])
| [
"numpy.empty",
"numpy.arange",
"pytest.main"
] | [((554, 584), 'numpy.empty', 'np.empty', ([], {'shape': '(m - 1, n - 1)'}), '(shape=(m - 1, n - 1))\n', (562, 584), True, 'import numpy as np\n'), ((895, 918), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (906, 918), False, 'import pytest\n'), ((638, 650), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (647, 650), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#/usr/bin/python2
from __future__ import print_function
import codecs
import os
import argparse
import tensorflow as tf
import numpy as np
from nltk.collocations import BigramCollocationFinder
from nltk.probability import FreqDist
import math
#from hyperparams import Hyperparams as hp
from data_load import load_test_data, load_de_vocab, load_en_vocab
from train import Graph
from nltk.translate.bleu_score import corpus_bleu
def cal_Distinct(corpus):
    """
    Calculates unigram and bigram diversity
    Args:
        corpus: tokenized list of sentences sampled
    Returns:
        uni_diversity: distinct-1 score
        bi_diversity: distinct-2 score
    """
    if not corpus:
        # An empty corpus has no n-grams; report zero diversity instead of
        # dividing by zero.
        return 0.0, 0.0
    bigram_finder = BigramCollocationFinder.from_words(corpus)
    # float() forces true division: this file targets Python 2 (see the
    # shebang), where int / int floor-divides and would yield 0 for any
    # realistic corpus.
    bi_diversity = len(bigram_finder.ngram_fd) / float(bigram_finder.N)
    dist = FreqDist(corpus)
    uni_diversity = len(dist) / float(len(corpus))
    return uni_diversity, bi_diversity
def cal_BERTScore(refer, candidate):
    """
    Computes the mean BERTScore of candidate sentences against references,
    replacing any NaN scores with a neutral 0.5.
    """
    # NOTE(review): `score` is not imported anywhere in this file -- it
    # presumably comes from the bert-score package; confirm the import.
    _, _, bert_scores = score(candidate, refer,
                           bert="bert-base-uncased", no_idf=True)
    bert_scores = bert_scores.tolist()
    # Use a distinct loop variable: under Python 2 (this file's target) the
    # list-comprehension variable leaks into the enclosing scope, and the
    # old name `score` would have clobbered the scoring function above.
    bert_scores = [0.5 if math.isnan(s) else s for s in bert_scores]
    return np.mean(bert_scores)
def cal_acc_f1(tp, fn, fp, tn):
    """Return (macro-F1, micro-F1, accuracy) from binary confusion counts.

    Args:
        tp, fn, fp, tn: confusion-matrix counts (positive label first).
    """
    # Convert up front: this file targets Python 2, where the integer counts
    # would otherwise floor-divide and corrupt every ratio below.
    tp, fn, fp, tn = float(tp), float(fn), float(fp), float(tn)
    acc = (tp + tn) / (tp + fn + fp + tn)
    precision_p, precision_n = tp / (tp + fp), tn / (tn + fn)
    recall_p, recall_n = tp / (tp + fn), tn / (tn + fp)
    avg_pre, avg_recall = (precision_n + precision_p) / 2, (recall_p + recall_n) / 2
    macro_f1 = 2 * avg_pre * avg_recall / (avg_pre + avg_recall)
    # NOTE(review): as written, micro-precision == micro-recall == accuracy,
    # so micro-F1 always equals accuracy.
    mi_pre = (tp + tn) / (tp + fp + tn + fn)
    mi_rec = (tp + tn) / (tp + fn + tn + fp)
    micro_f1 = 2 * mi_pre * mi_rec / (mi_pre + mi_rec)
    return macro_f1, micro_f1, acc
def cal_acc_P_R_F1(tp, fn, fp, tn):
    """Return (precision, recall, F1, accuracy) for the positive label,
    each rounded to 4 decimal places.
    """
    # Convert up front: this file targets Python 2, where the integer counts
    # would otherwise floor-divide and corrupt every ratio below.
    tp, fn, fp, tn = float(tp), float(fn), float(fp), float(tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    acc = (tp + tn) / (tp + fn + fp + tn)
    return round(precision, 4), round(recall, 4), round(f1, 4), round(acc, 4)
def eval(hp):
    """Decode the test set greedily and report BLEU, PPL and Distinct-1/2.

    Restores the latest checkpoint from ``hp.logdir``, decodes the test set
    autoregressively, writes sources/targets/hypotheses to
    ``results/<model_name>`` and prints the evaluation metrics.
    """
    # Load graph
    g = Graph(hp=hp, is_training=False)
    print("Graph loaded")

    # Load data
    X, X_image, X_length, Y, Sources, Targets, X_turn_number, SRC_emotion, TGT_emotion, Speakers, A = load_test_data(hp)
    de2idx, idx2de = load_de_vocab(hp)
    en2idx, idx2en = load_en_vocab(hp)

    # Start session
    with g.graph.as_default():
        sv = tf.train.Supervisor()
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            ## Restore parameters
            sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
            print("Restored!")

            ## Get model name (close the checkpoint file promptly).
            with open(hp.logdir + '/checkpoint', 'r') as ckpt:
                mname = ckpt.read().split('"')[1]  # model name

            ## Inference
            if not os.path.exists('results'):
                os.mkdir('results')
            with codecs.open("results/" + mname, "w", "utf-8") as fout:
                list_of_refs, hypotheses, test_loss = [], [], []
                for i in range(len(X) // hp.batch_size):
                    ### Get mini-batches
                    x = X[i*hp.batch_size: (i+1)*hp.batch_size]
                    x_length = X_length[i*hp.batch_size: (i+1)*hp.batch_size]
                    y = Y[i*hp.batch_size: (i+1)*hp.batch_size]
                    x_emotion = SRC_emotion[i*hp.batch_size: (i+1)*hp.batch_size]
                    speaker = Speakers[i*hp.batch_size: (i+1)*hp.batch_size]
                    x_image = X_image[i*hp.batch_size: (i+1)*hp.batch_size]
                    a = A[i*hp.batch_size: (i+1)*hp.batch_size]
                    # BUG FIX: x_turn_number was never defined, so the feed
                    # dict below raised a NameError; slice it like the other
                    # batch inputs.
                    x_turn_number = X_turn_number[i*hp.batch_size: (i+1)*hp.batch_size]
                    sources = Sources[i*hp.batch_size: (i+1)*hp.batch_size]
                    targets = Targets[i*hp.batch_size: (i+1)*hp.batch_size]
                    batch_loss = sess.run(g.mean_loss, {g.x: x, g.x_image: x_image, g.x_length: x_length, g.y: y, g.x_emotion: x_emotion, g.speaker: speaker, g.A: a, g.x_turn_number: x_turn_number})
                    test_loss.append(batch_loss)

                    ### Autoregressive inference: decode one position at a
                    ### time, feeding the tokens predicted so far back in.
                    preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)
                    for j in range(hp.maxlen):
                        _preds = sess.run(g.preds, {g.x: x, g.x_length: x_length, g.y: preds})
                        preds[:, j] = _preds[:, j]

                    ### Write to file
                    for source, target, pred in zip(sources, targets, preds):  # sentence-wise
                        got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
                        fout.write("- source: " + source + "\n")
                        fout.write("- expected: " + target + "\n")
                        fout.write("- got: " + got + "\n\n")
                        fout.flush()

                        # BLEU is only scored on pairs where both reference
                        # and hypothesis have more than 3 tokens.
                        ref = target.split(u"</d>")[1].split()
                        hypothesis = got.split()
                        if len(ref) > 3 and len(hypothesis) > 3:
                            list_of_refs.append([ref])
                            hypotheses.append(hypothesis)

                ## Calculate bleu score
                score = corpus_bleu(list_of_refs, hypotheses)
                fout.write("Test Bleu Score = " + str(100*score))
                print("Test Bleu Score = " + str(100*score))
                print("eval PPL = %.5lf" % (round(math.exp(np.mean(test_loss)), 4)))
                print("eval loss = %.5lf" % (np.mean(test_loss)))

                # Distinct-1, Distinct-2 over all hypothesis tokens.
                candidates = []
                for line in hypotheses:
                    candidates.extend(line)
                distinct_1, distinct_2 = cal_Distinct(candidates)
                print('Distinct-1:' + str(round(distinct_1, 4)) + 'Distinct-2:' + str(round(distinct_2, 4)))
if __name__ == '__main__':
    # Command-line configuration for evaluation; mirrors training options.
    parser = argparse.ArgumentParser(description='Translate script')
    base_dir = 'your_data_path'
    add = parser.add_argument
    # Corpus locations.
    add('--source_train', type=str, default=base_dir + 'corpora/train_query.txt', help='src train file')
    add('--target_train', type=str, default=base_dir + 'corpora/train_answer.txt', help='src train file')
    add('--source_test', type=str, default=base_dir + 'corpora/test_query.txt', help='src test file')
    add('--target_test', type=str, default=base_dir + 'corpora/test_answer.txt', help='tgt test file')
    add('--source_dev', type=str, default=base_dir + 'corpora/dev_query.txt', help='src dev file')
    add('--target_dev', type=str, default=base_dir + 'corpora/dev_answer.txt', help='tgt dev file')
    # Checkpoints and optimization.
    add('--logdir', type=str, default='logdir2020_test', help='logdir')
    add('--batch_size', type=int, default=32, help='batch size')
    add('--lr', type=float, default=1e-4, help='learning rate')
    add('--dropout_rate', type=float, default=0.1, help='dropout ratio')
    # Model architecture.
    add('--hidden_units', type=int, default=512,
        help='context encoder hidden size')
    add('--num_blocks', type=int, default=6, help='num_blocks')
    add('--num_heads', type=int, default=8, help='num_heads')
    add('--maxlen', type=int, default=50, help='maxlen')
    add('--min_cnt', type=int, default=1, help='min_cnt')
    add('--num_epochs', type=int, default=20000, help='num_epochs')
    add('--num_layers', type=int, default=1, help='num_layers')
    add('--max_turn', type=int, default=33, help='max_turn')
    add('--sinusoid', dest='sinusoid', action='store_true')
    hp = parser.parse_args()
    print('[!] Parameters:')
    print(hp)
    eval(hp)
    print("Done")
| [
"numpy.mean",
"data_load.load_test_data",
"os.path.exists",
"nltk.translate.bleu_score.corpus_bleu",
"tensorflow.ConfigProto",
"data_load.load_de_vocab",
"argparse.ArgumentParser",
"nltk.collocations.BigramCollocationFinder.from_words",
"nltk.probability.FreqDist",
"data_load.load_en_vocab",
"ma... | [((712, 754), 'nltk.collocations.BigramCollocationFinder.from_words', 'BigramCollocationFinder.from_words', (['corpus'], {}), '(corpus)\n', (746, 754), False, 'from nltk.collocations import BigramCollocationFinder\n'), ((832, 848), 'nltk.probability.FreqDist', 'FreqDist', (['corpus'], {}), '(corpus)\n', (840, 848), False, 'from nltk.probability import FreqDist\n'), ((1221, 1241), 'numpy.mean', 'np.mean', (['bert_scores'], {}), '(bert_scores)\n', (1228, 1241), True, 'import numpy as np\n'), ((2182, 2213), 'train.Graph', 'Graph', ([], {'hp': 'hp', 'is_training': '(False)'}), '(hp=hp, is_training=False)\n', (2187, 2213), False, 'from train import Graph\n'), ((2364, 2382), 'data_load.load_test_data', 'load_test_data', (['hp'], {}), '(hp)\n', (2378, 2382), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((2418, 2435), 'data_load.load_de_vocab', 'load_de_vocab', (['hp'], {}), '(hp)\n', (2431, 2435), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((2457, 2474), 'data_load.load_en_vocab', 'load_en_vocab', (['hp'], {}), '(hp)\n', (2470, 2474), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((6236, 6291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Translate script"""'}), "(description='Translate script')\n", (6259, 6291), False, 'import argparse\n'), ((2564, 2585), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {}), '()\n', (2583, 2585), True, 'import tensorflow as tf\n'), ((1155, 1172), 'math.isnan', 'math.isnan', (['score'], {}), '(score)\n', (1165, 1172), False, 'import math\n'), ((2746, 2783), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['hp.logdir'], {}), '(hp.logdir)\n', (2772, 2783), True, 'import tensorflow as tf\n'), ((3035, 3060), 'os.path.exists', 'os.path.exists', (['"""results"""'], {}), "('results')\n", (3049, 3060), False, 'import os\n'), ((3062, 3081), 'os.mkdir', 'os.mkdir', 
(['"""results"""'], {}), "('results')\n", (3070, 3081), False, 'import os\n'), ((3099, 3144), 'codecs.open', 'codecs.open', (["('results/' + mname)", '"""w"""', '"""utf-8"""'], {}), "('results/' + mname, 'w', 'utf-8')\n", (3110, 3144), False, 'import codecs\n'), ((5510, 5547), 'nltk.translate.bleu_score.corpus_bleu', 'corpus_bleu', (['list_of_refs', 'hypotheses'], {}), '(list_of_refs, hypotheses)\n', (5521, 5547), False, 'from nltk.translate.bleu_score import corpus_bleu\n'), ((2625, 2666), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2639, 2666), True, 'import tensorflow as tf\n'), ((4317, 4363), 'numpy.zeros', 'np.zeros', (['(hp.batch_size, hp.maxlen)', 'np.int32'], {}), '((hp.batch_size, hp.maxlen), np.int32)\n', (4325, 4363), True, 'import numpy as np\n'), ((5801, 5819), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (5808, 5819), True, 'import numpy as np\n'), ((5732, 5750), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (5739, 5750), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Fix an (any) KHARMA restart file so that KHARMA can restart from it
# this works around a bug in Parthenon w.r.t. mesh sizes
import sys
import numpy as np
import h5py
outf = h5py.File(sys.argv[1], "r+")
# Parthenon records the full (interior + ghost) extent in 'MeshBlockSize',
# but expects the ghost-free size when restarting, so strip 2*NGhost cells
# per dimension when ghosts are included, clamping each dimension to at
# least one cell.
# TODO: running this script twice shrinks the size again (not idempotent).
info = outf['Info'].attrs
block_size = info['MeshBlockSize'][()]
ghost_cells = 2 * info['IncludesGhost'][()] * info['NGhost'][()]
info.modify('MeshBlockSize',
            np.maximum(block_size - ghost_cells, np.ones_like(block_size)))
outf.close() | [
"numpy.ones_like",
"h5py.File"
] | [((202, 230), 'h5py.File', 'h5py.File', (['sys.argv[1]', '"""r+"""'], {}), "(sys.argv[1], 'r+')\n", (211, 230), False, 'import h5py\n'), ((569, 622), 'numpy.ones_like', 'np.ones_like', (["outf['Info'].attrs['MeshBlockSize'][()]"], {}), "(outf['Info'].attrs['MeshBlockSize'][()])\n", (581, 622), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from psiaudio.stim import Cos2EnvelopeFactory, ToneFactory
@pytest.fixture
def factory(fs, stim_calibration):
    """A 5 s, level-94, 1 kHz tone wrapped in a 0.5 s cos^2 envelope."""
    carrier = ToneFactory(fs=fs, level=94, frequency=1000,
                          calibration=stim_calibration)
    return Cos2EnvelopeFactory(fs=fs, start_time=0, rise_time=0.5,
                               duration=5, input_factory=carrier)
return envelope
def test_factory_reset(factory):
    """Resetting the factory must reproduce the exact same waveform."""
    total = factory.n_samples_remaining()
    first_pass = factory.get_samples_remaining()
    # Once exhausted, any further samples are silence.
    assert np.all(factory.next(total) == 0)
    assert total == first_pass.shape[-1]
    factory.reset()
    second_pass = factory.get_samples_remaining()
    assert np.all(factory.next(total) == 0)
    assert_array_equal(first_pass, second_pass)
def test_factory_chunks(factory, chunksize):
    """Chunked generation must match the all-at-once waveform."""
    total = factory.n_samples_remaining()
    reference = factory.get_samples_remaining()
    factory.reset()
    pieces = []
    while not factory.is_complete():
        pieces.append(factory.next(chunksize))
    chunked = np.concatenate(pieces, axis=-1)
    # The final chunk may overrun, so compare only the first `total` samples.
    assert_array_equal(reference, chunked[:total])
| [
"psiaudio.stim.ToneFactory",
"psiaudio.stim.Cos2EnvelopeFactory",
"numpy.concatenate",
"numpy.all",
"numpy.testing.assert_array_equal"
] | [((202, 276), 'psiaudio.stim.ToneFactory', 'ToneFactory', ([], {'fs': 'fs', 'level': '(94)', 'frequency': '(1000)', 'calibration': 'stim_calibration'}), '(fs=fs, level=94, frequency=1000, calibration=stim_calibration)\n', (213, 276), False, 'from psiaudio.stim import Cos2EnvelopeFactory, ToneFactory\n'), ((315, 406), 'psiaudio.stim.Cos2EnvelopeFactory', 'Cos2EnvelopeFactory', ([], {'fs': 'fs', 'start_time': '(0)', 'rise_time': '(0.5)', 'duration': '(5)', 'input_factory': 'tone'}), '(fs=fs, start_time=0, rise_time=0.5, duration=5,\n input_factory=tone)\n', (334, 406), False, 'from psiaudio.stim import Cos2EnvelopeFactory, ToneFactory\n'), ((635, 666), 'numpy.all', 'np.all', (['(full_waveform_post == 0)'], {}), '(full_waveform_post == 0)\n', (641, 666), True, 'import numpy as np\n'), ((844, 881), 'numpy.all', 'np.all', (['(full_waveform_reset_post == 0)'], {}), '(full_waveform_reset_post == 0)\n', (850, 881), True, 'import numpy as np\n'), ((886, 940), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['full_waveform', 'full_waveform_reset'], {}), '(full_waveform, full_waveform_reset)\n', (904, 940), False, 'from numpy.testing import assert_array_equal\n'), ((1212, 1243), 'numpy.concatenate', 'np.concatenate', (['chunks'], {'axis': '(-1)'}), '(chunks, axis=-1)\n', (1226, 1243), True, 'import numpy as np\n'), ((1248, 1293), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['full_waveform', 'chunks[:n]'], {}), '(full_waveform, chunks[:n])\n', (1266, 1293), False, 'from numpy.testing import assert_array_equal\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.