code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Demo plot: x*sin(1/x) versus the line y = x, rendered via the pgf backend.
from pathlib import PurePath
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Resolve the shared style directory relative to this script's location.
filedir = PurePath(__file__).parent
styledir = filedir.parents[1] / "./style"
plt.style.use(str(styledir / "./base.mplstyle"))
# NOTE(review): mpl.use() is called after importing pyplot; backend switching
# at this point is not guaranteed for all backends -- confirm pgf takes effect.
mpl.use("pgf")
mpl.rc("pgf", preamble="\\usepackage{" + (styledir / "./matplotlib").as_posix() + "}")
x = 0.03 + np.linspace(0, 0.22, 600)  # offset avoids the x=0 singularity of sin(1/x)
plt.figure()
plt.plot(x, x * np.sin(1 / x), label=r"$x\sin(1/x)$")
plt.plot(x, x, label="$x$")
plt.legend()
plt.savefig(str(filedir / "./asymptotic.pdf"))
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.sin",
"pathlib.PurePath",
"numpy.linspace"
] | [((234, 248), 'matplotlib.use', 'mpl.use', (['"""pgf"""'], {}), "('pgf')\n", (241, 248), True, 'import matplotlib as mpl\n'), ((375, 387), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((442, 469), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'label': '"""$x$"""'}), "(x, x, label='$x$')\n", (450, 469), True, 'import matplotlib.pyplot as plt\n'), ((470, 482), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (480, 482), True, 'import matplotlib.pyplot as plt\n'), ((116, 134), 'pathlib.PurePath', 'PurePath', (['__file__'], {}), '(__file__)\n', (124, 134), False, 'from pathlib import PurePath\n'), ((348, 373), 'numpy.linspace', 'np.linspace', (['(0)', '(0.22)', '(600)'], {}), '(0, 0.22, 600)\n', (359, 373), True, 'import numpy as np\n'), ((404, 417), 'numpy.sin', 'np.sin', (['(1 / x)'], {}), '(1 / x)\n', (410, 417), True, 'import numpy as np\n')] |
from math import ceil
import numpy as np
import torchvision.transforms.functional as F
class TianchiOCRDynamicResize(object):
    """Resize an image (and its polygon labels) so its sides become the
    largest multiple of ``divisible_by`` not exceeding the short side.

    label[0] is an (N, 2k) array of interleaved x/y polygon coordinates;
    it is rescaled in place to match the resized image.
    """

    def __init__(self, divisible_by=16):
        self.divider = divisible_by

    def __call__(self, img, label):
        size = img.size
        short_side = min(size)
        try_times = int(ceil(short_side / self.divider))
        for i in range(try_times + 2):
            if i * self.divider > short_side:
                # largest multiple of the divider that fits the short side
                new_size = (i - 1) * self.divider
                img = F.resize(img, (new_size, new_size))
                label[0][:, ::2] *= new_size / size[0]
                label[0][:, 1::2] *= new_size / size[1]
                # BUG FIX: without this break every subsequent iteration also
                # satisfies the condition, so the image was resized repeatedly
                # and the labels were rescaled by compounding factors computed
                # against the ORIGINAL size, leaving them mismatched with the
                # final image.
                break
        return img, label
class TianchiOCRClip(object):
    """Clip polygon coordinates in ``label[0]`` to lie inside the image.

    label[0] is an (N, 2k) array of interleaved coordinates: even columns
    are x (clipped to [0, width-1]), odd columns are y (clipped to
    [0, height-1]).
    """

    def __call__(self, img, label):
        label[0][:, ::2] = np.minimum(np.maximum(label[0][:, ::2], 0), img.size[0] - 1)
        # BUG FIX: the y-coordinate line previously read the x columns
        # (label[0][:, ::2]) inside np.maximum, overwriting every y value
        # with a clipped copy of the x values.
        label[0][:, 1::2] = np.minimum(np.maximum(label[0][:, 1::2], 0), img.size[1] - 1)
        return img, label
class TianchiPolygonsToBBoxes(object):
    """Convert interleaved-x/y polygon annotations into axis-aligned
    bounding boxes of shape (N, 4) as (xmin, ymin, xmax, ymax)."""

    def __call__(self, img, label):
        polys = label[0]
        xs = polys[:, ::2]
        ys = polys[:, 1::2]
        bboxes = np.column_stack(
            (xs.min(axis=1), ys.min(axis=1), xs.max(axis=1), ys.max(axis=1))
        )
        return img, (bboxes, label[1])
| [
"math.ceil",
"torchvision.transforms.functional.resize",
"numpy.maximum",
"numpy.hstack"
] | [((1296, 1331), 'numpy.hstack', 'np.hstack', (['(xmin, ymin, xmax, ymax)'], {}), '((xmin, ymin, xmax, ymax))\n', (1305, 1331), True, 'import numpy as np\n'), ((322, 353), 'math.ceil', 'ceil', (['(short_side / self.divider)'], {}), '(short_side / self.divider)\n', (326, 353), False, 'from math import ceil\n'), ((799, 830), 'numpy.maximum', 'np.maximum', (['label[0][:, ::2]', '(0)'], {}), '(label[0][:, ::2], 0)\n', (809, 830), True, 'import numpy as np\n'), ((888, 919), 'numpy.maximum', 'np.maximum', (['label[0][:, ::2]', '(0)'], {}), '(label[0][:, ::2], 0)\n', (898, 919), True, 'import numpy as np\n'), ((512, 547), 'torchvision.transforms.functional.resize', 'F.resize', (['img', '(new_size, new_size)'], {}), '(img, (new_size, new_size))\n', (520, 547), True, 'import torchvision.transforms.functional as F\n')] |
import argparse, os
import torch
from torch.autograd import Variable
from scipy.ndimage import imread
from PIL import Image
import numpy as np
import time, math
#import matplotlib.pyplot as plt
import os
import easyargs
import progressbar
import imageio
import glob
import cv2
# Command-line interface for the VDSR super-resolution demo.
# --in_folder / --output_dir drive the batch mode used by main();
# --image / --scale are leftovers from the original single-image demo.
parser = argparse.ArgumentParser(description="PyTorch VDSR Demo")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="model/model_epoch_50.pth", type=str, help="model path")
parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
parser.add_argument("--in_folder", default=None, type=str, help="input folder")
parser.add_argument("--output_dir", default=None, type=str, help="output folder")
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio between prediction and ground truth.

    Both arrays are cropped by `shave_border` pixels on every side
    (using the prediction's height/width) before comparison. Returns
    100 when the images are identical.
    """
    h, w = pred.shape[:2]
    window = (slice(shave_border, h - shave_border),
              slice(shave_border, w - shave_border))
    diff = pred[window] - gt[window]
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
def colorize(y, ycbcr):
    """Merge a predicted luma plane `y` with the chroma planes of `ycbcr`.

    Returns an HxWx3 uint8 array: channel 0 is `y`, channels 1 and 2 are
    copied from `ycbcr`.
    """
    out = np.empty((y.shape[0], y.shape[1], 3), np.uint8)
    out[..., 0] = y
    out[..., 1] = ycbcr[..., 1]
    out[..., 2] = ycbcr[..., 2]
    return out
def upscale_function(image, opt):
    """Run the VDSR network on the luma channel of `image`.

    image: HxWx3 array; channel 0 is treated as the luma (Y) plane --
           presumably YCbCr/YCrCb ordered by the caller (see
           process_image) -- TODO confirm channel order.
    opt:   parsed argparse namespace (uses .cuda, .gpus, .model).

    Returns an HxWx3 uint8 array combining the model-predicted luma with
    the original chroma channels (via colorize).
    """
    cuda = opt.cuda
    if cuda:
        #print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
    # checkpoint stores the network under the "model" key; map_location keeps
    # tensors on CPU so the file loads even without a GPU
    model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
    # im_gt_ycbcr = imread("Set5/" + opt.image + ".bmp", mode="YCbCr")
    # im_b_ycbcr = imread("Set5/"+ opt.image + "_scale_"+ str(opt.scale) + ".bmp", mode="YCbCr")
    # im_gt_y = im_gt_ycbcr[:,:,0].astype(float)
    im_b_y = image[:,:,0].astype(float)
    # psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=opt.scale)
    # normalize to [0, 1] and reshape to NCHW (batch=1, channels=1)
    im_input = im_b_y/255.
    im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])
    if cuda:
        model = model.cuda()
        im_input = im_input.cuda()
    else:
        model = model.cpu()
    start_time = time.time()
    out = model(im_input)
    elapsed_time = time.time() - start_time  # kept for the commented-out timing print below
    out = out.cpu()
    # back to a [0, 255] uint8-range luma plane
    im_h_y = out.data[0].numpy().astype(np.float32)
    im_h_y = im_h_y * 255.
    im_h_y[im_h_y < 0] = 0
    im_h_y[im_h_y > 255.] = 255.
    # psnr_predicted = PSNR(im_gt_y, im_h_y[0,:,:], shave_border=opt.scale)
    im_h = colorize(im_h_y[0,:,:], image)
    # im_gt = Image.fromarray(im_gt_ycbcr, "YCbCr").convert("RGB")
    #im_b = Image.fromarray(im_b_ycbcr, "YCbCr").convert("RGB")
    #print("Scale=",opt.scale)
    # print("PSNR_predicted=", psnr_predicted)
    # print("PSNR_bicubic=", psnr_bicubic)
    #print("It takes {}s for processing".format(elapsed_time))
    return im_h
    # fig = plt.figure()
    # ax = plt.subplot("131")
    # ax.imshow(im_gt)
    # ax.set_title("GT")
    #
    # ax = plt.subplot("132")
    # ax.imshow(im_b)
    # ax.set_title("Input(bicubic)")
    #
    # ax = plt.subplot("133")
    # ax.imshow(im_h)
    # ax.set_title("Output(vdsr)")
    # plt.show()
def folders_in(directory, recursive=True):
    """Collect `directory` plus the (base)names of folders found below it.

    With recursive=False, returns only the names of the first level of
    subdirectories. Note the returned entries after the first are bare
    names from os.walk, not full paths.
    """
    collected = [directory]
    # silly hack to handle file streams which respond only after query
    for _root, dirnames, _filenames in os.walk(directory):
        if not recursive:
            return dirnames
        collected.extend(dirnames)
    return collected
def filter_files(filenames, extensions):
    """Keep filenames whose (lowercased, dotted) extension is in `extensions`.

    Files containing '_VDSR' are products of this script and are skipped
    so reprocessing does not cascade.
    """
    return [name for name in filenames
            if os.path.splitext(name)[-1].lower() in extensions and '_VDSR' not in name]


def files_in(directory, extensions, recursive=False):
    """Return paths of files under `directory` matching `extensions`.

    With recursive=False only files directly inside `directory` are
    returned (the walk is cut short after the first level).
    """
    all_files = []
    for root, dirnames, filenames in os.walk(directory):
        curr_files = filter_files(filenames, extensions)
        if curr_files:
            # BUG FIX: join with the directory currently being walked
            # (root). Joining with the top-level `directory` produced
            # non-existent paths for files found in subdirectories when
            # recursive=True.
            curr_files = [os.path.join(root, filename) for filename in curr_files]
            all_files.extend(curr_files)
        if not recursive:
            return all_files
    return all_files
def process_image(image, opt):
    """Upscale one frame: convert to YCrCb, run VDSR on it, convert back.

    NOTE(review): this uses COLOR_BGR2YCR_CB (Y, Cr, Cb order) while the
    commented-out code around colorize/upscale_function refers to YCbCr;
    only channel 0 (luma) is fed to the model so the chroma ordering is
    preserved round-trip, but confirm the callers' channel assumptions.
    """
    image = image.astype(np.float32)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    output_image = upscale_function(image, opt)
    output_image = (output_image).astype(np.uint8)
    output_image = cv2.cvtColor(output_image, cv2.COLOR_YCR_CB2BGR)
    return output_image
def process_out_file_path(file_name, output_dir):
    """Build the output path for `file_name`, inserting a '_VDSR' suffix
    before the extension, and make sure the target directory exists.

    If output_dir is None the input file's own directory is used.
    """
    if output_dir is None:
        output_dir = os.path.abspath(os.path.dirname(file_name))
    # BUG FIX: the existence check/makedirs used to live inside the
    # `output_dir is None` branch, so an explicitly passed --output_dir that
    # did not exist was never created and the subsequent write failed.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    basename = os.path.basename(file_name)
    extension = basename.split('.')[-1]
    out_name = basename[:-len(extension)-1] + '_VDSR' + '.' + extension
    return os.path.join(output_dir, out_name)
def main():
    """
    Upscale every image and video found under opt.in_folder with the VDSR
    model, writing '<name>_VDSR.<ext>' outputs (next to the inputs, or
    into opt.output_dir when given).
    :return: None
    """
    opt = parser.parse_args()
    folders = folders_in(opt.in_folder, recursive=True)
    for folder in folders:
        video_files = files_in(folder, extensions=['.mp4'])
        # BUG FIX: filter_files compares os.path.splitext() results (which
        # include the leading dot, lowercased) against this list; the old
        # dotless / uppercase entries ('jpg', 'JPG', 'png', 'jpeg', 'JPEG')
        # never matched, so no images were ever processed.
        image_files = files_in(folder, extensions=['.jpg', '.png', '.jpeg'])
        reuse = False  # NOTE(review): set but never read afterwards; vestigial
        if image_files:
            for image_file in image_files:
                out_file = process_out_file_path(image_file, opt.output_dir)
                image = cv2.imread(image_file)
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                out_image = process_image(image, opt)
                out_image = cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR)
                cv2.imwrite(out_file, out_image)
                if not reuse:
                    reuse = True
        if video_files:
            for video_file in video_files:
                video_reader = imageio.get_reader(video_file)
                out_video = process_out_file_path(video_file, opt.output_dir)
                writer = imageio.get_writer(out_video, fps=video_reader.get_meta_data()['fps'])
                print('Working on %s' % out_video)
                bar = progressbar.ProgressBar()
                for frame in bar(video_reader):
                    writer.append_data(process_image(frame, opt))
                if not reuse:
                    reuse = True
                writer.close()
if __name__ == '__main__':
main() | [
"argparse.ArgumentParser",
"os.walk",
"numpy.mean",
"os.path.join",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"os.path.dirname",
"os.path.exists",
"math.log10",
"imageio.get_reader",
"os.path.basename",
"torch.cuda.is_available",
"progressbar.ProgressBar",
"torch.from_numpy",
"os.ma... | [((304, 360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch VDSR Demo"""'}), "(description='PyTorch VDSR Demo')\n", (327, 360), False, 'import argparse, os\n'), ((1391, 1438), 'numpy.zeros', 'np.zeros', (['(y.shape[0], y.shape[1], 3)', 'np.uint8'], {}), '((y.shape[0], y.shape[1], 3), np.uint8)\n', (1399, 1438), True, 'import numpy as np\n'), ((2629, 2640), 'time.time', 'time.time', ([], {}), '()\n', (2638, 2640), False, 'import time, math\n'), ((3859, 3877), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (3866, 3877), False, 'import os\n'), ((4290, 4308), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (4297, 4308), False, 'import os\n'), ((4691, 4732), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2YCR_CB'], {}), '(image, cv2.COLOR_BGR2YCR_CB)\n', (4703, 4732), False, 'import cv2\n'), ((4856, 4904), 'cv2.cvtColor', 'cv2.cvtColor', (['output_image', 'cv2.COLOR_YCR_CB2BGR'], {}), '(output_image, cv2.COLOR_YCR_CB2BGR)\n', (4868, 4904), False, 'import cv2\n'), ((5174, 5201), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (5190, 5201), False, 'import os\n'), ((5330, 5364), 'os.path.join', 'os.path.join', (['output_dir', 'out_name'], {}), '(output_dir, out_name)\n', (5342, 5364), False, 'import os\n'), ((1246, 1265), 'numpy.mean', 'np.mean', (['(imdff ** 2)'], {}), '(imdff ** 2)\n', (1253, 1265), True, 'import numpy as np\n'), ((1323, 1347), 'math.log10', 'math.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (1333, 1347), False, 'import time, math\n'), ((1936, 2000), 'torch.load', 'torch.load', (['opt.model'], {'map_location': '(lambda storage, loc: storage)'}), '(opt.model, map_location=lambda storage, loc: storage)\n', (1946, 2000), False, 'import torch\n'), ((2688, 2699), 'time.time', 'time.time', ([], {}), '()\n', (2697, 2699), False, 'import time, math\n'), ((5095, 5121), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), 
'(output_dir)\n', (5109, 5121), False, 'import os\n'), ((5132, 5155), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (5143, 5155), False, 'import os\n'), ((1800, 1825), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1823, 1825), False, 'import torch\n'), ((5053, 5079), 'os.path.dirname', 'os.path.dirname', (['file_name'], {}), '(file_name)\n', (5068, 5079), False, 'import os\n'), ((4419, 4452), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (4431, 4452), False, 'import os\n'), ((6014, 6036), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (6024, 6036), False, 'import cv2\n'), ((6062, 6100), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (6074, 6100), False, 'import cv2\n'), ((6185, 6227), 'cv2.cvtColor', 'cv2.cvtColor', (['out_image', 'cv2.COLOR_RGB2BGR'], {}), '(out_image, cv2.COLOR_RGB2BGR)\n', (6197, 6227), False, 'import cv2\n'), ((6245, 6277), 'cv2.imwrite', 'cv2.imwrite', (['out_file', 'out_image'], {}), '(out_file, out_image)\n', (6256, 6277), False, 'import cv2\n'), ((6448, 6478), 'imageio.get_reader', 'imageio.get_reader', (['video_file'], {}), '(video_file)\n', (6466, 6478), False, 'import imageio\n'), ((6732, 6757), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {}), '()\n', (6755, 6757), False, 'import progressbar\n'), ((2401, 2427), 'torch.from_numpy', 'torch.from_numpy', (['im_input'], {}), '(im_input)\n', (2417, 2427), False, 'import torch\n'), ((4099, 4121), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (4115, 4121), False, 'import os\n')] |
import matplotlib.pyplot as plt
from singlecellmultiomics.bamProcessing import random_sample_bam
import singlecellmultiomics.pyutils as pyutils
import collections
import pandas as pd
import matplotlib
import numpy as np
import seaborn as sns
matplotlib.rcParams['figure.dpi'] = 160
matplotlib.use('Agg')
def plot_lorentz(cdf, per_sample=False):
    """Draw a Lorenz curve from the per-bin molecule counts in `cdf`.

    cdf: DataFrame with one column per sample and one row per genomic bin.
    per_sample: when True, draw one curve per column; otherwise a single
    curve over the column-summed counts. Returns (fig, ax).
    """
    fig, ax = plt.subplots(figsize=(6, 6))
    xs = np.linspace(0, 1, cdf.shape[0])
    if per_sample:
        for sample in cdf:
            counts = cdf[sample].fillna(0).sort_values(ascending=True)
            ax.plot(xs, np.cumsum(counts) / cdf[sample].sum(), label=sample, zorder=1)
    else:
        totals = cdf.sum(1).fillna(0).sort_values(ascending=True)
        ax.plot(xs, np.cumsum(totals) / cdf.sum().sum(), label='observed', zorder=1)
    # the diagonal is the perfectly-uniform reference
    ax.plot([0, 1], [0, 1], c='grey', ls=':', label='optimum', zorder=1)
    plt.title('Lorenz curve, all samples')
    ax.set_ylabel('Fraction of molecules (cumulative)')
    ax.set_xlabel('Fraction of genome')
    plt.legend()
    ax.grid(zorder=0)
    sns.despine()
    return fig, ax
class Lorenz:
    """Coverage-uniformity statistic: samples a BAM, stores per-bin counts
    in self.cdf, and plots a Lorenz curve plus per-cell genome coverage."""

    def __init__(self, args):
        pass

    def process_file(self, path):
        # sample 10k locations from the BAM to estimate the coverage
        # distribution (DataFrame-like; see plot_lorentz)
        self.cdf = random_sample_bam(path, 10_000)

    def to_csv(self, path):
        self.cdf.to_csv(path)

    def __repr__(self):
        return f'Lorenz'

    def plot(self, target_path, title=None):
        fig, ax = plot_lorentz(self.cdf, False)
        plt.tight_layout()
        plt.savefig(target_path)
        plt.close()
        # second figure: fraction of the sampled genome covered per cell
        fig, ax = plt.subplots(figsize=(10, 5))
        cov_per_cell = (((self.cdf > 0).sum() / self.cdf.shape[0]).sort_values())
        cov_per_cell.name = 'fraction genome covered'
        cov_per_cell.plot.bar()
        mean_cov = cov_per_cell.mean()
        ax.axhline(mean_cov, c='green', label='mean coverage (%.3f)' % mean_cov)
        ax.set_ylabel('Fraction genome covered')
        ax.set_xlabel("Cells")
        ax.set_xticks([], [])
        sns.despine()
        plt.legend()
        # BUG FIX: tight_layout() used to be called AFTER savefig(), so the
        # layout adjustment never affected the saved image.
        plt.tight_layout()
        plt.savefig(target_path.replace('.png', '.cell_genome_fraction.png'))
        plt.close()
| [
"matplotlib.pyplot.title",
"singlecellmultiomics.bamProcessing.random_sample_bam",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"seaborn.despine",
"matplotlib.use",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((282, 303), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (296, 303), False, 'import matplotlib\n'), ((360, 388), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (372, 388), True, 'import matplotlib.pyplot as plt\n'), ((958, 970), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (968, 970), True, 'import matplotlib.pyplot as plt\n'), ((997, 1010), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1008, 1010), True, 'import seaborn as sns\n'), ((818, 856), 'matplotlib.pyplot.title', 'plt.title', (['"""Lorenz curve, all samples"""'], {}), "('Lorenz curve, all samples')\n", (827, 856), True, 'import matplotlib.pyplot as plt\n'), ((1142, 1172), 'singlecellmultiomics.bamProcessing.random_sample_bam', 'random_sample_bam', (['path', '(10000)'], {}), '(path, 10000)\n', (1159, 1172), False, 'from singlecellmultiomics.bamProcessing import random_sample_bam\n'), ((1385, 1403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1401, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1436), 'matplotlib.pyplot.savefig', 'plt.savefig', (['target_path'], {}), '(target_path)\n', (1423, 1436), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1456), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1454, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1506), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1489, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1919), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1917, 1919), True, 'import seaborn as sns\n'), ((1928, 1940), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1938, 1940), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2046), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2044, 2046), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2066), 'matplotlib.pyplot.close', 
'plt.close', ([], {}), '()\n', (2064, 2066), True, 'import matplotlib.pyplot as plt\n'), ((606, 637), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'cdf.shape[0]'], {}), '(0, 1, cdf.shape[0])\n', (617, 637), True, 'import numpy as np\n'), ((452, 483), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'cdf.shape[0]'], {}), '(0, 1, cdf.shape[0])\n', (463, 483), True, 'import numpy as np\n')] |
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import pickle
import cv2
from os import path
class Classifier:
    """Image classifier wrapping a serialized Keras model and a pickled
    label binarizer (exposes .classes_)."""

    def __init__(self, defaultModel, label_file):
        self.defaultModel = defaultModel
        self.modelPath = defaultModel
        self.model = load_model(self.modelPath)
        with open(label_file, "rb") as handle:
            self.label = pickle.loads(handle.read())

    def setModel(self, modelPath):
        """Swap in a different serialized model from disk."""
        self.modelPath = modelPath
        self.model = load_model(self.modelPath)

    def getModel(self):
        """Return the model identifier: the name of its parent folder."""
        print(self.modelPath)
        return self.modelPath.split(path.sep)[-2]

    def predict(self, image, IMAGE_DIMS=(96, 96)):
        """Classify one image; returns (label, confidence_percent)."""
        # pre-process: resize, scale to [0, 1], add a batch dimension
        resized = cv2.resize(image, IMAGE_DIMS)
        scaled = resized.astype("float") / 255.0
        batch = np.expand_dims(img_to_array(scaled), axis=0)
        # prediction: pick the highest-probability class
        proba = self.model.predict(batch)[0]
        best = np.argmax(proba)
        return self.label.classes_[best], proba[best] * 100
| [
"keras.models.load_model",
"numpy.argmax",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"cv2.resize"
] | [((320, 346), 'keras.models.load_model', 'load_model', (['self.modelPath'], {}), '(self.modelPath)\n', (330, 346), False, 'from keras.models import load_model\n'), ((519, 545), 'keras.models.load_model', 'load_model', (['self.modelPath'], {}), '(self.modelPath)\n', (529, 545), False, 'from keras.models import load_model\n'), ((775, 804), 'cv2.resize', 'cv2.resize', (['image', 'IMAGE_DIMS'], {}), '(image, IMAGE_DIMS)\n', (785, 804), False, 'import cv2\n'), ((867, 886), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (879, 886), False, 'from keras.preprocessing.image import img_to_array\n'), ((903, 932), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (917, 932), True, 'import numpy as np\n'), ((1013, 1029), 'numpy.argmax', 'np.argmax', (['proba'], {}), '(proba)\n', (1022, 1029), True, 'import numpy as np\n')] |
"""
ABFs can be created when signals are applied using the DAC. If these "stimulus
waveforms" are used, they either come from an epoch table or from a DAC file.
Code in this file determines where the stimulus comes from and returns it for
a given sweep and channel.
If the stimulus waveform comes from a file, code here also assists in caching
the data from that file so the file only needs to be read from disk once.
"""
import numpy as np
import copy
import os
import sys
import pyabf
import pyabf.waveform
# cache stimulus files in this dictionary
# keys are stimulus filenames, values are ABF and ATF objects
cachedStimuli = {}
class Stimulus:
    """
    The Stimulus class used to be where all waveform generation happened.
    Waveform generation from the epoch table now occurs in waveform.py.
    This class is kept so old code doesn't break, but is getting dismantled.
    """
    def __init__(self, abf, channel):
        assert isinstance(abf, pyabf.ABF)
        self.abf = abf
        self.channel = channel
        self.text = ""  # this is displayed on the markdown info page
    def __str__(self):
        return "Stimulus(abf, %d)" % self.channel
    def __repr__(self):
        return "Stimulus(abf, %d)" % self.channel
    def stimulusWaveform(self, stimulusSweep=0):
        """
        Return a signal (the same size as a sweep) representing the command
        waveform of the DAC for the given channel.
        """
        # the waveform flags live in different header sections per ABF version
        if self.abf.abfVersion["major"] == 1:
            nWaveformEnable = self.abf._headerV1.nWaveformEnable[self.channel]
            nWaveformSource = self.abf._headerV1.nWaveformSource[self.channel]
        elif self.abf.abfVersion["major"] == 2:
            nWaveformEnable = self.abf._dacSection.nWaveformEnable[self.channel]
            nWaveformSource = self.abf._dacSection.nWaveformSource[self.channel]
        # NOTE(review): an ABF major version other than 1 or 2 would leave
        # nWaveformEnable/nWaveformSource unbound and raise NameError below.
        if nWaveformEnable == 0 or nWaveformSource == 0:
            # no waveform: a flat trace at the channel's holding level
            self.text = "DAC waveform is not enabled"
            return np.full(self.abf.sweepPointCount,
                           self.abf.holdingCommand[self.channel])
        elif nWaveformSource == 1:
            # waveform is generated from the epoch table
            epochTable = pyabf.waveform.EpochTable(self.abf, self.channel)
            self.text = str(epochTable)
            sweepWaveform = epochTable.epochWaveformsBySweep[stimulusSweep]
            sweepC = sweepWaveform.getWaveform()
            return sweepC
        elif nWaveformSource == 2:
            # waveform comes from an external stimulus file (ABF/ATF)
            self.text = "DAC waveform is controlled by custom file"
            return stimulusWaveformFromFile(self.abf)
        else:
            self.text = "unknown nWaveformSource (%d)" % nWaveformSource
            return np.full(self.abf.sweepPointCount, np.nan)
    @property
    def protocolStorageDir(self):
        # deprecated alias kept for backward compatibility
        print("WARNING: use abf.stimulusFileFolder (not protocolStorageDir)")
        return self.abf.stimulusFileFolder
    @protocolStorageDir.setter
    def protocolStorageDir(self, val=None):
        # deprecated alias kept for backward compatibility
        print("WARNING: use abf.stimulusFileFolder (not protocolStorageDir)")
        self.abf.stimulusFileFolder = val
def stimulusWaveformFromFile(abf, channel=0):
    """
    Attempt to find the stimulus file used to record an ABF, read the stimulus
    file (ABF or ATF), and return the stimulus waveform (as a numpy array).

    Returns an array of NaN (one per sweep point) when the file cannot be
    located. File objects are cached in module-level `cachedStimuli` when
    abf._cacheStimulusFiles is set, so each file is read from disk once.
    """
    assert isinstance(abf, pyabf.ABF)
    assert channel in abf.channelList
    # prepare potential file paths where the stimulus file may exist
    stimFname = abf._stringsIndexed.lDACFilePath[channel]
    stimBN = os.path.basename(stimFname)
    abfFolder = os.path.dirname(abf.abfFilePath)
    pathSameFolder = os.path.join(abfFolder, stimBN)
    pathAlt = os.path.join(str(abf.stimulusFileFolder), stimBN)
    # try to find the stimulus file
    if os.path.exists(stimFname):
        stimFname = os.path.abspath(stimFname)
    elif os.path.exists(pathSameFolder):
        stimFname = pathSameFolder
    elif pathAlt and os.path.exists(pathAlt):
        stimFname = pathAlt
    else:
        return np.full(abf.sweepPointCount, np.nan)
    # the stimulus waveform file was found, consider caching
    if abf._cacheStimulusFiles:
        stimFname = os.path.realpath(stimFname)
        if not stimFname in cachedStimuli.keys():
            if stimFname.upper().endswith(".ABF"):
                cachedStimuli[stimFname] = pyabf.ABF(stimFname)
            elif stimFname.upper().endswith(".ATF"):
                cachedStimuli[stimFname] = pyabf.ATF(stimFname)
        return cachedStimuli[stimFname].sweepY
    else:
        # BUG FIX: this branch used to return the ABF/ATF object itself,
        # while the cached branch above returns the .sweepY waveform --
        # callers got a different type depending on the cache flag.
        if stimFname.upper().endswith(".ABF"):
            return pyabf.ABF(stimFname).sweepY
        elif stimFname.upper().endswith(".ATF"):
            return pyabf.ATF(stimFname).sweepY
"numpy.full",
"os.path.abspath",
"pyabf.waveform.EpochTable",
"os.path.basename",
"os.path.dirname",
"os.path.realpath",
"os.path.exists",
"pyabf.ABF",
"pyabf.ATF",
"os.path.join"
] | [((3503, 3530), 'os.path.basename', 'os.path.basename', (['stimFname'], {}), '(stimFname)\n', (3519, 3530), False, 'import os\n'), ((3547, 3579), 'os.path.dirname', 'os.path.dirname', (['abf.abfFilePath'], {}), '(abf.abfFilePath)\n', (3562, 3579), False, 'import os\n'), ((3601, 3632), 'os.path.join', 'os.path.join', (['abfFolder', 'stimBN'], {}), '(abfFolder, stimBN)\n', (3613, 3632), False, 'import os\n'), ((3741, 3766), 'os.path.exists', 'os.path.exists', (['stimFname'], {}), '(stimFname)\n', (3755, 3766), False, 'import os\n'), ((3788, 3814), 'os.path.abspath', 'os.path.abspath', (['stimFname'], {}), '(stimFname)\n', (3803, 3814), False, 'import os\n'), ((3824, 3854), 'os.path.exists', 'os.path.exists', (['pathSameFolder'], {}), '(pathSameFolder)\n', (3838, 3854), False, 'import os\n'), ((4141, 4168), 'os.path.realpath', 'os.path.realpath', (['stimFname'], {}), '(stimFname)\n', (4157, 4168), False, 'import os\n'), ((1992, 2064), 'numpy.full', 'np.full', (['self.abf.sweepPointCount', 'self.abf.holdingCommand[self.channel]'], {}), '(self.abf.sweepPointCount, self.abf.holdingCommand[self.channel])\n', (1999, 2064), True, 'import numpy as np\n'), ((4574, 4594), 'pyabf.ABF', 'pyabf.ABF', (['stimFname'], {}), '(stimFname)\n', (4583, 4594), False, 'import pyabf\n'), ((2153, 2202), 'pyabf.waveform.EpochTable', 'pyabf.waveform.EpochTable', (['self.abf', 'self.channel'], {}), '(self.abf, self.channel)\n', (2178, 2202), False, 'import pyabf\n'), ((3912, 3935), 'os.path.exists', 'os.path.exists', (['pathAlt'], {}), '(pathAlt)\n', (3926, 3935), False, 'import os\n'), ((3990, 4026), 'numpy.full', 'np.full', (['abf.sweepPointCount', 'np.nan'], {}), '(abf.sweepPointCount, np.nan)\n', (3997, 4026), True, 'import numpy as np\n'), ((4313, 4333), 'pyabf.ABF', 'pyabf.ABF', (['stimFname'], {}), '(stimFname)\n', (4322, 4333), False, 'import pyabf\n'), ((4663, 4683), 'pyabf.ATF', 'pyabf.ATF', (['stimFname'], {}), '(stimFname)\n', (4672, 4683), False, 'import pyabf\n'), ((2659, 
2700), 'numpy.full', 'np.full', (['self.abf.sweepPointCount', 'np.nan'], {}), '(self.abf.sweepPointCount, np.nan)\n', (2666, 2700), True, 'import numpy as np\n'), ((4430, 4450), 'pyabf.ATF', 'pyabf.ATF', (['stimFname'], {}), '(stimFname)\n', (4439, 4450), False, 'import pyabf\n')] |
import numpy as np
def mincorr_cost_func(tmp, cost_func):
    """Aggregate each available feature's correlations to the selected set.

    Parameters
    ----------
    tmp : ndarray, shape (n_selected, n_available)
        Absolute correlations between selected (rows) and available (cols).
    cost_func : str
        One of 'sum'/'avg', 'sqrt'/'squared', or 'mu_max'.

    Returns
    -------
    ndarray, shape (n_available,)
        Per-column aggregated cost.

    Raises
    ------
    ValueError
        If `cost_func` is not recognized. (Previously an unknown value
        fell through and raised UnboundLocalError on the return.)
    """
    if cost_func in ['sum', 'avg']:
        # corr sum of each "available" k to the "selected" i
        sk = np.sum(tmp, axis=0)
    elif cost_func in ['sqrt', 'squared']:
        sk = np.sum(np.power(tmp, 2), axis=0)
    elif cost_func in ['mu_max']:
        sk = np.mean(tmp, axis=0) + np.max(tmp, axis=0)
    else:
        raise ValueError(
            "unknown cost_func='{:s}' value".format(str(cost_func)))
    return sk
def mincorr(cmat, n_stop=5, max_rho=0.4, init_method='avg',
            cost_fn='sqrt', verbose=True):
    """Greedy minimum-correlation feature selection.

    Selects `n_stop` indices of the correlation matrix `cmat` so that the
    chosen features are mutually weakly correlated.

    Parameters
    ----------
    cmat : ndarray, shape (p, p)
        Correlation matrix; absolute values are used internally.
    n_stop : int
        Number of indices to select.
    max_rho : float
        While < 1.0, candidates whose |corr| to any already-selected
        feature exceeds this threshold are excluded (phase 2). Remaining
        slots are then filled without the constraint (phase 3).
    init_method : str
        'sum'/'avg' or 'low'/'lowest'/'min' -- how the first index is
        chosen; raises Exception otherwise.
    cost_fn : str
        Aggregation strategy passed to `mincorr_cost_func`.
    verbose : bool
        Print a message when max_rho cannot be satisfied for n_stop
        features.

    Returns
    -------
    list of int
        Selected indices, in selection order.
    """
    # declare list of indicies
    available = list(range(cmat.shape[0]))
    selected = []
    # prep: convert to abs correlations
    absrho = np.abs(cmat, order='C')
    # (1) Initial Condition
    # (1a) find "i" with the lowest correlations
    if init_method in ['sum', 'avg']:
        si = np.sum(absrho, axis=1)
    elif init_method in ['low', 'lowest', 'min']:
        si = np.min(absrho, axis=1)  # same as picking the lowest |r_ij|
    else:
        raise Exception(
            "unknown init_method='{:s}' value".format(str(init_method)))
    i = np.argmin(si)
    selected.append(i)
    available.remove(i)
    # (1b) find "j" with the lowest correlation |r_ij| given "i"
    j = np.argmin(absrho[i, :])
    selected.append(j)
    available.remove(j)
    # (2) Enforce max abs correlation
    if max_rho < 1.0:
        # (2a) reset all absrho>maxrho to np.inf
        # (marking them inf makes argmin avoid them, and sk[k] == inf
        # below signals that no admissible candidate is left)
        absrho[absrho > max_rho] = np.inf
        # (2b) add features if corr to previously
        # selected features is below max_rho
        for _ in range(2, n_stop):
            # slice available
            tmp = absrho[selected, :]  # slice \r_ij| given i \in "selected"
            tmp = tmp[:, available]  # remove "selected" colums (with 1s)
            # find best
            # sk = np.sum(np.power(tmp,2), axis=0)
            sk = mincorr_cost_func(tmp, cost_fn)
            k = np.argmin(sk)  # find index with lowest cost func
            if sk[k] < np.inf:
                # store indicies
                j = available[k]  # get the index
                selected.append(j)
                available.remove(j)
            else:
                if verbose:
                    print(("Terminated: max_rho={:5.3f} works only "
                           "for {:d} features".format(max_rho, _)))
                break
        # (2c) Reset matrix
        # (restore the un-masked absolute correlations for phase 3)
        absrho = np.abs(cmat, order='C')
    # (3) fill the remaining spots
    for _ in range(len(selected), n_stop):
        # slice available
        tmp = absrho[selected, :]  # slice \r_ij| given i \in "selected"
        tmp = tmp[:, available]  # remove "selected" colums (with 1s)
        # find best
        # sk = np.sum(np.power(tmp,2), axis=0)
        sk = mincorr_cost_func(tmp, cost_fn)
        k = np.argmin(sk)  # find index with lowest cost func
        # store indicies
        j = available[k]  # get the index
        selected.append(j)
        available.remove(j)
    # done
    return selected
def mincorr_result_check(cmat, selected):
    """Print a quick diagnostic for a mincorr selection: the indices, the
    average absolute pairwise correlation, and the selected sub-matrix."""
    print("selected: ", selected)
    sub = cmat[selected, :][:, selected]
    n_sel = len(selected)
    n_pairs = (n_sel - 1) * n_sel / 2
    avg_abs = np.sum(np.abs(np.tril(sub, -1))) / n_pairs
    print("avg abs corr: {:7.4f}".format(avg_abs))
    print("selected corr matrix:\n", sub.round(3))
    print("")
| [
"numpy.abs",
"numpy.sum",
"numpy.tril",
"numpy.power",
"numpy.argmin",
"numpy.min",
"numpy.mean",
"numpy.max"
] | [((634, 657), 'numpy.abs', 'np.abs', (['cmat'], {'order': '"""C"""'}), "(cmat, order='C')\n", (640, 657), True, 'import numpy as np\n'), ((1050, 1063), 'numpy.argmin', 'np.argmin', (['si'], {}), '(si)\n', (1059, 1063), True, 'import numpy as np\n'), ((1185, 1208), 'numpy.argmin', 'np.argmin', (['absrho[i, :]'], {}), '(absrho[i, :])\n', (1194, 1208), True, 'import numpy as np\n'), ((170, 189), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (176, 189), True, 'import numpy as np\n'), ((787, 809), 'numpy.sum', 'np.sum', (['absrho'], {'axis': '(1)'}), '(absrho, axis=1)\n', (793, 809), True, 'import numpy as np\n'), ((2346, 2369), 'numpy.abs', 'np.abs', (['cmat'], {'order': '"""C"""'}), "(cmat, order='C')\n", (2352, 2369), True, 'import numpy as np\n'), ((2742, 2755), 'numpy.argmin', 'np.argmin', (['sk'], {}), '(sk)\n', (2751, 2755), True, 'import numpy as np\n'), ((873, 895), 'numpy.min', 'np.min', (['absrho'], {'axis': '(1)'}), '(absrho, axis=1)\n', (879, 895), True, 'import numpy as np\n'), ((1860, 1873), 'numpy.argmin', 'np.argmin', (['sk'], {}), '(sk)\n', (1869, 1873), True, 'import numpy as np\n'), ((253, 269), 'numpy.power', 'np.power', (['tmp', '(2)'], {}), '(tmp, 2)\n', (261, 269), True, 'import numpy as np\n'), ((326, 346), 'numpy.mean', 'np.mean', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (333, 346), True, 'import numpy as np\n'), ((349, 368), 'numpy.max', 'np.max', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (355, 368), True, 'import numpy as np\n'), ((3088, 3131), 'numpy.tril', 'np.tril', (['cmat[selected, :][:, selected]', '(-1)'], {}), '(cmat[selected, :][:, selected], -1)\n', (3095, 3131), True, 'import numpy as np\n')] |
from copy import deepcopy
from unittest import TestCase
import numpy as np
import visualiser
from env import Env
from samplers.informedSampler import InformedSampler
from tests.common_vars import template_args
from utils import planner_registry
class TestInformedSampler(TestCase):
    """Checks that InformedSampler shrinks its sampling region once a
    solution cost (planner.c_max) is known."""
    def setUp(self) -> None:
        # build an Env configured with an InformedSampler and an RRT planner
        args = deepcopy(template_args)
        visualiser.VisualiserSwitcher.choose_visualiser("base")
        # setup to use the correct sampler
        args["sampler"] = InformedSampler()
        # use some suitable planner
        args["planner_data_pack"] = planner_registry.PLANNERS["rrt"]
        self.env = Env(args)
        self.sampler = self.env.args.sampler
    def test_get_next_pos(self):
        # assert that the sampling points returned after getting an initial solution
        # has a smaller range
        np.random.seed(0)  # fix the RNG so both sample batches are comparable
        pts_before_sol = []
        for i in range(100):
            pts_before_sol.append(self.sampler.get_next_pos()[0])
        pts_before_sol = np.array(pts_before_sol)
        # simulate a found solution: best cost slightly above the
        # straight-line start->goal distance
        self.sampler.args.planner.c_max = (
            np.linalg.norm(self.env.start_pt.pos - self.env.goal_pt.pos) + 1
        )
        pts_after_sol = []
        for i in range(100):
            pts_after_sol.append(self.sampler.get_next_pos()[0])
        pts_after_sol = np.array(pts_after_sol)
        self.assertTrue((pts_before_sol.max(axis=0) > pts_after_sol.max(axis=0)).all())
| [
"copy.deepcopy",
"numpy.random.seed",
"samplers.informedSampler.InformedSampler",
"env.Env",
"visualiser.VisualiserSwitcher.choose_visualiser",
"numpy.array",
"numpy.linalg.norm"
] | [((330, 353), 'copy.deepcopy', 'deepcopy', (['template_args'], {}), '(template_args)\n', (338, 353), False, 'from copy import deepcopy\n'), ((362, 417), 'visualiser.VisualiserSwitcher.choose_visualiser', 'visualiser.VisualiserSwitcher.choose_visualiser', (['"""base"""'], {}), "('base')\n", (409, 417), False, 'import visualiser\n'), ((488, 505), 'samplers.informedSampler.InformedSampler', 'InformedSampler', ([], {}), '()\n', (503, 505), False, 'from samplers.informedSampler import InformedSampler\n'), ((632, 641), 'env.Env', 'Env', (['args'], {}), '(args)\n', (635, 641), False, 'from env import Env\n'), ((845, 862), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (859, 862), True, 'import numpy as np\n'), ((1012, 1036), 'numpy.array', 'np.array', (['pts_before_sol'], {}), '(pts_before_sol)\n', (1020, 1036), True, 'import numpy as np\n'), ((1315, 1338), 'numpy.array', 'np.array', (['pts_after_sol'], {}), '(pts_after_sol)\n', (1323, 1338), True, 'import numpy as np\n'), ((1094, 1154), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.env.start_pt.pos - self.env.goal_pt.pos)'], {}), '(self.env.start_pt.pos - self.env.goal_pt.pos)\n', (1108, 1154), True, 'import numpy as np\n')] |
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
from PyQt5.QtGui import QVector3D
from PyQt5.Qt3DCore import QEntity
from PyQt5.Qt3DExtras import QCylinderMesh
from skyfield import functions
import numpy as np
# local import
from gui.extWindows.simulator.materials import Materials
from gui.extWindows.simulator import tools
class SimulatorLaser:
    """Builds and orients the 3D laser model of the mount simulator."""

    __all__ = ['SimulatorLaser']

    def __init__(self, app):
        super().__init__()
        self.app = app
        self.model = {}
        self.modelRoot = None

    def create(self, rEntity, show):
        """
        Assemble the laser model tree below the given root entity.

        dict {'name of model': {'parent': }}
        :param rEntity: root of the 3D models
        :param show: when False, the model is torn down and not rebuilt
        :return: success
        """
        # remove any previously built model before (re)creating it
        if self.model:
            self.modelRoot.setParent(None)
            self.model.clear()
        if not show:
            return False
        self.modelRoot = QEntity(rEntity)
        self.model = {
            'ref': {
                'parent': None,
                'scale': [1, 1, 1],
            },
            'az': {
                'parent': 'ref',
                'scale': [1, 1, 1],
            },
            'alt': {
                'parent': 'az',
                'scale': [1, 1, 1],
            },
            'laser': {
                'parent': 'alt',
                'source': [QCylinderMesh(), 4500, 10, 20, 20],
                'trans': [0, 2250, 0],
                'mat': Materials().laser,
            },
        }
        for node in self.model:
            tools.linkModel(self.model, node, self.modelRoot)
        self.updatePositions()
        return True

    def updatePositions(self):
        """
        Reposition and re-aim the laser according to the current mount state.

        :return: success
        """
        if not self.model:
            return False
        obsSite = self.app.mount.obsSite
        if not obsSite.haJNow:
            return False
        # transform the mount geometry into the laser reference frame
        _, _, _, PB, PD = self.app.mount.geometry.calcTransformationMatrices(
            ha=obsSite.haJNow,
            dec=obsSite.decJNow,
            lat=obsSite.location.latitude,
            pierside=obsSite.pierside)
        PB *= 1000
        PB[2] += 1000
        _, alt, az = functions.to_spherical(-PD)
        az = np.degrees(az)
        alt = np.degrees(alt)
        self.model['ref']['t'].setTranslation(QVector3D(PB[0], PB[1], PB[2]))
        self.model['az']['t'].setRotationZ(az + 90)
        self.model['alt']['t'].setRotationX(-alt)
        return True
| [
"skyfield.functions.to_spherical",
"numpy.degrees",
"gui.extWindows.simulator.tools.linkModel",
"gui.extWindows.simulator.materials.Materials",
"PyQt5.Qt3DExtras.QCylinderMesh",
"PyQt5.QtGui.QVector3D",
"PyQt5.Qt3DCore.QEntity"
] | [((1375, 1391), 'PyQt5.Qt3DCore.QEntity', 'QEntity', (['rEntity'], {}), '(rEntity)\n', (1382, 1391), False, 'from PyQt5.Qt3DCore import QEntity\n'), ((2893, 2920), 'skyfield.functions.to_spherical', 'functions.to_spherical', (['(-PD)'], {}), '(-PD)\n', (2915, 2920), False, 'from skyfield import functions\n'), ((2934, 2948), 'numpy.degrees', 'np.degrees', (['az'], {}), '(az)\n', (2944, 2948), True, 'import numpy as np\n'), ((2963, 2978), 'numpy.degrees', 'np.degrees', (['alt'], {}), '(alt)\n', (2973, 2978), True, 'import numpy as np\n'), ((1997, 2046), 'gui.extWindows.simulator.tools.linkModel', 'tools.linkModel', (['self.model', 'name', 'self.modelRoot'], {}), '(self.model, name, self.modelRoot)\n', (2012, 2046), False, 'from gui.extWindows.simulator import tools\n'), ((3025, 3055), 'PyQt5.QtGui.QVector3D', 'QVector3D', (['PB[0]', 'PB[1]', 'PB[2]'], {}), '(PB[0], PB[1], PB[2])\n', (3034, 3055), False, 'from PyQt5.QtGui import QVector3D\n'), ((1811, 1826), 'PyQt5.Qt3DExtras.QCylinderMesh', 'QCylinderMesh', ([], {}), '()\n', (1824, 1826), False, 'from PyQt5.Qt3DExtras import QCylinderMesh\n'), ((1909, 1920), 'gui.extWindows.simulator.materials.Materials', 'Materials', ([], {}), '()\n', (1918, 1920), False, 'from gui.extWindows.simulator.materials import Materials\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
import os
import time
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import math
import input_data
import numpy as np
from multiprocessing import Pool
import threading
from tqdm import tqdm,trange
def placeholder_inputs(batch_size=16, num_frame_per_clib=16, crop_size=224, rgb_channels=3, flow_channels=2):
    """Create input placeholders for the two-stream (RGB + flow) model.

    Args:
      batch_size: number of clips per batch.
      num_frame_per_clib: frames per clip.
      crop_size: spatial size of each cropped frame.
      rgb_channels: channel count of the RGB stream.
      flow_channels: channel count of the optical-flow stream.
    Returns:
      Tuple of (rgb images, flow images, labels, is_training) placeholders.
    """
    # every clip placeholder shares the leading (batch, time, H, W) dims
    clip_dims = (batch_size, num_frame_per_clib, crop_size, crop_size)
    rgb_images_placeholder = tf.placeholder(tf.float32, shape=clip_dims + (rgb_channels,))
    flow_images_placeholder = tf.placeholder(tf.float32, shape=clip_dims + (flow_channels,))
    labels_placeholder = tf.placeholder(tf.int64, shape=batch_size)
    is_training = tf.placeholder(tf.bool)
    return rgb_images_placeholder, flow_images_placeholder, labels_placeholder, is_training
def rgb_placeholder_inputs(batch_size=16, num_frame_per_clib=16, crop_size=224, rgb_channels=3, flow_channels=2):
    """Create input placeholders for the RGB-only model.

    Args:
      batch_size: number of clips per batch.
      num_frame_per_clib: frames per clip.
      crop_size: spatial size of each cropped frame.
      rgb_channels: channel count of the RGB stream.
      flow_channels: unused; kept for signature compatibility with
        ``placeholder_inputs``.
    Returns:
      Tuple of (rgb images, labels, is_training) placeholders.
    """
    rgb_images_placeholder = tf.placeholder(
        tf.float32,
        shape=(batch_size, num_frame_per_clib, crop_size, crop_size, rgb_channels))
    labels_placeholder = tf.placeholder(tf.int64, shape=batch_size)
    is_training = tf.placeholder(tf.bool)
    return rgb_images_placeholder, labels_placeholder, is_training
def Normalization(clips, frames_num):
    """Return the first *frames_num* frames of *clips*, each standardized
    to zero mean and unit variance."""
    return [tf.image.per_image_standardization(clips[i]) for i in range(frames_num)]
def average_gradients(tower_grads):
    """Average gradients across all towers.

    Args:
      tower_grads: one list of (gradient, variable) pairs per tower.
    Returns:
      List of (averaged gradient, variable) pairs.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # add a leading axis per tower so the gradients can be concatenated
        expanded = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        mean_grad = tf.reduce_mean(tf.concat(expanded, 0), 0)
        # all towers share the same variable; take it from the first tower
        average_grads.append((mean_grad, grad_and_vars[0][1]))
    return average_grads
def l2_loss(weight_decay, weighyt_list):
    """Return the L2 regularization loss of *weighyt_list* scaled by *weight_decay*."""
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    return tf.contrib.layers.apply_regularization(regularizer=regularizer,
                                                  weights_list=weighyt_list)
def tower_loss(logit, labels, wd):
    """Cross-entropy plus weight-decay loss for one tower.

    Args:
      logit: unnormalized class scores.
      labels: integer class labels.
      wd: weight-decay coefficient.
    Returns:
      Scalar total loss tensor.
    """
    print(logit.shape)
    print(labels.shape)
    # collect every convolution / dense kernel eligible for weight decay
    decayed_weights = [v for v in tf.global_variables()
                       if 'conv_3d/w' in v.name or 'kernel' in v.name]
    cross_entropy_mean = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logit)
    )
    weight_decay = l2_loss(wd, decayed_weights)
    # total tower loss = data term + regularization term
    return cross_entropy_mean + weight_decay
def tower_acc(logit, labels):
    """Fraction of predictions in *logit* whose argmax matches *labels*."""
    hits = tf.equal(tf.argmax(logit, 1), labels)
    return tf.reduce_mean(tf.cast(hits, tf.float32))
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a variable pinned to host (CPU) memory."""
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, wd):
    """Create a Xavier-initialized CPU variable, registering its L2 penalty
    in the 'weightdecay_losses' collection when *wd* is not None."""
    var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
    if wd is not None:
        tf.add_to_collection('weightdecay_losses', tf.nn.l2_loss(var) * wd)
    return var
def data_to_feed_dict(data):
    """Merge the results of finished loader tasks into feed-dict arrays.

    Args:
      data: iterable of objects exposing ``get_result() -> (clips, labels)``
        (e.g. ``MyThread`` workers that have been joined).
    Returns:
      Tuple ``(images, labels)`` of numpy arrays, concatenating every
      task's clips and labels in order.
    """
    rgb_train_images = []
    train_labels = []
    for task in data:
        # fetch the (clips, labels) pair once instead of calling
        # get_result() twice per task as before
        clips, labels = task.get_result()
        rgb_train_images.extend(clips)
        train_labels.extend(labels)
    return np.array(rgb_train_images), np.array(train_labels)
def get_data(filename, batch_size, num_frames_per_clip=64, sample_rate=4, crop_size=224, shuffle=False, add_flow=False):
    """Read one batch of RGB clips and their labels from *filename*."""
    # flow images and the trailing bookkeeping values are not needed here
    rgb_train_images, _, train_labels, _, _, _ = input_data.read_clip_and_label(
        filename=filename,
        batch_size=batch_size,
        num_frames_per_clip=num_frames_per_clip,
        sample_rate=sample_rate,
        crop_size=crop_size,
        shuffle=shuffle,
        add_flow=add_flow,
    )
    return rgb_train_images, train_labels
class MyThread(threading.Thread):
    """Thread that remembers the return value of its target callable."""

    def __init__(self, func, args=()):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args

    def run(self):
        # stash the result so callers can retrieve it after join()
        self.result = self.func(*self.args)

    def get_result(self):
        """Return the target's result, or None if run() has not completed."""
        return getattr(self, 'result', None)
def load_data(filename, batch_size, num_frames_per_clip, sample_rate, crop_size, shuffle=False, add_flow=False):
    """Load one batch of training data using worker threads.

    Spawns ``batch_size // 4`` threads, each reading 4 clips via
    ``get_data``, joins them, and merges their results.

    Returns:
      Tuple ``(images, labels)`` of numpy arrays.
    """
    workers = []
    # BUG FIX: range() rejects floats; "/" yields a float in Python 3,
    # so use integer division here.
    for _ in range(batch_size // 4):
        t = MyThread(get_data, args=(
            filename,
            4,
            num_frames_per_clip,
            sample_rate,
            crop_size,
            shuffle,
            add_flow,
        ))
        workers.append(t)
        t.start()
    for t in workers:
        t.join()
    print('DATA_LOAD_COMP: enqueue......')
    rgb_train_images, train_labels = data_to_feed_dict(workers)
    return rgb_train_images, train_labels
def topk(predicts, labels, ids):
    """Report clip-level and video-level top-1 / top-5 accuracy.

    Args:
      predicts: per-clip score vectors.
      labels: ground-truth label per video id.
      ids: video id of each clip in *predicts*.
    """
    scores = {}
    clip_top1 = []
    clip_top5 = []
    video_top1 = []
    video_top5 = []
    start_time = time.time()
    print('Results process..............')
    for index in tqdm(range(len(predicts))):
        vid = ids[index]
        score = predicts[index]
        # group clip scores by their video id
        scores.setdefault('%d' % vid, []).append(score)
        ranked = np.argsort(score).tolist()
        clip_top1.append(labels[vid] in ranked[-1:])
        clip_top5.append(labels[vid] in ranked[-5:])
    print('Clips-----TOP_1_ACC in test: %f' % np.mean(clip_top1))
    print('Clips-----TOP_5_ACC in test: %f' % np.mean(clip_top5))
    print('..............')
    # NOTE(review): the last video id is skipped here (range(len(labels)-1));
    # kept as-is to preserve the original behavior — confirm intent.
    for vid in range(len(labels) - 1):
        # average the clip scores of this video, then rank the classes
        ranked = np.argsort(np.mean(scores['%d' % vid], axis=0)).tolist()
        video_top1.append(labels[vid] in ranked[-1:])
        video_top5.append(labels[vid] in ranked[-5:])
    print('TOP_1_ACC in test: %f' % np.mean(video_top1))
    print('TOP_5_ACC in test: %f' % np.mean(video_top5))
    duration = time.time() - start_time
    print('Time use: %.3f' % duration)
| [
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.contrib.layers.l2_regularizer",
"numpy.argsort",
"tensorflow.global_variables",
"numpy.mean",
"tensorflow.image.per_image_standardization",
"input_data.read_clip_and_label",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.plac... | [((1993, 2099), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_frame_per_clib, crop_size, crop_size, rgb_channels)'}), '(tf.float32, shape=(batch_size, num_frame_per_clib, crop_size,\n crop_size, rgb_channels))\n', (2007, 2099), True, 'import tensorflow as tf\n'), ((2367, 2474), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_frame_per_clib, crop_size, crop_size, flow_channels)'}), '(tf.float32, shape=(batch_size, num_frame_per_clib, crop_size,\n crop_size, flow_channels))\n', (2381, 2474), True, 'import tensorflow as tf\n'), ((2737, 2779), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': 'batch_size'}), '(tf.int64, shape=batch_size)\n', (2751, 2779), True, 'import tensorflow as tf\n'), ((2860, 2883), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (2874, 2883), True, 'import tensorflow as tf\n'), ((3912, 4018), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_frame_per_clib, crop_size, crop_size, rgb_channels)'}), '(tf.float32, shape=(batch_size, num_frame_per_clib, crop_size,\n crop_size, rgb_channels))\n', (3926, 4018), True, 'import tensorflow as tf\n'), ((4281, 4323), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': 'batch_size'}), '(tf.int64, shape=batch_size)\n', (4295, 4323), True, 'import tensorflow as tf\n'), ((4404, 4427), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (4418, 4427), True, 'import tensorflow as tf\n'), ((5239, 5285), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['weight_decay'], {}), '(weight_decay)\n', (5271, 5285), True, 'import tensorflow as tf\n'), ((5298, 5388), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', ([], {'regularizer': 'l2_reg', 'weights_list': 'weighyt_list'}), '(regularizer=l2_reg, weights_list=\n 
weighyt_list)\n', (5336, 5388), True, 'import tensorflow as tf\n'), ((5515, 5536), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5534, 5536), True, 'import tensorflow as tf\n'), ((7219, 7422), 'input_data.read_clip_and_label', 'input_data.read_clip_and_label', ([], {'filename': 'filename', 'batch_size': 'batch_size', 'num_frames_per_clip': 'num_frames_per_clip', 'sample_rate': 'sample_rate', 'crop_size': 'crop_size', 'shuffle': 'shuffle', 'add_flow': 'add_flow'}), '(filename=filename, batch_size=batch_size,\n num_frames_per_clip=num_frames_per_clip, sample_rate=sample_rate,\n crop_size=crop_size, shuffle=shuffle, add_flow=add_flow)\n', (7249, 7422), False, 'import input_data\n'), ((9027, 9038), 'time.time', 'time.time', ([], {}), '()\n', (9036, 9038), False, 'import time\n'), ((4612, 4660), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['clips[index]'], {}), '(clips[index])\n', (4646, 4660), True, 'import tensorflow as tf\n'), ((4982, 5001), 'tensorflow.concat', 'tf.concat', (['grads', '(0)'], {}), '(grads, 0)\n', (4991, 5001), True, 'import tensorflow as tf\n'), ((5018, 5041), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (5032, 5041), True, 'import tensorflow as tf\n'), ((5711, 5786), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logit'}), '(labels=labels, logits=logit)\n', (5757, 5786), True, 'import tensorflow as tf\n'), ((6109, 6128), 'tensorflow.argmax', 'tf.argmax', (['logit', '(1)'], {}), '(logit, 1)\n', (6118, 6128), True, 'import tensorflow as tf\n'), ((6169, 6202), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (6176, 6202), True, 'import tensorflow as tf\n'), ((6288, 6307), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (6297, 6307), True, 'import tensorflow as tf\n'), ((6324, 
6377), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer'}), '(name, shape, initializer=initializer)\n', (6339, 6377), True, 'import tensorflow as tf\n'), ((6490, 6528), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (6526, 6528), True, 'import tensorflow as tf\n'), ((6609, 6665), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""weightdecay_losses"""', 'weight_decay'], {}), "('weightdecay_losses', weight_decay)\n", (6629, 6665), True, 'import tensorflow as tf\n'), ((6976, 7002), 'numpy.array', 'np.array', (['rgb_train_images'], {}), '(rgb_train_images)\n', (6984, 7002), True, 'import numpy as np\n'), ((7004, 7026), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (7012, 7026), True, 'import numpy as np\n'), ((10185, 10196), 'time.time', 'time.time', ([], {}), '()\n', (10194, 10196), False, 'import time\n'), ((4907, 4927), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (4921, 4927), True, 'import tensorflow as tf\n'), ((6578, 6596), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (6591, 6596), True, 'import tensorflow as tf\n'), ((9641, 9665), 'numpy.mean', 'np.mean', (['clips_top1_list'], {}), '(clips_top1_list)\n', (9648, 9665), True, 'import numpy as np\n'), ((9714, 9738), 'numpy.mean', 'np.mean', (['clips_top5_list'], {}), '(clips_top5_list)\n', (9721, 9738), True, 'import numpy as np\n'), ((10092, 10110), 'numpy.mean', 'np.mean', (['top1_list'], {}), '(top1_list)\n', (10099, 10110), True, 'import numpy as np\n'), ((10149, 10167), 'numpy.mean', 'np.mean', (['top5_list'], {}), '(top5_list)\n', (10156, 10167), True, 'import numpy as np\n'), ((9389, 9406), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (9399, 9406), True, 'import numpy as np\n'), ((9843, 9878), 'numpy.mean', 'np.mean', (["scores['%d' % _id]"], {'axis': '(0)'}), "(scores['%d' % _id], axis=0)\n", 
(9850, 9878), True, 'import numpy as np\n')] |
import kornia_rs as K
from kornia_rs import Tensor as cvTensor
import torch
import numpy as np
def test_smoke():
    # smoke test: construction preserves shape, element count and strides
    H, W, C = 2, 2, 3
    data = list(range(H * W * C))
    cv_tensor = cvTensor([H, W, C], data)
    assert cv_tensor.shape == [H, W, C]
    assert len(cv_tensor.data) == len(data)
    assert cv_tensor.strides == [6, 3, 1]
def test_conversions():
    H, W, C = 2, 2, 3
    cv_tensor = cvTensor([H, W, C], list(range(H * W * C)))
    # round-trip through an explicit DLPack capsule into torch
    capsule = K.cvtensor_to_dlpack(cv_tensor)
    th_tensor = torch.utils.dlpack.from_dlpack(capsule)
    assert list(th_tensor.shape) == cv_tensor.shape
def test_conversions2():
    """Convert a cvTensor directly via the __dlpack__ protocol."""
    H, W, C = 2, 2, 3
    data = [i for i in range(H * W * C)]
    cv_tensor = cvTensor([H, W, C], data)
    # to torch / numpy straight from the tensor (no explicit capsule)
    th_tensor = torch.utils.dlpack.from_dlpack(cv_tensor)
    # BUG FIX: np._from_dlpack is a private name (removed in newer NumPy);
    # the public API is np.from_dlpack (NumPy >= 1.22).
    np_array = np.from_dlpack(cv_tensor)
np.testing.assert_array_equal(np_array, th_tensor.numpy()) | [
"kornia_rs.cvtensor_to_dlpack",
"kornia_rs.Tensor",
"numpy._from_dlpack",
"torch.utils.dlpack.from_dlpack"
] | [((210, 235), 'kornia_rs.Tensor', 'cvTensor', (['[H, W, C]', 'data'], {}), '([H, W, C], data)\n', (218, 235), True, 'from kornia_rs import Tensor as cvTensor\n'), ((466, 491), 'kornia_rs.Tensor', 'cvTensor', (['[H, W, C]', 'data'], {}), '([H, W, C], data)\n', (474, 491), True, 'from kornia_rs import Tensor as cvTensor\n'), ((538, 569), 'kornia_rs.cvtensor_to_dlpack', 'K.cvtensor_to_dlpack', (['cv_tensor'], {}), '(cv_tensor)\n', (558, 569), True, 'import kornia_rs as K\n'), ((586, 624), 'torch.utils.dlpack.from_dlpack', 'torch.utils.dlpack.from_dlpack', (['dlpack'], {}), '(dlpack)\n', (616, 624), False, 'import torch\n'), ((789, 814), 'kornia_rs.Tensor', 'cvTensor', (['[H, W, C]', 'data'], {}), '([H, W, C], data)\n', (797, 814), True, 'from kornia_rs import Tensor as cvTensor\n'), ((864, 905), 'torch.utils.dlpack.from_dlpack', 'torch.utils.dlpack.from_dlpack', (['cv_tensor'], {}), '(cv_tensor)\n', (894, 905), False, 'import torch\n'), ((921, 947), 'numpy._from_dlpack', 'np._from_dlpack', (['cv_tensor'], {}), '(cv_tensor)\n', (936, 947), True, 'import numpy as np\n')] |
import numpy as np
from lizardanalysis.utils import auxiliaryfunctions
# TODO: calculate frame wise for step-phases not average over all! Or do both in 2 different functions
def climbing_speed(**kwargs):
    """
    Estimate the climbing speed from the Nose tracking point.

    Takes the absolute pixel distance the Nose covers over a window of frames
    centred on the middle of the run, (data_rows_count/2) +/-
    (framerate/speed_interval), and scales it to pixels per second.

    :return: dictionary with function name (key) and list (len=data_rows_count)
        of climbing speed in px/s
    """
    import os
    from pathlib import Path
    from lizardanalysis.utils import auxiliaryfunctions

    # required **kwargs
    data = kwargs.get('data')
    data_rows_count = kwargs.get('data_rows_count')
    config = kwargs.get('config')

    current_path = os.getcwd()
    config_file = Path(config).resolve()
    cfg = auxiliaryfunctions.read_config(config_file)
    framerate = cfg['framerate']

    speed_interval = 5
    half_run = int(data_rows_count / 2)
    half_window = int(framerate / speed_interval)
    window_start = half_run - half_window
    window_end = half_run + half_window
    # TODO: test if +/- int((framerate/speed_interval)) creates start/end out of bounds

    scorer = data.columns[1][0]
    # TODO: filter columns of used labels for likelihood BEFORE calculation
    likelihood = 0.90
    nose_x = data[scorer, 'Nose', 'x']
    distance = abs(nose_x.iloc[window_start] - nose_x.iloc[window_end])
    # the window spans 2/speed_interval seconds -> scale up to one full second
    long_range_speed = int(distance * (speed_interval / 2.))
    print("long range speed (px/sec): ", long_range_speed)

    # one constant speed value per frame of the run
    speed_list = np.zeros((data_rows_count,)) + long_range_speed
return {__name__.rsplit('.', 1)[1]: speed_list} | [
"os.getcwd",
"pathlib.Path",
"lizardanalysis.utils.auxiliaryfunctions.read_config",
"numpy.zeros"
] | [((863, 874), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (872, 874), False, 'import os\n'), ((926, 969), 'lizardanalysis.utils.auxiliaryfunctions.read_config', 'auxiliaryfunctions.read_config', (['config_file'], {}), '(config_file)\n', (956, 969), False, 'from lizardanalysis.utils import auxiliaryfunctions\n'), ((2084, 2112), 'numpy.zeros', 'np.zeros', (['(data_rows_count,)'], {}), '((data_rows_count,))\n', (2092, 2112), True, 'import numpy as np\n'), ((893, 905), 'pathlib.Path', 'Path', (['config'], {}), '(config)\n', (897, 905), False, 'from pathlib import Path\n')] |
"""Autocorrelation plot of data."""
import numpy as np
from ..data import convert_to_dataset
from ..stats import autocorr
from .plot_utils import (
_scale_fig_size,
default_grid,
make_label,
xarray_var_iter,
_create_axes_grid,
filter_plotters_list,
)
from ..utils import _var_names
def plot_autocorr(
    data, var_names=None, max_lag=None, combined=False, figsize=None, textsize=None, ax=None
):
    """Bar plot of the autocorrelation function for a sequence of data.
    Useful in particular for posteriors from MCMC samples which may display correlation.
    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    var_names : list of variable names, optional
        Variables to be plotted, if None all variable are plotted.
        Vector-value stochastics are handled automatically.
    max_lag : int, optional
        Maximum lag to calculate autocorrelation. Defaults to 100 or num draws, whichever is smaller
    combined : bool
        Flag for combining multiple chains into a single chain. If False (default), chains will be
        plotted separately.
    figsize : tuple
        Figure size. If None it will be defined automatically.
        Note this is not used if ax is supplied.
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
        on figsize.
    ax: axes
        Matplotlib axes
    Returns
    -------
    axes : matplotlib axes
    Examples
    --------
    Plot default autocorrelation
    .. plot::
        :context: close-figs
        >>> import arviz as az
        >>> data = az.load_arviz_data('centered_eight')
        >>> az.plot_autocorr(data)
    Plot subset variables by specifying variable name exactly
    .. plot::
        :context: close-figs
        >>> az.plot_autocorr(data, var_names=['mu', 'tau'] )
    Combine chains collapsing by variable
    .. plot::
        :context: close-figs
        >>> az.plot_autocorr(data, var_names=['mu', 'tau'], combined=True)
    Specify maximum lag (x axis bound)
    .. plot::
        :context: close-figs
        >>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True)
    """
    # normalize any supported input into an xarray Dataset of the posterior group
    data = convert_to_dataset(data, group="posterior")
    var_names = _var_names(var_names, data)
    # Default max lag to 100 or max length of chain
    if max_lag is None:
        max_lag = min(100, data["draw"].shape[0])
    # one plotter entry per (variable, coordinate selection) pair to be drawn
    plotters = filter_plotters_list(
        list(xarray_var_iter(data, var_names, combined)), "plot_autocorr"
    )
    length_plotters = len(plotters)
    rows, cols = default_grid(length_plotters)
    # scale figure size, font sizes and line width consistently with the grid
    figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(
        figsize, textsize, rows, cols
    )
    if ax is None:
        _, axes = _create_axes_grid(
            length_plotters, rows, cols, figsize=figsize, squeeze=False, sharex=True, sharey=True
        )
    else:
        axes = ax
    axes = np.atleast_2d(axes)  # in case of only 1 plot
    for (var_name, selection, x), ax_ in zip(plotters, axes.flatten()):
        x_prime = x
        if combined:
            # pool all chains into one series before computing autocorrelation
            x_prime = x.flatten()
        y = autocorr(x_prime)
        # draw the autocorrelation as vertical bars plus a zero baseline
        ax_.vlines(x=np.arange(0, max_lag), ymin=0, ymax=y[0:max_lag], lw=linewidth)
        ax_.hlines(0, 0, max_lag, "steelblue")
        ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
        ax_.tick_params(labelsize=xt_labelsize)
    if axes.size > 0:
        # axes are shared, so limits set on the first subplot apply to all
        axes[0, 0].set_xlim(0, max_lag)
        axes[0, 0].set_ylim(-1, 1)
    return axes
| [
"numpy.arange",
"numpy.atleast_2d"
] | [((3075, 3094), 'numpy.atleast_2d', 'np.atleast_2d', (['axes'], {}), '(axes)\n', (3088, 3094), True, 'import numpy as np\n'), ((3322, 3343), 'numpy.arange', 'np.arange', (['(0)', 'max_lag'], {}), '(0, max_lag)\n', (3331, 3343), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
from ..utils.events.dataclass import Property, evented_dataclass
@evented_dataclass
class GridCanvas:
    """Grid layout configuration for the canvas.

    Right now the only grid mode is still inside one canvas with one
    camera, but future grid modes could support multiple canvases.

    Attributes
    ----------
    enabled : bool
        If grid is enabled or not.
    stride : int
        Number of layers to place in each grid square before moving on to
        the next square. The default ordering is to place the most visible
        layer in the top left corner of the grid. A negative stride will
        cause the order in which the layers are placed in the grid to be
        reversed.
    shape : 2-tuple of int
        Number of rows and columns in the grid. A value of -1 for either or
        both of will be used the row and column numbers will trigger an
        auto calculation of the necessary grid shape to appropriately fill
        all the layers at the appropriate stride.
    """

    enabled: bool = False
    stride: int = 1
    shape: Property[Tuple, None, tuple] = (-1, -1)

    def actual_shape(self, nlayers=1):
        """Resolve the concrete grid shape for *nlayers* layers.

        Any -1 entry in ``shape`` is auto-computed from the number of
        layers and the current stride; a disabled grid is always (1, 1).

        Parameters
        ----------
        nlayers : int
            Number of layers that need to be placed in the grid.

        Returns
        -------
        shape : 2-tuple of int
            Number of rows and columns in the grid.
        """
        if not self.enabled:
            return (1, 1)
        n_row, n_column = self.shape
        # each grid square holds |stride| layers
        n_grid_squares = np.ceil(nlayers / abs(self.stride)).astype(int)
        if n_row == -1 and n_column == -1:
            # choose a near-square layout
            n_column = np.ceil(np.sqrt(n_grid_squares)).astype(int)
            n_row = np.ceil(n_grid_squares / n_column).astype(int)
        elif n_row == -1:
            n_row = np.ceil(n_grid_squares / n_column).astype(int)
        elif n_column == -1:
            n_column = np.ceil(n_grid_squares / n_row).astype(int)
        return (max(1, n_row), max(1, n_column))

    def position(self, index, nlayers):
        """Map a linear layer index to its (row, column) grid position.

        Returns (0, 0) whenever the grid is disabled.

        Parameters
        ----------
        index : int
            Position of current layer in layer list.
        nlayers : int
            Number of layers that need to be placed in the grid.

        Returns
        -------
        position : 2-tuple of int
            Row and column position of current index in the grid.
        """
        if not self.enabled:
            return (0, 0)
        n_row, n_column = self.actual_shape(nlayers)
        # a negative stride reverses the placement order
        ordinal = nlayers - index - 1 if self.stride < 0 else index
        ordinal = (ordinal // abs(self.stride)) % (n_row * n_column)
        return (ordinal // n_column, ordinal % n_column)
| [
"numpy.ceil",
"numpy.sqrt"
] | [((2052, 2086), 'numpy.ceil', 'np.ceil', (['(n_grid_squares / n_column)'], {}), '(n_grid_squares / n_column)\n', (2059, 2086), True, 'import numpy as np\n'), ((1991, 2014), 'numpy.sqrt', 'np.sqrt', (['n_grid_squares'], {}), '(n_grid_squares)\n', (1998, 2014), True, 'import numpy as np\n'), ((2153, 2187), 'numpy.ceil', 'np.ceil', (['(n_grid_squares / n_column)'], {}), '(n_grid_squares / n_column)\n', (2160, 2187), True, 'import numpy as np\n'), ((2260, 2291), 'numpy.ceil', 'np.ceil', (['(n_grid_squares / n_row)'], {}), '(n_grid_squares / n_row)\n', (2267, 2291), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 11:13:15 2019
@author: jkern
"""
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def hydro(sim_years):
#########################################################################
# This purpose of this script is to use synthetic streamflows at major California
# reservoir sites to simulate daily hydropower production for the PG&E and SCE
# zones of the California electricty market (CAISO), using parameters optimized
# via a differential evolution algorithm.
#########################################################################
# load California storage reservoir (ORCA) sites
df_sites = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name = 'ORCA',header=0)
ORCA_sites = list(df_sites)
# load upper generation amounts for each predicted hydropower dam (PG&E and SCE)
upper_gen = pd.read_excel('CA_hydropower/upper.xlsx',header =0)
# month-day calender
calender = pd.read_excel('CA_hydropower/calender.xlsx',header=0)
# load simulated full natural flows at each California storage reservoir (ORCA site)
df_sim = pd.read_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv',header=0)
df_sim = df_sim.loc[0:(sim_years+3)*365,:]
# load simulated outflows calculated by ORCA
df_ORCA = pd.read_csv('ORCA_output.csv')
outflow_sites = ['SHA_otf','ORO_otf','YRS_otf','FOL_otf','NML_otf','DNP_otf','EXC_otf','MIL_otf','ISB_otf','SUC_otf','KWH_otf','PFT_otf']
for i in range(0,len(df_ORCA)):
for s in outflow_sites:
df_sim.loc[i,s] = df_ORCA.loc[i,s]
sim_years = sim_years+3
#Add month and day columns to the dataframe
Month = []
Day = []
count = 0
for i in range(0,len(df_sim)):
if count < 365:
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
else:
count = 0
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
df_sim['Month']=Month
df_sim['Day']=Day
# calculate simulated totals
Sim_totals = []
for i in range(0,sim_years):
sample = df_sim.loc[i*365:i*365+365,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Sim_totals = np.append(Sim_totals,total)
# load historical full natural flows for 2001, 2005, 2010 and 2011
df_hist = pd.read_excel('CA_hydropower/hist_reservoir_inflows.xlsx',header=0)
Hist_totals = []
Hist_years = [2001,2005,2010,2011]
for i in Hist_years:
sample = df_hist[df_hist['year'] == i]
sample = sample.loc[:,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Hist_totals = np.append(Hist_totals,total)
# find most similar historical year for each simulated year
Rule_list=[]
for i in range(0,sim_years):
Difference=abs(Sim_totals[i]- Hist_totals)
#Select which rule to use
for n in range(0,len(Hist_years)):
if Difference[n]==np.min(Difference):
Rule=n
Rule_list.append(Rule)
# PGE hydro projects
PGE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='PGE',header=0)
PGE_dams = list(PGE_names.loc[:,'Balch 1':])
PGE_Storage=[PGE_dams[3],PGE_dams[7],PGE_dams[8],PGE_dams[9]]
PGE_No_Data_Dams=[PGE_dams[2],PGE_dams[4],PGE_dams[10],PGE_dams[11],PGE_dams[15],PGE_dams[16],PGE_dams[17],PGE_dams[26],PGE_dams[30],PGE_dams[38],PGE_dams[39],PGE_dams[55],PGE_dams[60],PGE_dams[65]]
## SCE hydro projects
SCE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='SCE',header=0)
SCE_dams = list(SCE_names.loc[:,'Big_Creek_1 ':])
SCE_No_Data_Dams=[SCE_dams[7],SCE_dams[8],SCE_dams[12]]
#Simulate all the PGE inflow dams
check_unused = []
PGE_name_list = []
SCE_name_list = []
f_horizon = 7
for name in PGE_dams:
STOR = np.zeros((365*(sim_years),1))
for year in range(0,sim_years):
GEN = np.zeros((365,7))
if name in PGE_No_Data_Dams:
pass
elif name in PGE_Storage:
# which operating rule to use?
Rule=Rule_list[year]
File_name='CA_hydropower/PGE_Storage_FNF_V2/1.0_FNF_Storage_Rule_' + str(name) +'.txt'
Temp_Rule=pd.read_csv(File_name,delimiter=' ',header=None)
peak_flow,starting,ending,refill_1_date,evac_date,peak_end,refill_2_date,storage,power_cap,eff,min_power=Temp_Rule.loc[Rule][:]
k = str(PGE_names.loc[0][name])
I_O=str(PGE_names.loc[1][name])
#Which site to use
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='New Exchequer' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
None
flow_ts = df_sim.loc[:,site_name].values
# iterate through every day of the year
for day in range(0,365):
for fd in range(0,f_horizon):
s = day + fd
#forecast day? if not, take beginning storage from previous time step
if day>0 and fd < 1:
storage = STOR[year*365+day-1]
elif day<1 and fd <1:
storage = 0
else:
pass
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's during first refill
if s < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*s
storage = avail_power-gen
# if it maintains the water
elif s >= refill_1_date and s < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif s >= evac_date and s < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (s- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif s >= peak_end and s < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif s >=refill_2_date :
gen = power_cap-((power_cap-ending)/(365-refill_2_date)* (s-refill_2_date))
GEN[day,fd] = gen
if fd < 1:
STOR[year*365+day] = storage
else:
upper_now=upper_gen.loc[upper_gen.loc[:,'Name']== name]
upper_now=upper_now.reset_index(drop=True)
upper=upper_now.loc[0]['Max Gen']
Rule=Rule_list[year]
File_name='CA_hydropower/PGE_FNF_2/FNF_' + str(name) +'.txt'
Temp_Rule=pd.read_csv(File_name,delimiter=' ',header=None)
peak_flow,sum_cap,spr_cap,fall_cap,win_date,spr_date,sum_date,fall_date,eff,check_surplus=Temp_Rule.loc[Rule][:]
surplus = 0
transfer = 0
k = str(PGE_names.loc[0][name])
I_O=str(PGE_names.loc[1][name])
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='New Exchequer' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
None
flow_ts = df_sim.loc[:,site_name].values
annual = flow_ts[year*365:year*365+365]
max_flow = np.max(annual[105:260])
L = list(annual)
peak_flow = L.index(max_flow)
for day in range(0,365):
for fd in range(0,f_horizon):
s = day + fd
#forecast day? if not, take beginning storage from previous time step
if day>0 and fd < 1:
surplus = STOR[year*365+day-1]
elif day<1 and fd <1:
surplus = 0
else:
pass
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's still winter, operate as RoR
if s < peak_flow - win_date:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
# if it's spring, operate as RoR with upper limit
elif s >= peak_flow - win_date and s < peak_flow - spr_date:
if avail_power > spr_cap:
surplus = surplus + (avail_power - spr_cap)
gen = spr_cap
elif avail_power <= spr_cap:
deficit = spr_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
# if it's summer, operate as RoR with upper limit
elif s >= peak_flow - spr_date and s < peak_flow + sum_date:
if avail_power > sum_cap:
surplus = surplus + (avail_power - sum_cap)
gen = sum_cap
elif avail_power <= sum_cap:
deficit = sum_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
# if it's fall, operate as RoR with upper limit
elif s >= peak_flow + sum_date and s < peak_flow + fall_date:
if avail_power > fall_cap:
surplus = surplus + (avail_power - fall_cap)
gen = fall_cap
elif avail_power <= fall_cap:
deficit = fall_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
elif s >= peak_flow + fall_date:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
else:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
GEN[day,fd] = gen
if fd < 1:
STOR[year*365+day] = surplus
# unused=surplus
# check_unused.append(surplus)
# rest_surplus=sum(check_unused)
if year < 1:
A_PGE = GEN
else:
A_PGE = np.vstack((A_PGE,GEN))
if name in PGE_No_Data_Dams:
pass
else:
PGE_name_list = np.append(PGE_name_list,name)
name_index = PGE_dams.index(name)
if name_index < 1:
M_PGE = A_PGE
else:
M_PGE = np.dstack((M_PGE,A_PGE))
for i in range(0,len(PGE_name_list)):
name = PGE_name_list[i]
filename = 'CA_hydropower/' + name + '_hydro.csv'
gen = M_PGE[:,:,i]
df_gen = pd.DataFrame(gen)
df_gen.columns = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_gen.to_csv(filename)
##Simulate all the SCE inflow dams
for name in SCE_dams:
STOR = np.zeros((365*sim_years,1))
for year in range(0,sim_years):
GEN = np.zeros((365,7))
if name in SCE_No_Data_Dams:
pass
else:
Rule=Rule_list[year]
File_name='CA_hydropower/SCE_FNF_V2/SCE_fnf_' + str(name) +'.txt'
Temp_Rule=pd.read_csv(File_name,delimiter=' ',header=None)
peak_flow,sum_cap,spr_cap,fall_cap,win_date,spr_date,sum_date,fall_date,eff,check_surplus=Temp_Rule.loc[Rule][:]
surplus = 0
transfer = 0
k = str(SCE_names.loc[0][name])
I_O=str(SCE_names.loc[1][name])
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='New Exchequer' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
None
flow_ts = df_sim.loc[:,site_name].values
annual = flow_ts[year*365:year*365+365]
max_flow = np.max(annual[105:260])
L = list(annual)
peak_flow = L.index(max_flow)
# iterate through every day of the year
for day in range(0,365):
for fd in range(0,f_horizon):
s = day + fd
#forecast day? if not, take beginning storage from previous time step
if day>1 and fd < 1:
surplus = STOR[year*365+day-1]
elif day<1 and fd <1:
surplus = 0
else:
pass
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's still winter, operate as RoR
if s < peak_flow - win_date:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
# if it's spring, operate as RoR with upper limit
elif s >= peak_flow - win_date and s < peak_flow - spr_date:
if avail_power > spr_cap:
surplus = surplus + (avail_power - spr_cap)
gen = spr_cap
elif avail_power <= spr_cap:
deficit = spr_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
# if it's summer, operate as RoR with upper limit
elif s >= peak_flow - spr_date and s < peak_flow + sum_date:
if avail_power > sum_cap:
surplus = surplus + (avail_power - sum_cap)
gen = sum_cap
elif avail_power <= sum_cap:
deficit = sum_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
# if it's fall, operate as RoR with upper limit
elif s >= peak_flow + sum_date and s < peak_flow + fall_date:
if avail_power > fall_cap:
surplus = surplus + (avail_power - fall_cap)
gen = fall_cap
elif avail_power <= fall_cap:
deficit = fall_cap - avail_power
if surplus>0:
transfer = np.min((surplus,deficit))
surplus = surplus - transfer
else:
transfer = 0
gen = avail_power + transfer
elif s >= peak_flow + fall_date:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
else:
if avail_power >= upper:
gen = upper
surplus = surplus + (avail_power - upper)
else:
gen = avail_power
GEN[day,fd] = gen
if fd < 1:
STOR[year*365+day] = surplus
if year < 1:
A_SCE = GEN
else:
A_SCE = np.vstack((A_SCE,GEN))
if name in SCE_No_Data_Dams:
pass
else:
SCE_name_list = np.append(SCE_name_list,name)
name_index = SCE_dams.index(name)
if name_index < 1:
M_SCE = A_SCE
else:
M_SCE = np.dstack((M_SCE,A_SCE))
for i in range(0,len(SCE_name_list)):
name = SCE_name_list[i]
filename = 'CA_hydropower/' + name + '_hydro.csv'
gen = M_SCE[:,:,i]
df_gen = pd.DataFrame(gen)
df_gen.columns = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_gen.to_csv(filename)
PGE_total=np.sum(M_PGE,axis=2)
SCE_total=np.sum(M_SCE,axis=2)
# more maximum generation constraints
for i in range(0,len(PGE_total)):
for fd in range(0,f_horizon):
PGE_total[i,fd] = np.min((PGE_total[i,fd],851000/7))
SCE_total[i,fd] = np.min((SCE_total[i,fd],153000/7))
# Cut first year and last two years
PGE_total = PGE_total[365:-730]
SCE_total = SCE_total[365:-730]
df_PGE = pd.DataFrame(PGE_total)
df_PGE.columns = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_SCE = pd.DataFrame(SCE_total)
df_SCE.columns = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_PGE.to_csv('CA_hydropower/PGE_valley_hydro.csv')
df_SCE.to_csv('CA_hydropower/SCE_hydro.csv')
# #Forecast analysis
# TG = PGE_total
# differences=np.zeros((len(PGE_total)-f_horizon+1,f_horizon))
# for i in range(0,len(PGE_total)-f_horizon+1):
# differences[i,:] = (TG[i,:] - TG[i:i+f_horizon,0])/1000
#
# month_ID = np.zeros(((sim_years-1)*365,1))
# for i in range(0,sim_years-1):
# month_ID[i*365+0:i*365+31] = 1
# month_ID[i*365+31:i*365+59]=2
# month_ID[i*365+59:i*365+90]=3
# month_ID[i*365+90:i*365+120]=4
# month_ID[i*365+120:i*365+151]=5
# month_ID[i*365+151:i*365+181]=6
# month_ID[i*365+181:i*365+212]=7
# month_ID[i*365+212:i*365+243]=8
# month_ID[i*365+243:i*365+273]=9
# month_ID[i*365+273:i*365+304]=10
# month_ID[i*365+304:i*365+334]=11
# month_ID[i*365+334:i*365+365]=12
#
# month_ID = month_ID[:-6]
#
# combined = np.column_stack((differences,month_ID))
# df_combined = pd.DataFrame(combined)
# df_combined.columns = ['1','2','3','4','5','6','7','Month']
#
# plt.figure()
#
# months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
# for i in range(0,12):
# plt.subplot(4,3,i+1)
#
# month_selection = df_combined.loc[df_combined['Month']==i+1,:]
#
# for j in range(0,len(month_selection)):
#
# plt.plot(month_selection.iloc[j,0:f_horizon])
#
# if i ==6:
# plt.ylabel('Difference (GWh)',fontweight='bold')
# if i == 10:
# plt.xlabel('Forecast Horizon (Days)',fontweight='bold')
# plt.title(months[i],fontweight='bold')
# plt.ylim([-120,120])
# plt.subplots_adjust(wspace=0.6,hspace=1.2)
#
# plt.savefig('PGE_perfect_foresight.png', dpi=2000)
#
#
# #Forecast analysis
# TG = SCE_total
# differences=np.zeros((len(SCE_total)-f_horizon+1,f_horizon))
# for i in range(0,len(SCE_total)-f_horizon+1):
# differences[i,:] = (TG[i,:] - TG[i:i+f_horizon,0])/1000
#
# month_ID = np.zeros(((sim_years-1)*365,1))
# for i in range(0,sim_years-1):
# month_ID[i*365+0:i*365+31] = 1
# month_ID[i*365+31:i*365+59]=2
# month_ID[i*365+59:i*365+90]=3
# month_ID[i*365+90:i*365+120]=4
# month_ID[i*365+120:i*365+151]=5
# month_ID[i*365+151:i*365+181]=6
# month_ID[i*365+181:i*365+212]=7
# month_ID[i*365+212:i*365+243]=8
# month_ID[i*365+243:i*365+273]=9
# month_ID[i*365+273:i*365+304]=10
# month_ID[i*365+304:i*365+334]=11
# month_ID[i*365+334:i*365+365]=12
#
# month_ID = month_ID[:-6]
#
# combined = np.column_stack((differences,month_ID))
# df_combined = pd.DataFrame(combined)
# df_combined.columns = ['1','2','3','4','5','6','7','Month']
#
# plt.figure()
#
# months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
# for i in range(0,12):
# plt.subplot(4,3,i+1)
#
# month_selection = df_combined.loc[df_combined['Month']==i+1,:]
#
# for j in range(0,len(month_selection)):
#
# plt.plot(month_selection.iloc[j,0:f_horizon])
#
# if i ==6:
# plt.ylabel('Difference (GWh)',fontweight='bold')
# if i == 10:
# plt.xlabel('Forecast Horizon (Days)',fontweight='bold')
# plt.title(months[i],fontweight='bold')
# plt.ylim([-25,25])
# plt.subplots_adjust(wspace=0.6,hspace=1.2)
#
# plt.savefig('SCE_perfect_foresight.png', dpi=2000)
return None
| [
"pandas.DataFrame",
"numpy.dstack",
"numpy.sum",
"pandas.read_csv",
"numpy.zeros",
"pandas.read_excel",
"numpy.append",
"numpy.min",
"numpy.max",
"numpy.vstack"
] | [((805, 875), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/sites.xlsx"""'], {'sheet_name': '"""ORCA"""', 'header': '(0)'}), "('CA_hydropower/sites.xlsx', sheet_name='ORCA', header=0)\n", (818, 875), True, 'import pandas as pd\n'), ((1014, 1065), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/upper.xlsx"""'], {'header': '(0)'}), "('CA_hydropower/upper.xlsx', header=0)\n", (1027, 1065), True, 'import pandas as pd\n'), ((1111, 1165), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/calender.xlsx"""'], {'header': '(0)'}), "('CA_hydropower/calender.xlsx', header=0)\n", (1124, 1165), True, 'import pandas as pd\n'), ((1272, 1347), 'pandas.read_csv', 'pd.read_csv', (['"""Synthetic_streamflows/synthetic_streamflows_CA.csv"""'], {'header': '(0)'}), "('Synthetic_streamflows/synthetic_streamflows_CA.csv', header=0)\n", (1283, 1347), True, 'import pandas as pd\n'), ((1465, 1495), 'pandas.read_csv', 'pd.read_csv', (['"""ORCA_output.csv"""'], {}), "('ORCA_output.csv')\n", (1476, 1495), True, 'import pandas as pd\n'), ((2681, 2749), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/hist_reservoir_inflows.xlsx"""'], {'header': '(0)'}), "('CA_hydropower/hist_reservoir_inflows.xlsx', header=0)\n", (2694, 2749), True, 'import pandas as pd\n'), ((3432, 3501), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/sites.xlsx"""'], {'sheet_name': '"""PGE"""', 'header': '(0)'}), "('CA_hydropower/sites.xlsx', sheet_name='PGE', header=0)\n", (3445, 3501), True, 'import pandas as pd\n'), ((3866, 3935), 'pandas.read_excel', 'pd.read_excel', (['"""CA_hydropower/sites.xlsx"""'], {'sheet_name': '"""SCE"""', 'header': '(0)'}), "('CA_hydropower/sites.xlsx', sheet_name='SCE', header=0)\n", (3879, 3935), True, 'import pandas as pd\n'), ((28872, 28893), 'numpy.sum', 'np.sum', (['M_PGE'], {'axis': '(2)'}), '(M_PGE, axis=2)\n', (28878, 28893), True, 'import numpy as np\n'), ((28907, 28928), 'numpy.sum', 'np.sum', (['M_SCE'], {'axis': '(2)'}), '(M_SCE, 
axis=2)\n', (28913, 28928), True, 'import numpy as np\n'), ((29319, 29342), 'pandas.DataFrame', 'pd.DataFrame', (['PGE_total'], {}), '(PGE_total)\n', (29331, 29342), True, 'import pandas as pd\n'), ((29426, 29449), 'pandas.DataFrame', 'pd.DataFrame', (['SCE_total'], {}), '(SCE_total)\n', (29438, 29449), True, 'import pandas as pd\n'), ((2563, 2591), 'numpy.append', 'np.append', (['Sim_totals', 'total'], {}), '(Sim_totals, total)\n', (2572, 2591), True, 'import numpy as np\n'), ((2998, 3027), 'numpy.append', 'np.append', (['Hist_totals', 'total'], {}), '(Hist_totals, total)\n', (3007, 3027), True, 'import numpy as np\n'), ((4238, 4268), 'numpy.zeros', 'np.zeros', (['(365 * sim_years, 1)'], {}), '((365 * sim_years, 1))\n', (4246, 4268), True, 'import numpy as np\n'), ((19588, 19605), 'pandas.DataFrame', 'pd.DataFrame', (['gen'], {}), '(gen)\n', (19600, 19605), True, 'import pandas as pd\n'), ((19809, 19839), 'numpy.zeros', 'np.zeros', (['(365 * sim_years, 1)'], {}), '((365 * sim_years, 1))\n', (19817, 19839), True, 'import numpy as np\n'), ((28734, 28751), 'pandas.DataFrame', 'pd.DataFrame', (['gen'], {}), '(gen)\n', (28746, 28751), True, 'import pandas as pd\n'), ((1975, 2021), 'numpy.append', 'np.append', (['Month', "calender.loc[count, 'Month']"], {}), "(Month, calender.loc[count, 'Month'])\n", (1984, 2021), True, 'import numpy as np\n'), ((2038, 2080), 'numpy.append', 'np.append', (['Day', "calender.loc[count, 'Day']"], {}), "(Day, calender.loc[count, 'Day'])\n", (2047, 2080), True, 'import numpy as np\n'), ((2165, 2211), 'numpy.append', 'np.append', (['Month', "calender.loc[count, 'Month']"], {}), "(Month, calender.loc[count, 'Month'])\n", (2174, 2211), True, 'import numpy as np\n'), ((2228, 2270), 'numpy.append', 'np.append', (['Day', "calender.loc[count, 'Day']"], {}), "(Day, calender.loc[count, 'Day'])\n", (2237, 2270), True, 'import numpy as np\n'), ((2526, 2540), 'numpy.sum', 'np.sum', (['sample'], {}), '(sample)\n', (2532, 2540), True, 'import numpy as 
np\n'), ((2960, 2974), 'numpy.sum', 'np.sum', (['sample'], {}), '(sample)\n', (2966, 2974), True, 'import numpy as np\n'), ((4348, 4366), 'numpy.zeros', 'np.zeros', (['(365, 7)'], {}), '((365, 7))\n', (4356, 4366), True, 'import numpy as np\n'), ((19191, 19221), 'numpy.append', 'np.append', (['PGE_name_list', 'name'], {}), '(PGE_name_list, name)\n', (19200, 19221), True, 'import numpy as np\n'), ((19917, 19935), 'numpy.zeros', 'np.zeros', (['(365, 7)'], {}), '((365, 7))\n', (19925, 19935), True, 'import numpy as np\n'), ((28337, 28367), 'numpy.append', 'np.append', (['SCE_name_list', 'name'], {}), '(SCE_name_list, name)\n', (28346, 28367), True, 'import numpy as np\n'), ((29081, 29119), 'numpy.min', 'np.min', (['(PGE_total[i, fd], 851000 / 7)'], {}), '((PGE_total[i, fd], 851000 / 7))\n', (29087, 29119), True, 'import numpy as np\n'), ((29146, 29184), 'numpy.min', 'np.min', (['(SCE_total[i, fd], 153000 / 7)'], {}), '((SCE_total[i, fd], 153000 / 7))\n', (29152, 29184), True, 'import numpy as np\n'), ((3315, 3333), 'numpy.min', 'np.min', (['Difference'], {}), '(Difference)\n', (3321, 3333), True, 'import numpy as np\n'), ((19067, 19090), 'numpy.vstack', 'np.vstack', (['(A_PGE, GEN)'], {}), '((A_PGE, GEN))\n', (19076, 19090), True, 'import numpy as np\n'), ((19370, 19395), 'numpy.dstack', 'np.dstack', (['(M_PGE, A_PGE)'], {}), '((M_PGE, A_PGE))\n', (19379, 19395), True, 'import numpy as np\n'), ((20165, 20215), 'pandas.read_csv', 'pd.read_csv', (['File_name'], {'delimiter': '""" """', 'header': 'None'}), "(File_name, delimiter=' ', header=None)\n", (20176, 20215), True, 'import pandas as pd\n'), ((22893, 22916), 'numpy.max', 'np.max', (['annual[105:260]'], {}), '(annual[105:260])\n', (22899, 22916), True, 'import numpy as np\n'), ((28213, 28236), 'numpy.vstack', 'np.vstack', (['(A_SCE, GEN)'], {}), '((A_SCE, GEN))\n', (28222, 28236), True, 'import numpy as np\n'), ((28516, 28541), 'numpy.dstack', 'np.dstack', (['(M_SCE, A_SCE)'], {}), '((M_SCE, A_SCE))\n', (28525, 
28541), True, 'import numpy as np\n'), ((4714, 4764), 'pandas.read_csv', 'pd.read_csv', (['File_name'], {'delimiter': '""" """', 'header': 'None'}), "(File_name, delimiter=' ', header=None)\n", (4725, 4764), True, 'import pandas as pd\n'), ((10910, 10960), 'pandas.read_csv', 'pd.read_csv', (['File_name'], {'delimiter': '""" """', 'header': 'None'}), "(File_name, delimiter=' ', header=None)\n", (10921, 10960), True, 'import pandas as pd\n'), ((13692, 13715), 'numpy.max', 'np.max', (['annual[105:260]'], {}), '(annual[105:260])\n', (13698, 13715), True, 'import numpy as np\n'), ((24973, 24999), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (24979, 24999), True, 'import numpy as np\n'), ((15729, 15755), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (15735, 15755), True, 'import numpy as np\n'), ((25882, 25908), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (25888, 25908), True, 'import numpy as np\n'), ((16638, 16664), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (16644, 16664), True, 'import numpy as np\n'), ((26803, 26829), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (26809, 26829), True, 'import numpy as np\n'), ((17559, 17585), 'numpy.min', 'np.min', (['(surplus, deficit)'], {}), '((surplus, deficit))\n', (17565, 17585), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import mdtraj as md
from scattering.van_hove import compute_van_hove
from scattering.utils.io import get_fn
def test_van_hove():
trj = md.load(
get_fn('spce.xtc'),
top=get_fn('spce.gro')
)[:100]
chunk_length = 2
r, t, g_r_t = compute_van_hove(trj, chunk_length=chunk_length)
assert len(t) == 2
assert len(r) == 200
assert np.shape(g_r_t) == (2, 200)
# Check normalization to ~1
assert 0.95 < np.mean(g_r_t[:, 100:]) < 1.05
fig, ax = plt.subplots()
for i in range(len(t)):
ax.plot(r, g_r_t[i], '.-', label=t[i])
ax.set_ylim((0, 3))
ax.legend()
fig.savefig('vhf.pdf')
| [
"numpy.shape",
"scattering.utils.io.get_fn",
"numpy.mean",
"matplotlib.pyplot.subplots",
"scattering.van_hove.compute_van_hove"
] | [((313, 361), 'scattering.van_hove.compute_van_hove', 'compute_van_hove', (['trj'], {'chunk_length': 'chunk_length'}), '(trj, chunk_length=chunk_length)\n', (329, 361), False, 'from scattering.van_hove import compute_van_hove\n'), ((547, 561), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (559, 561), True, 'import matplotlib.pyplot as plt\n'), ((422, 437), 'numpy.shape', 'np.shape', (['g_r_t'], {}), '(g_r_t)\n', (430, 437), True, 'import numpy as np\n'), ((501, 524), 'numpy.mean', 'np.mean', (['g_r_t[:, 100:]'], {}), '(g_r_t[:, 100:])\n', (508, 524), True, 'import numpy as np\n'), ((209, 227), 'scattering.utils.io.get_fn', 'get_fn', (['"""spce.xtc"""'], {}), "('spce.xtc')\n", (215, 227), False, 'from scattering.utils.io import get_fn\n'), ((241, 259), 'scattering.utils.io.get_fn', 'get_fn', (['"""spce.gro"""'], {}), "('spce.gro')\n", (247, 259), False, 'from scattering.utils.io import get_fn\n')] |
import numpy as np
import torch
import warnings
from .communication import MPI
from . import constants
from . import dndarray
from . import factories
from . import stride_tricks
from . import tiling
from . import types
from . import operations
# Public API of this module: names exported by `from <module> import *`.
# Kept in alphabetical order for easy scanning and diffing.
__all__ = [
    "concatenate",
    "diag",
    "diagonal",
    "expand_dims",
    "flatten",
    "flip",
    "fliplr",
    "flipud",
    "hstack",
    "reshape",
    "resplit",
    "shape",
    "sort",
    "squeeze",
    "topk",
    "unique",
    "vstack",
]
def concatenate(arrays, axis=0):
    """
    Join 2 arrays along an existing axis.
    Parameters
    ----------
    arrays: tuple of 2 DNDarrays
        The arrays must have the same shape, except in the dimension corresponding to axis (the first, by default).
    axis: int, optional
        The axis along which the arrays will be joined. Default is 0.
    Returns
    -------
    res: DNDarray
        The concatenated DNDarray
    Raises
    ------
    RuntimeError
        If the concatted DNDarray meta information, e.g. split or comm, does not match.
    TypeError
        If the passed parameters are not of correct type (see documentation above).
    ValueError
        If the number of passed arrays is less than two or their shapes do not match.
    Examples
    --------
    >>> x = ht.zeros((3, 5), split=None)
    [0/1] tensor([[0., 0., 0., 0., 0.],
    [0/1]         [0., 0., 0., 0., 0.],
    [0/1]         [0., 0., 0., 0., 0.]])
    [1/1] tensor([[0., 0., 0., 0., 0.],
    [1/1]         [0., 0., 0., 0., 0.],
    [1/1]         [0., 0., 0., 0., 0.]])
    >>> y = ht.ones((3, 6), split=0)
    [0/1] tensor([[1., 1., 1., 1., 1., 1.],
    [0/1]         [1., 1., 1., 1., 1., 1.]])
    [1/1] tensor([[1., 1., 1., 1., 1., 1.]])
    >>> ht.concatenate((x, y), axis=1)
    [0/1] tensor([[0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.],
    [0/1]         [0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.]])
    [1/1] tensor([[0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.]])
    >>> x = ht.zeros((4, 5), split=1)
    [0/1] tensor([[0., 0., 0.],
    [0/1]         [0., 0., 0.],
    [0/1]         [0., 0., 0.],
    [0/1]         [0., 0., 0.]])
    [1/1] tensor([[0., 0.],
    [1/1]         [0., 0.],
    [1/1]         [0., 0.],
    [1/1]         [0., 0.]])
    >>> y = ht.ones((3, 5), split=1)
    [0/1] tensor([[1., 1., 1.],
    [0/1]         [1., 1., 1.],
    [0/1]         [1., 1., 1.]])
    [1/1] tensor([[1., 1.],
    [1/1]         [1., 1.],
    [1/1]         [1., 1.]])
    >>> ht.concatenate((x, y), axis=0)
    [0/1] tensor([[0., 0., 0.],
    [0/1]         [0., 0., 0.],
    [0/1]         [0., 0., 0.],
    [0/1]         [0., 0., 0.],
    [0/1]         [1., 1., 1.],
    [0/1]         [1., 1., 1.],
    [0/1]         [1., 1., 1.]])
    [1/1] tensor([[0., 0.],
    [1/1]         [0., 0.],
    [1/1]         [0., 0.],
    [1/1]         [0., 0.],
    [1/1]         [1., 1.],
    [1/1]         [1., 1.],
    [1/1]         [1., 1.]])
    """
    if not isinstance(arrays, (tuple, list)):
        raise TypeError("arrays must be a list or a tuple")
    # a single array cannot be concatenated
    if len(arrays) < 2:
        raise ValueError("concatenate requires 2 arrays")
    # concatenate multiple arrays
    elif len(arrays) > 2:
        # reduce pairwise: concat the first two, then fold in the remaining arrays
        res = concatenate((arrays[0], arrays[1]), axis=axis)
        for a in range(2, len(arrays)):
            res = concatenate((res, arrays[a]), axis=axis)
        return res
    # unpack the arrays
    arr0, arr1 = arrays[0], arrays[1]
    # input sanitation
    if not isinstance(arr0, dndarray.DNDarray) or not isinstance(arr1, dndarray.DNDarray):
        raise TypeError("Both arrays must be DNDarrays")
    if not isinstance(axis, int):
        raise TypeError("axis must be an integer, currently: {}".format(type(axis)))
    axis = stride_tricks.sanitize_axis(arr0.gshape, axis)
    if arr0.ndim != arr1.ndim:
        raise ValueError("DNDarrays must have the same number of dimensions")
    if not all([arr0.gshape[i] == arr1.gshape[i] for i in range(len(arr0.gshape)) if i != axis]):
        raise ValueError(
            "Arrays cannot be concatenated, shapes must be the same in every axis "
            "except the selected axis: {}, {}".format(arr0.gshape, arr1.gshape)
        )
    # different communicators may not be concatenated
    if arr0.comm != arr1.comm:
        raise RuntimeError("Communicators of passed arrays mismatch.")
    # identify common data type
    out_dtype = types.promote_types(arr0.dtype, arr1.dtype)
    if arr0.dtype != out_dtype:
        arr0 = out_dtype(arr0, device=arr0.device)
    if arr1.dtype != out_dtype:
        arr1 = out_dtype(arr1, device=arr1.device)
    s0, s1 = arr0.split, arr1.split
    # no splits, local concat
    if s0 is None and s1 is None:
        return factories.array(
            torch.cat((arr0._DNDarray__array, arr1._DNDarray__array), dim=axis),
            device=arr0.device,
            comm=arr0.comm,
        )
    # non-matching splits when both arrays are split
    elif s0 != s1 and all([s is not None for s in [s0, s1]]):
        raise RuntimeError(
            "DNDarrays given have differing split axes, arr0 {} arr1 {}".format(s0, s1)
        )
    # unsplit and split array
    elif (s0 is None and s1 != axis) or (s1 is None and s0 != axis):
        # the unsplit array is chunked locally to match the split array's layout
        out_shape = tuple(
            arr1.gshape[x] if x != axis else arr0.gshape[x] + arr1.gshape[x]
            for x in range(len(arr1.gshape))
        )
        out = factories.empty(
            out_shape, split=s1 if s1 is not None else s0, device=arr1.device, comm=arr0.comm
        )
        _, _, arr0_slice = arr1.comm.chunk(arr0.shape, arr1.split)
        _, _, arr1_slice = arr0.comm.chunk(arr1.shape, arr0.split)
        out._DNDarray__array = torch.cat(
            (arr0._DNDarray__array[arr0_slice], arr1._DNDarray__array[arr1_slice]), dim=axis
        )
        out._DNDarray__comm = arr0.comm
        return out
    # matching splits, or only one array split
    elif s0 == s1 or any([s is None for s in [s0, s1]]):
        if s0 != axis and all([s is not None for s in [s0, s1]]):
            # the axis is different than the split axis, this case can be easily implemented
            # torch cat arrays together and return a new array that is_split
            out_shape = tuple(
                arr1.gshape[x] if x != axis else arr0.gshape[x] + arr1.gshape[x]
                for x in range(len(arr1.gshape))
            )
            out = factories.empty(out_shape, split=s0, dtype=out_dtype, device=arr0.device)
            out._DNDarray__array = torch.cat(
                (arr0._DNDarray__array, arr1._DNDarray__array), dim=axis
            )
            out._DNDarray__comm = arr0.comm
            return out
        else:
            # concatenation along the split axis: data must be redistributed
            arr0 = arr0.copy()
            arr1 = arr1.copy()
            # maps are created for where the data is and the output shape is calculated
            lshape_map = torch.zeros((2, arr0.comm.size, len(arr0.gshape)), dtype=torch.int)
            lshape_map[0, arr0.comm.rank, :] = torch.Tensor(arr0.lshape)
            lshape_map[1, arr0.comm.rank, :] = torch.Tensor(arr1.lshape)
            lshape_map_comm = arr0.comm.Iallreduce(MPI.IN_PLACE, lshape_map, MPI.SUM)
            arr0_shape, arr1_shape = list(arr0.shape), list(arr1.shape)
            arr0_shape[axis] += arr1_shape[axis]
            out_shape = tuple(arr0_shape)
            # the chunk map is used for determine how much data should be on each process
            chunk_map = torch.zeros((arr0.comm.size, len(arr0.gshape)), dtype=torch.int)
            _, _, chk = arr0.comm.chunk(out_shape, s0 if s0 is not None else s1)
            for i in range(len(out_shape)):
                chunk_map[arr0.comm.rank, i] = chk[i].stop - chk[i].start
            chunk_map_comm = arr0.comm.Iallreduce(MPI.IN_PLACE, chunk_map, MPI.SUM)
            lshape_map_comm.wait()
            chunk_map_comm.wait()
            # redistribute arr0 so that its data is front-loaded onto the leading processes
            if s0 is not None:
                send_slice = [slice(None)] * arr0.ndim
                keep_slice = [slice(None)] * arr0.ndim
                # data is first front-loaded onto the first size/2 processes
                for spr in range(1, arr0.comm.size):
                    if arr0.comm.rank == spr:
                        for pr in range(spr):
                            send_amt = abs((chunk_map[pr, axis] - lshape_map[0, pr, axis]).item())
                            send_amt = (
                                send_amt if send_amt < arr0.lshape[axis] else arr0.lshape[axis]
                            )
                            if send_amt:
                                send_slice[arr0.split] = slice(0, send_amt)
                                keep_slice[arr0.split] = slice(send_amt, arr0.lshape[axis])
                                send = arr0.comm.Isend(
                                    arr0.lloc[send_slice].clone(),
                                    dest=pr,
                                    tag=pr + arr0.comm.size + spr,
                                )
                                arr0._DNDarray__array = arr0.lloc[keep_slice].clone()
                                send.wait()
                    for pr in range(spr):
                        snt = abs((chunk_map[pr, s0] - lshape_map[0, pr, s0]).item())
                        snt = (
                            snt
                            if snt < lshape_map[0, spr, axis]
                            else lshape_map[0, spr, axis].item()
                        )
                        if arr0.comm.rank == pr and snt:
                            shp = list(arr0.gshape)
                            shp[arr0.split] = snt
                            data = torch.zeros(
                                shp, dtype=out_dtype.torch_type(), device=arr0.device.torch_device
                            )
                            arr0.comm.Recv(data, source=spr, tag=pr + arr0.comm.size + spr)
                            arr0._DNDarray__array = torch.cat(
                                (arr0._DNDarray__array, data), dim=arr0.split
                            )
                        # keep the bookkeeping map in sync with the transferred rows
                        lshape_map[0, pr, arr0.split] += snt
                        lshape_map[0, spr, arr0.split] -= snt
            if s1 is not None:
                send_slice = [slice(None)] * arr0.ndim
                keep_slice = [slice(None)] * arr0.ndim
                # push the data backwards (arr1), making the data the proper size for arr1 on the last nodes
                # the data is "compressed" on np/2 processes. data is sent from
                for spr in range(arr0.comm.size - 1, -1, -1):
                    if arr0.comm.rank == spr:
                        for pr in range(arr0.comm.size - 1, spr, -1):
                            # calculate the amount of data to send from the chunk map
                            send_amt = abs((chunk_map[pr, axis] - lshape_map[1, pr, axis]).item())
                            send_amt = (
                                send_amt if send_amt < arr1.lshape[axis] else arr1.lshape[axis]
                            )
                            if send_amt:
                                send_slice[axis] = slice(
                                    arr1.lshape[axis] - send_amt, arr1.lshape[axis]
                                )
                                keep_slice[axis] = slice(0, arr1.lshape[axis] - send_amt)
                                send = arr1.comm.Isend(
                                    arr1.lloc[send_slice].clone(),
                                    dest=pr,
                                    tag=pr + arr1.comm.size + spr,
                                )
                                arr1._DNDarray__array = arr1.lloc[keep_slice].clone()
                                send.wait()
                    for pr in range(arr1.comm.size - 1, spr, -1):
                        snt = abs((chunk_map[pr, axis] - lshape_map[1, pr, axis]).item())
                        snt = (
                            snt
                            if snt < lshape_map[1, spr, axis]
                            else lshape_map[1, spr, axis].item()
                        )
                        if arr1.comm.rank == pr and snt:
                            shp = list(arr1.gshape)
                            shp[axis] = snt
                            data = torch.zeros(
                                shp, dtype=out_dtype.torch_type(), device=arr1.device.torch_device
                            )
                            arr1.comm.Recv(data, source=spr, tag=pr + arr1.comm.size + spr)
                            arr1._DNDarray__array = torch.cat(
                                (data, arr1._DNDarray__array), dim=axis
                            )
                        lshape_map[1, pr, axis] += snt
                        lshape_map[1, spr, axis] -= snt
            # arr0 unsplit: each process slices its share of arr0 out locally
            if s0 is None:
                arb_slice = [None] * len(arr1.shape)
                for c in range(len(chunk_map)):
                    arb_slice[axis] = c
                    # the chunk map is adjusted by subtracting what data is already in the correct place (the data from
                    # arr1 is already correctly placed) i.e. the chunk map shows how much data is still needed on each
                    # process, the local
                    chunk_map[arb_slice] -= lshape_map[tuple([1] + arb_slice)]
                # after adjusting arr1 need to now select the target data in arr0 on each node with a local slice
                if arr0.comm.rank == 0:
                    lcl_slice = [slice(None)] * arr0.ndim
                    lcl_slice[axis] = slice(chunk_map[0, axis].item())
                    arr0._DNDarray__array = arr0._DNDarray__array[lcl_slice].clone().squeeze()
                ttl = chunk_map[0, axis].item()
                for en in range(1, arr0.comm.size):
                    sz = chunk_map[en, axis]
                    if arr0.comm.rank == en:
                        lcl_slice = [slice(None)] * arr0.ndim
                        lcl_slice[axis] = slice(ttl, sz.item() + ttl, 1)
                        arr0._DNDarray__array = arr0._DNDarray__array[lcl_slice].clone().squeeze()
                    ttl += sz.item()
                if len(arr0.lshape) < len(arr1.lshape):
                    arr0._DNDarray__array.unsqueeze_(axis)
            # arr1 unsplit: mirror of the case above, slicing from the back
            if s1 is None:
                arb_slice = [None] * len(arr0.shape)
                for c in range(len(chunk_map)):
                    arb_slice[axis] = c
                    chunk_map[arb_slice] -= lshape_map[tuple([0] + arb_slice)]
                # get the desired data in arr1 on each node with a local slice
                if arr1.comm.rank == arr1.comm.size - 1:
                    lcl_slice = [slice(None)] * arr1.ndim
                    lcl_slice[axis] = slice(
                        arr1.lshape[axis] - chunk_map[-1, axis].item(), arr1.lshape[axis], 1
                    )
                    arr1._DNDarray__array = arr1._DNDarray__array[lcl_slice].clone().squeeze()
                ttl = chunk_map[-1, axis].item()
                for en in range(arr1.comm.size - 2, -1, -1):
                    sz = chunk_map[en, axis]
                    if arr1.comm.rank == en:
                        lcl_slice = [slice(None)] * arr1.ndim
                        lcl_slice[axis] = slice(
                            arr1.lshape[axis] - (sz.item() + ttl), arr1.lshape[axis] - ttl, 1
                        )
                        arr1._DNDarray__array = arr1._DNDarray__array[lcl_slice].clone().squeeze()
                    ttl += sz.item()
                if len(arr1.lshape) < len(arr0.lshape):
                    arr1._DNDarray__array.unsqueeze_(axis)
            # now that the data is in the proper shape, need to concatenate them on the nodes where they both exist for
            # the others, just set them equal
            out = factories.empty(
                out_shape,
                split=s0 if s0 is not None else s1,
                dtype=out_dtype,
                device=arr0.device,
                comm=arr0.comm,
            )
            res = torch.cat((arr0._DNDarray__array, arr1._DNDarray__array), dim=axis)
            out._DNDarray__array = res
            return out
def diag(a, offset=0):
    """
    Extract a diagonal or construct a diagonal array.
    See the documentation for `heat.diagonal` for more information about extracting the diagonal.
    Parameters
    ----------
    a: ht.DNDarray
        The array holding data for creating a diagonal array or extracting a diagonal.
        If a is a 1-dimensional array a diagonal 2d-array will be returned.
        If a is a n-dimensional array with n > 1 the diagonal entries will be returned in an n-1 dimensional array.
    offset: int, optional
        The offset from the main diagonal.
        Offset greater than zero means above the main diagonal, smaller than zero is below the main diagonal.
    Returns
    -------
    res: ht.DNDarray
        The extracted diagonal or the constructed diagonal array
    Examples
    --------
    >>> import heat as ht
    >>> a = ht.array([1, 2])
    >>> ht.diag(a)
    tensor([[1, 0],
            [0, 2]])
    >>> ht.diag(a, offset=1)
    tensor([[0, 1, 0],
            [0, 0, 2],
            [0, 0, 0]])
    >>> ht.equal(ht.diag(ht.diag(a)), a)
    True
    >>> a = ht.array([[1, 2], [3, 4]])
    >>> ht.diag(a)
    tensor([1, 4])
    """
    # 2-D (or higher) input: delegate to diagonal extraction
    if len(a.shape) > 1:
        return diagonal(a, offset=offset)
    elif len(a.shape) < 1:
        raise ValueError("input array must be of dimension 1 or greater")
    if not isinstance(offset, int):
        raise ValueError("offset must be an integer, got", type(offset))
    if not isinstance(a, dndarray.DNDarray):
        raise ValueError("a must be a DNDarray, got", type(a))
    # 1-dimensional array, must be extended to a square diagonal matrix
    gshape = (a.shape[0] + abs(offset),) * 2
    off, lshape, _ = a.comm.chunk(gshape, a.split)
    # This ensures that the data is on the correct nodes
    if offset > 0:
        # above-diagonal offset: pad the vector at the end so indices align
        padding = factories.empty(
            (offset,), dtype=a.dtype, split=None, device=a.device, comm=a.comm
        )
        a = concatenate((a, padding))
        indices_x = torch.arange(0, min(lshape[0], max(gshape[0] - off - offset, 0)))
    elif offset < 0:
        # below-diagonal offset: pad the vector at the front
        padding = factories.empty(
            (abs(offset),), dtype=a.dtype, split=None, device=a.device, comm=a.comm
        )
        a = concatenate((padding, a))
        indices_x = torch.arange(max(0, min(abs(offset) - off, lshape[0])), lshape[0])
    else:
        # Offset = 0 values on main diagonal
        indices_x = torch.arange(0, lshape[0])
    # global column index of the diagonal element for each local row
    indices_y = indices_x + off + offset
    # NOTE(review): indices were derived from the pre-balance chunk of the
    # padded global shape; balance_() is assumed to restore the matching
    # distribution of `a` — confirm against comm.chunk semantics.
    a.balance_()
    local = torch.zeros(lshape, dtype=a.dtype.torch_type(), device=a.device.torch_device)
    local[indices_x, indices_y] = a._DNDarray__array[indices_x]
    return factories.array(local, dtype=a.dtype, is_split=a.split, device=a.device, comm=a.comm)
def diagonal(a, offset=0, dim1=0, dim2=1):
    """
    Extract a diagonal of an n-dimensional array with n > 1.
    The returned array will be of dimension n-1.
    Parameters
    ----------
    a: ht.DNDarray
        The array of which the diagonal should be extracted.
    offset: int, optional
        The offset from the main diagonal.
        Offset greater than zero means above the main diagonal, smaller than zero is below the main diagonal.
        Default is 0 which means the main diagonal will be selected.
    dim1: int, optional
        First dimension with respect to which to take the diagonal.
        Default is 0.
    dim2: int, optional
        Second dimension with respect to which to take the diagonal.
        Default is 1.
    Returns
    -------
    res: ht.DNDarray
        An array holding the extracted diagonal.
    Examples
    --------
    >>> import heat as ht
    >>> a = ht.array([[1, 2], [3, 4]])
    >>> ht.diagonal(a)
    tensor([1, 4])
    >>> ht.diagonal(a, offset=1)
    tensor([2])
    >>> ht.diagonal(a, offset=-1)
    tensor([3])
    >>> a = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    >>> ht.diagonal(a)
    tensor([[0, 6],
            [1, 7]])
    >>> ht.diagonal(a, dim2=2)
    tensor([[0, 5],
            [2, 7]])
    """
    dim1, dim2 = stride_tricks.sanitize_axis(a.shape, (dim1, dim2))
    if dim1 == dim2:
        raise ValueError("Dim1 and dim2 need to be different")
    if not isinstance(a, dndarray.DNDarray):
        raise ValueError("a must be a DNDarray, got", type(a))
    if not isinstance(offset, int):
        raise ValueError("offset must be an integer, got", type(offset))
    shape = a.gshape
    ax1 = shape[dim1]
    ax2 = shape[dim2]
    # determine the number of diagonal elements that will be retrieved
    length = min(ax1, ax2 - offset) if offset >= 0 else min(ax2, ax1 + offset)
    # Remove dim1 and dim2 from shape and append resulting length
    shape = tuple([x for ind, x in enumerate(shape) if ind not in (dim1, dim2)]) + (length,)
    x, y = min(dim1, dim2), max(dim1, dim2)
    # derive the split axis of the (n-1)-dimensional result: removing dim1 and
    # dim2 shifts the split index down; a split on dim1/dim2 itself moves to
    # the appended last (diagonal) dimension
    if a.split is None:
        split = None
    elif a.split < x < y:
        split = a.split
    elif x < a.split < y:
        split = a.split - 1
    elif x < y < a.split:
        split = a.split - 2
    else:
        split = len(shape) - 1
    if a.split is None or a.split not in (dim1, dim2):
        result = torch.diagonal(a._DNDarray__array, offset=offset, dim1=dim1, dim2=dim2)
    else:
        # split axis participates in the diagonal: the offset must account for
        # this process's global chunk offset along the split dimension
        vz = 1 if a.split == dim1 else -1
        off, _, _ = a.comm.chunk(a.shape, a.split)
        result = torch.diagonal(a._DNDarray__array, offset=offset + vz * off, dim1=dim1, dim2=dim2)
    return factories.array(result, dtype=a.dtype, is_split=split, device=a.device, comm=a.comm)
def expand_dims(a, axis):
    """
    Insert a new axis of length one at the given position.

    Parameters
    ----------
    a : ht.DNDarray
        Input array to be expanded.
    axis : int
        Position in the expanded axes where the new axis is placed.

    Returns
    -------
    res : ht.DNDarray
        Output array with one more dimension than the input array.

    Raises
    ------
    ValueError
        If the axis is not in range of the axes.

    Examples
    --------
    >>> x = ht.array([1,2])
    >>> x.shape
    (2,)
    >>> y = ht.expand_dims(x, axis=0)
    >>> y
    array([[1, 2]])
    >>> y.shape
    (1, 2)
    >>> y = ht.expand_dims(x, axis=1)
    >>> y
    array([[1],
        [2]])
    >>> y.shape
    (2, 1)
    """
    if not isinstance(a, dndarray.DNDarray):
        raise TypeError("expected ht.DNDarray, but was {}".format(type(a)))
    # appending a dummy dimension of size one lets axis == a.ndim validate
    axis = stride_tricks.sanitize_axis(a.shape + (1,), axis)
    expanded = a._DNDarray__array.unsqueeze(dim=axis)
    new_shape = a.shape[:axis] + (1,) + a.shape[axis:]
    # a split at or beyond the insertion point shifts one position right
    if a.split is None or a.split < axis:
        new_split = a.split
    else:
        new_split = a.split + 1
    return dndarray.DNDarray(expanded, new_shape, a.dtype, new_split, a.device, a.comm)
def flatten(a):
    """
    Return a one-dimensional copy of the input array.

    WARNING: if a.split > 0, then the array must be resplit.

    Parameters
    ----------
    a : DNDarray
        array to collapse

    Returns
    -------
    ret : DNDarray
        flattened copy

    Examples
    --------
    >>> a = ht.array([[[1,2],[3,4]],[[5,6],[7,8]]])
    >>> ht.flatten(a)
    tensor([1,2,3,4,5,6,7,8])
    """
    if a.split is None:
        # purely local data: each process flattens independently
        return factories.array(
            torch.flatten(a._DNDarray__array),
            dtype=a.dtype,
            is_split=None,
            device=a.device,
            comm=a.comm,
        )
    # flattening only keeps global element order when the split axis is 0
    if a.split > 0:
        a = resplit(a, 0)
    flat = factories.array(
        torch.flatten(a._DNDarray__array),
        dtype=a.dtype,
        is_split=a.split,
        device=a.device,
        comm=a.comm,
    )
    flat.balance_()
    return flat
def flip(a, axis=None):
    """
    Reverse the order of elements in an array along the given axis.
    The shape of the array is preserved, but the elements are reordered.
    Parameters
    ----------
    a: ht.DNDarray
        Input array to be flipped
    axis: int, tuple
        A list of axes to be flipped
    Returns
    -------
    res: ht.DNDarray
        The flipped array.
    Examples
    --------
    >>> a = ht.array([[0,1],[2,3]])
    >>> ht.flip(a, [0])
    tensor([[2, 3],
            [0, 1]])
    >>> b = ht.array([[0,1,2],[3,4,5]], split=1)
    >>> ht.flip(a, [0,1])
    (1/2) tensor([5,4,3])
    (2/2) tensor([2,1,0])
    """
    # flip all dimensions
    if axis is None:
        axis = tuple(range(a.ndim))
    # torch.flip only accepts tuples
    if isinstance(axis, int):
        axis = [axis]
    flipped = torch.flip(a._DNDarray__array, axis)
    # flipping only non-split axes requires no communication
    if a.split not in axis:
        return factories.array(
            flipped, dtype=a.dtype, is_split=a.split, device=a.device, comm=a.comm
        )
    # Need to redistribute tensors on split axis
    # Get local shapes
    old_lshape = a.lshape
    # flipping along the split axis reverses the chunk order:
    # rank i swaps its chunk with rank (size - 1 - i)
    dest_proc = a.comm.size - 1 - a.comm.rank
    new_lshape = a.comm.sendrecv(old_lshape, dest=dest_proc, source=dest_proc)
    # Exchange local tensors
    req = a.comm.Isend(flipped, dest=dest_proc)
    received = torch.empty(new_lshape, dtype=a._DNDarray__array.dtype, device=a.device.torch_device)
    a.comm.Recv(received, source=dest_proc)
    res = factories.array(received, dtype=a.dtype, is_split=a.split, device=a.device, comm=a.comm)
    res.balance_()  # after swapping, first processes may be empty
    req.Wait()
    return res
def fliplr(a):
    """
    Flip array in the left/right direction. If a.ndim > 2, flip along dimension 1.

    Parameters
    ----------
    a: ht.DNDarray
        Input array to be flipped, must be at least 2-D

    Returns
    -------
    res: ht.DNDarray
        The flipped array.

    Examples
    --------
    >>> a = ht.array([[0,1],[2,3]])
    >>> ht.fliplr(a)
    tensor([[1, 0],
            [3, 2]])
    >>> b = ht.array([[0,1,2],[3,4,5]], split=0)
    >>> ht.fliplr(b)
    (1/2) tensor([[2, 1, 0]])
    (2/2) tensor([[5, 4, 3]])
    """
    # a left/right flip is simply a flip along the column axis
    return flip(a, axis=1)
def flipud(a):
    """
    Flip array in the up/down direction.

    Parameters
    ----------
    a: ht.DNDarray
        Input array to be flipped

    Returns
    -------
    res: ht.DNDarray
        The flipped array.

    Examples
    --------
    >>> a = ht.array([[0,1],[2,3]])
    >>> ht.flipud(a)
    tensor([[2, 3],
            [0, 1]])
    >>> b = ht.array([[0,1,2],[3,4,5]], split=0)
    >>> ht.flipud(b)
    (1/2) tensor([3,4,5])
    (2/2) tensor([0,1,2])
    """
    # an up/down flip is simply a flip along the row axis
    return flip(a, axis=0)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).
    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    Parameters
    ----------
    tup : sequence of DNDarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : DNDarray
        The array formed by stacking the given arrays.

    Examples
    --------
    >>> a = ht.array((1,2,3))
    >>> b = ht.array((2,3,4))
    >>> ht.hstack((a,b))
    [0] tensor([1, 2, 3, 2, 3, 4])
    [1] tensor([1, 2, 3, 2, 3, 4])
    >>> a = ht.array((1,2,3), split=0)
    >>> b = ht.array((2,3,4), split=0)
    >>> ht.hstack((a,b))
    [0] tensor([1, 2, 3])
    [1] tensor([2, 3, 4])
    >>> a = ht.array([[1],[2],[3]], split=0)
    >>> b = ht.array([[2],[3],[4]], split=0)
    >>> ht.hstack((a,b))
    [0] tensor([[1, 2],
    [0]         [2, 3]])
    [1] tensor([[3, 4]])
    """
    tup = list(tup)
    # NumPy semantics: when ALL inputs are 1-D they are concatenated along the
    # first axis. BUG FIX: this rule was previously gated on exactly two inputs
    # (`len(tup) == 2`), so three or more 1-D arrays were wrongly expanded to
    # columns and stacked along axis 1.
    if len(tup) > 0 and all(len(x.gshape) == 1 for x in tup):
        axis = 0
    else:
        axis = 1
        # mixed dimensionality: lift any remaining 1-D inputs to column vectors
        for cn, arr in enumerate(tup):
            if len(arr.gshape) == 1:
                tup[cn] = arr.expand_dims(1)
    return concatenate(tup, axis=axis)
def reshape(a, shape, axis=None):
    """
    Returns a tensor with the same data and number of elements as a, but with the specified shape.
    Parameters
    ----------
    a : ht.DNDarray
        The input tensor
    shape : tuple, list
        Shape of the new tensor
    axis : int, optional
        The new split axis. None denotes same axis
        Default : None
    Returns
    -------
    reshaped : ht.DNDarray
        The DNDarray with the specified shape
    Raises
    ------
    ValueError
        If the number of elements changes in the new shape.
    Examples
    --------
    >>> a = ht.zeros((3,4))
    >>> ht.reshape(a, (4,3))
    tensor([[0,0,0],
            [0,0,0],
            [0,0,0],
            [0,0,0]])
    >>> a = ht.linspace(0, 14, 8, split=0)
    >>> ht.reshape(a, (2,4))
    (1/2) tensor([[0., 2., 4., 6.]])
    (2/2) tensor([[ 8., 10., 12., 14.]])
    """
    if not isinstance(a, dndarray.DNDarray):
        raise TypeError("'a' must be a DNDarray, currently {}".format(type(a)))
    if not isinstance(shape, (list, tuple)):
        raise TypeError("shape must be list, tuple, currently {}".format(type(shape)))
    # check axis parameter
    if axis is None:
        axis = a.split
    stride_tricks.sanitize_axis(shape, axis)
    tdtype, tdevice = a.dtype.torch_type(), a.device.torch_device
    # Check the type of shape and number elements
    shape = stride_tricks.sanitize_shape(shape)
    if torch.prod(torch.tensor(shape, device=tdevice)) != a.size:
        raise ValueError("cannot reshape array of size {} into shape {}".format(a.size, shape))

    def reshape_argsort_counts_displs(
        shape1, lshape1, displs1, axis1, shape2, displs2, axis2, comm
    ):
        """
        Compute the send order, counts, and displacements.

        Determines, for every local element of the source layout
        (shape1/lshape1 split at axis1), which process owns its target
        position in the destination layout (shape2 split at axis2), and
        returns the element permutation (``argsort``) together with the
        per-process Alltoallv ``counts`` and ``displs``.
        """
        shape1 = torch.tensor(shape1, dtype=tdtype, device=tdevice)
        lshape1 = torch.tensor(lshape1, dtype=tdtype, device=tdevice)
        shape2 = torch.tensor(shape2, dtype=tdtype, device=tdevice)
        # constants
        width = torch.prod(lshape1[axis1:], dtype=torch.int)
        height = torch.prod(lshape1[:axis1], dtype=torch.int)
        global_len = torch.prod(shape1[axis1:])
        ulen = torch.prod(shape2[axis2 + 1 :])
        gindex = displs1[comm.rank] * torch.prod(shape1[axis1 + 1 :])
        # Get axis position on new split axis
        mask = torch.arange(width, device=tdevice) + gindex
        mask = mask + torch.arange(height, device=tdevice).reshape([height, 1]) * global_len
        mask = (torch.floor_divide(mask, ulen)) % shape2[axis2]
        mask = mask.flatten()
        # Compute return values
        counts = torch.zeros(comm.size, dtype=torch.int, device=tdevice)
        displs = torch.zeros_like(counts)
        argsort = torch.empty_like(mask, dtype=torch.long)
        plz = 0
        # bucket the elements by destination process, in rank order
        for i in range(len(displs2) - 1):
            mat = torch.where((mask >= displs2[i]) & (mask < displs2[i + 1]))[0]
            counts[i] = mat.numel()
            argsort[plz : counts[i] + plz] = mat
            plz += counts[i]
        displs[1:] = torch.cumsum(counts[:-1], dim=0)
        return argsort, counts, displs

    # Forward to Pytorch directly
    if a.split is None:
        return factories.array(
            torch.reshape(a._DNDarray__array, shape), dtype=a.dtype, device=a.device, comm=a.comm
        )
    # Create new flat result tensor
    _, local_shape, _ = a.comm.chunk(shape, axis)
    data = torch.empty(local_shape, dtype=tdtype, device=tdevice).flatten()
    # Calculate the counts and displacements
    _, old_displs, _ = a.comm.counts_displs_shape(a.shape, a.split)
    _, new_displs, _ = a.comm.counts_displs_shape(shape, axis)
    # append the global extent as a sentinel so displ pairs bound every chunk
    old_displs += (a.shape[a.split],)
    new_displs += (shape[axis],)
    sendsort, sendcounts, senddispls = reshape_argsort_counts_displs(
        a.shape, a.lshape, old_displs, a.split, shape, new_displs, axis, a.comm
    )
    recvsort, recvcounts, recvdispls = reshape_argsort_counts_displs(
        shape, local_shape, new_displs, axis, a.shape, old_displs, a.split, a.comm
    )
    # rearange order
    send = a._DNDarray__array.flatten()[sendsort]
    a.comm.Alltoallv((send, sendcounts, senddispls), (data, recvcounts, recvdispls))
    # original order
    backsort = torch.argsort(recvsort)
    data = data[backsort]
    # Reshape local tensor
    data = data.reshape(local_shape)
    return factories.array(data, dtype=a.dtype, is_split=axis, device=a.device, comm=a.comm)
def shape(a):
    """
    Return the global shape of a DNDarray `a`.

    Parameters
    ----------
    a : DNDarray

    Returns
    -------
    tuple of ints

    Raises
    ------
    TypeError
        If `a` is not a DNDarray.
    """
    if isinstance(a, dndarray.DNDarray):
        return a.gshape
    raise TypeError("Expected a to be a DNDarray but was {}".format(type(a)))
def sort(a, axis=None, descending=False, out=None):
    """
    Sorts the elements of the DNDarray a along the given dimension (by default in ascending order) by their value.
    The sorting is not stable which means that equal elements in the result may have a different ordering than in the
    original array.
    Sorting where `axis == a.split` needs a lot of communication between the processes of MPI.
    Parameters
    ----------
    a : ht.DNDarray
        Input array to be sorted.
    axis : int, optional
        The dimension to sort along.
        Default is the last axis.
    descending : bool, optional
        If set to true values are sorted in descending order
        Default is false
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.
    Returns
    -------
    values : ht.DNDarray
        The sorted local results.
    indices
        The indices of the elements in the original data
    Raises
    ------
    ValueError
        If the axis is not in range of the axes.
    Examples
    --------
    >>> x = ht.array([[4, 1], [2, 3]], split=0)
    >>> x.shape
    (1, 2)
    (1, 2)
    >>> y = ht.sort(x, axis=0)
    >>> y
    (array([[2, 1]], array([[1, 0]]))
    (array([[4, 3]], array([[0, 1]]))
    >>> ht.sort(x, descending=True)
    (array([[4, 1]], array([[0, 1]]))
    (array([[3, 2]], array([[1, 0]]))
    """
    # default: using last axis
    if axis is None:
        axis = len(a.shape) - 1
    stride_tricks.sanitize_axis(a.shape, axis)
    if a.split is None or axis != a.split:
        # sorting is not affected by split -> we can just sort along the axis
        final_result, final_indices = torch.sort(
            a._DNDarray__array, dim=axis, descending=descending
        )
    else:
        # sorting is affected by split, processes need to communicate results
        # (distributed sample sort: local sort, pivot selection, exchange, rebalance)
        # transpose so we can work along the 0 axis
        transposed = a._DNDarray__array.transpose(axis, 0)
        local_sorted, local_indices = torch.sort(transposed, dim=0, descending=descending)
        size = a.comm.Get_size()
        rank = a.comm.Get_rank()
        counts, disp, _ = a.comm.counts_displs_shape(a.gshape, axis=axis)
        actual_indices = local_indices.to(dtype=local_sorted.dtype) + disp[rank]
        length = local_sorted.size()[0]
        # Separate the sorted tensor into size + 1 equal length partitions
        partitions = [x * length // (size + 1) for x in range(1, size + 1)]
        local_pivots = (
            local_sorted[partitions]
            if counts[rank]
            else torch.empty((0,) + local_sorted.size()[1:], dtype=local_sorted.dtype)
        )
        # Only processes with elements should share their pivots
        gather_counts = [int(x > 0) * size for x in counts]
        gather_displs = (0,) + tuple(np.cumsum(gather_counts[:-1]))
        pivot_dim = list(transposed.size())
        pivot_dim[0] = size * sum([1 for x in counts if x > 0])
        # share the local pivots with root process
        pivot_buffer = torch.empty(
            pivot_dim, dtype=a.dtype.torch_type(), device=a.device.torch_device
        )
        a.comm.Gatherv(local_pivots, (pivot_buffer, gather_counts, gather_displs), root=0)
        pivot_dim[0] = size - 1
        global_pivots = torch.empty(
            pivot_dim, dtype=a.dtype.torch_type(), device=a.device.torch_device
        )
        # root process creates new pivots and shares them with other processes
        if rank == 0:
            sorted_pivots, _ = torch.sort(pivot_buffer, descending=descending, dim=0)
            length = sorted_pivots.size()[0]
            global_partitions = [x * length // size for x in range(1, size)]
            global_pivots = sorted_pivots[global_partitions]
        a.comm.Bcast(global_pivots, root=0)
        # per target process, mark which local values fall into its pivot bucket
        lt_partitions = torch.empty((size,) + local_sorted.shape, dtype=torch.int64)
        last = torch.zeros_like(local_sorted, dtype=torch.int64)
        comp_op = torch.gt if descending else torch.lt
        # Iterate over all pivots and store which pivot is the first greater than the elements value
        for idx, p in enumerate(global_pivots):
            lt = comp_op(local_sorted, p).int()
            if idx > 0:
                lt_partitions[idx] = lt - last
            else:
                lt_partitions[idx] = lt
            last = lt
        lt_partitions[size - 1] = torch.ones_like(local_sorted, dtype=last.dtype) - last
        # Matrix holding information how many values will be sent where
        local_partitions = torch.sum(lt_partitions, dim=1)
        partition_matrix = torch.empty_like(local_partitions)
        a.comm.Allreduce(local_partitions, partition_matrix, op=MPI.SUM)
        # Matrix that holds information which value will be shipped where
        index_matrix = torch.empty_like(local_sorted, dtype=torch.int64)
        # Matrix holding information which process get how many values from where
        shape = (size,) + transposed.size()[1:]
        send_matrix = torch.zeros(shape, dtype=partition_matrix.dtype)
        recv_matrix = torch.zeros(shape, dtype=partition_matrix.dtype)
        for i, x in enumerate(lt_partitions):
            index_matrix[x > 0] = i
            send_matrix[i] += torch.sum(x, dim=0)
        a.comm.Alltoall(send_matrix, recv_matrix)
        scounts = local_partitions
        rcounts = recv_matrix
        shape = (partition_matrix[rank].max(),) + transposed.size()[1:]
        first_result = torch.empty(shape, dtype=local_sorted.dtype)
        first_indices = torch.empty_like(first_result)
        # Iterate through one layer and send values with alltoallv
        for idx in np.ndindex(local_sorted.shape[1:]):
            idx_slice = [slice(None)] + [slice(ind, ind + 1) for ind in idx]
            send_count = scounts[idx_slice].reshape(-1).tolist()
            send_disp = [0] + list(np.cumsum(send_count[:-1]))
            s_val = local_sorted[idx_slice].clone()
            s_ind = actual_indices[idx_slice].clone().to(dtype=local_sorted.dtype)
            recv_count = rcounts[idx_slice].reshape(-1).tolist()
            recv_disp = [0] + list(np.cumsum(recv_count[:-1]))
            rcv_length = rcounts[idx_slice].sum().item()
            r_val = torch.empty((rcv_length,) + s_val.shape[1:], dtype=local_sorted.dtype)
            r_ind = torch.empty_like(r_val)
            a.comm.Alltoallv((s_val, send_count, send_disp), (r_val, recv_count, recv_disp))
            a.comm.Alltoallv((s_ind, send_count, send_disp), (r_ind, recv_count, recv_disp))
            first_result[idx_slice][:rcv_length] = r_val
            first_indices[idx_slice][:rcv_length] = r_ind
        # The process might not have the correct number of values therefore the tensors need to be rebalanced
        send_vec = torch.zeros(local_sorted.shape[1:] + (size, size), dtype=torch.int64)
        target_cumsum = np.cumsum(counts)
        for idx in np.ndindex(local_sorted.shape[1:]):
            idx_slice = [slice(None)] + [slice(ind, ind + 1) for ind in idx]
            current_counts = partition_matrix[idx_slice].reshape(-1).tolist()
            current_cumsum = list(np.cumsum(current_counts))
            for proc in range(size):
                if current_cumsum[proc] > target_cumsum[proc]:
                    # process has to many values which will be sent to higher ranks
                    first = next(i for i in range(size) if send_vec[idx][:, i].sum() < counts[i])
                    last = next(
                        i
                        for i in range(size + 1)
                        if i == size or current_cumsum[proc] < target_cumsum[i]
                    )
                    sent = 0
                    for i, x in enumerate(counts[first:last]):
                        # Each following process gets as many elements as it needs
                        amount = int(x - send_vec[idx][:, first + i].sum())
                        send_vec[idx][proc][first + i] = amount
                        current_counts[first + i] += amount
                        sent += amount
                    if last < size:
                        # Send all left over values to the highest last process
                        amount = partition_matrix[proc][idx]
                        send_vec[idx][proc][last] = int(amount - sent)
                        current_counts[last] += int(amount - sent)
                elif current_cumsum[proc] < target_cumsum[proc]:
                    # process needs values from higher rank
                    first = (
                        0
                        if proc == 0
                        else next(
                            i for i, x in enumerate(current_cumsum) if target_cumsum[proc - 1] < x
                        )
                    )
                    last = next(i for i, x in enumerate(current_cumsum) if target_cumsum[proc] <= x)
                    for i, x in enumerate(partition_matrix[idx_slice][first:last]):
                        # Taking as many elements as possible from each following process
                        send_vec[idx][first + i][proc] = int(x - send_vec[idx][first + i].sum())
                        current_counts[first + i] = 0
                    # Taking just enough elements from the last element to fill the current processes tensor
                    send_vec[idx][last][proc] = int(target_cumsum[proc] - current_cumsum[last - 1])
                    current_counts[last] -= int(target_cumsum[proc] - current_cumsum[last - 1])
                else:
                    # process doesn't need more values
                    send_vec[idx][proc][proc] = (
                        partition_matrix[proc][idx] - send_vec[idx][proc].sum()
                    )
                current_counts[proc] = counts[proc]
                current_cumsum = list(np.cumsum(current_counts))
        # Iterate through one layer again to create the final balanced local tensors
        second_result = torch.empty_like(local_sorted)
        second_indices = torch.empty_like(second_result)
        for idx in np.ndindex(local_sorted.shape[1:]):
            idx_slice = [slice(None)] + [slice(ind, ind + 1) for ind in idx]
            send_count = send_vec[idx][rank]
            send_disp = [0] + list(np.cumsum(send_count[:-1]))
            recv_count = send_vec[idx][:, rank]
            recv_disp = [0] + list(np.cumsum(recv_count[:-1]))
            end = partition_matrix[rank][idx]
            s_val, indices = first_result[0:end][idx_slice].sort(descending=descending, dim=0)
            s_ind = first_indices[0:end][idx_slice][indices].reshape_as(s_val)
            r_val = torch.empty((counts[rank],) + s_val.shape[1:], dtype=local_sorted.dtype)
            r_ind = torch.empty_like(r_val)
            a.comm.Alltoallv((s_val, send_count, send_disp), (r_val, recv_count, recv_disp))
            a.comm.Alltoallv((s_ind, send_count, send_disp), (r_ind, recv_count, recv_disp))
            second_result[idx_slice] = r_val
            second_indices[idx_slice] = r_ind
        second_result, tmp_indices = second_result.sort(dim=0, descending=descending)
        final_result = second_result.transpose(0, axis)
        final_indices = torch.empty_like(second_indices)
        # Update the indices in case the ordering changed during the last sort
        for idx in np.ndindex(tmp_indices.shape):
            val = tmp_indices[idx]
            final_indices[idx] = second_indices[val.item()][idx[1:]]
        final_indices = final_indices.transpose(0, axis)
    return_indices = factories.array(
        final_indices, dtype=dndarray.types.int32, is_split=a.split, device=a.device, comm=a.comm
    )
    if out is not None:
        # write sorted values into the provided output array, return indices only
        out._DNDarray__array = final_result
        return return_indices
    else:
        tensor = factories.array(
            final_result, dtype=a.dtype, is_split=a.split, device=a.device, comm=a.comm
        )
        return tensor, return_indices
def squeeze(x, axis=None):
    """
    Remove single-dimensional entries from the shape of a tensor.

    Parameters
    ----------
    x : ht.DNDarray
        Input data.
    axis : None or int or tuple of ints, optional
        Selects a subset of the single-dimensional entries in the shape.
        If axis is None, all single-dimensional entries will be removed from the shape.
        If an axis is selected with shape entry greater than one, a ValueError is raised.

    Returns
    -------
    squeezed : ht.DNDarray
        The input tensor with all (or the selected subset of) length-1 dimensions removed.
        A distributed tensor keeps its split dimension after squeezing, although the
        numerical 'split' value may decrease to account for removed leading axes.

    Raises
    ------
    TypeError
        If x is not a ht.DNDarray.
    ValueError
        If a selected axis does not have extent 1.

    Examples
    --------
    >>> a = ht.random.randn(1, 3, 1, 5)
    >>> ht.squeeze(a).shape
    (3, 5)
    >>> ht.squeeze(a, axis=0).shape
    (3, 1, 5)
    >>> ht.squeeze(a, axis=-2).shape
    (1, 3, 5)
    """
    if not isinstance(x, dndarray.DNDarray):
        raise TypeError("expected x to be a ht.DNDarray, but was {}".format(type(x)))
    # Normalize the axis argument (negative indices, range checks).
    axis = stride_tricks.sanitize_axis(x.shape, axis)
    if axis is None:
        # Default: squeeze every dimension of extent one.
        axis = tuple(i for i, dim in enumerate(x.shape) if dim == 1)
    else:
        if isinstance(axis, int):
            squeezable = x.shape[axis] == 1
            axis = (axis,)
        elif isinstance(axis, tuple):
            squeezable = bool(torch.tensor(list(x.shape[dim] == 1 for dim in axis)).all())
        if not squeezable:
            raise ValueError("Dimension along axis {} is not 1 for shape {}".format(axis, x.shape))
    if x.split is not None and x.split in axis:
        # The split dimension is about to disappear.
        # NOTE(review): resplit_ gathers the *input* in place (side effect on x).
        x.resplit_(axis=None)
    kept_dims = [dim for dim in range(x.ndim) if dim not in axis]
    out_lshape = tuple(x.lshape[dim] for dim in kept_dims)
    out_gshape = tuple(x.gshape[dim] for dim in kept_dims)
    x_lsqueezed = x._DNDarray__array.reshape(out_lshape)
    # The split axis shifts down by the number of squeezed dimensions preceding it.
    if x.split is None:
        new_split = None
    else:
        new_split = x.split - sum(1 for dim in axis if dim < x.split)
    return dndarray.DNDarray(
        x_lsqueezed, out_gshape, x.dtype, split=new_split, device=x.device, comm=x.comm
    )
def unique(a, sorted=False, return_inverse=False, axis=None):
    """
    Finds and returns the unique elements of an array.
    Works most effectively if axis != a.split.
    Parameters
    ----------
    a : ht.DNDarray
        Input array where unique elements should be found.
    sorted : bool, optional
        Whether the found elements should be sorted before returning as output.
        Warning: sorted is not working if 'axis != None and axis != a.split'
        Default: False
    return_inverse : bool, optional
        Whether to also return the indices for where elements in the original input ended up in the returned
        unique list.
        Default: False
    axis : int, optional
        Axis along which unique elements should be found. Default to None, which will return a one dimensional list of
        unique values.
    Returns
    -------
    res : ht.DNDarray
        Output array. The unique elements. Elements are distributed the same way as the input tensor.
    inverse_indices : torch.tensor (optional)
        If return_inverse is True, this tensor will hold the list of inverse indices
    Examples
    --------
    >>> x = ht.array([[3, 2], [1, 3]])
    >>> ht.unique(x, sorted=True)
    array([1, 2, 3])
    >>> ht.unique(x, sorted=True, axis=0)
    array([[1, 3],
           [2, 3]])
    >>> ht.unique(x, sorted=True, axis=1)
    array([[2, 3],
           [3, 1]])
    """
    if a.split is None:
        # Non-distributed tensor: a single local torch.unique is already global.
        torch_output = torch.unique(
            a._DNDarray__array, sorted=sorted, return_inverse=return_inverse, dim=axis
        )
        if isinstance(torch_output, tuple):
            # (values, inverse_indices): wrap each tensor separately.
            heat_output = tuple(
                factories.array(i, dtype=a.dtype, split=None, device=a.device) for i in torch_output
            )
        else:
            heat_output = factories.array(torch_output, dtype=a.dtype, split=None, device=a.device)
        return heat_output
    local_data = a._DNDarray__array
    unique_axis = None
    inverse_indices = None
    if axis is not None:
        # transpose so we can work along the 0 axis
        local_data = local_data.transpose(0, axis)
        unique_axis = 0
    # Calculate the unique on the local values
    if a.lshape[a.split] == 0:
        # Passing an empty vector to torch throws exception
        if axis is None:
            res_shape = [0]
            inv_shape = list(a.gshape)
            inv_shape[a.split] = 0
        else:
            res_shape = list(local_data.shape)
            res_shape[0] = 0
            inv_shape = [0]
        lres = torch.empty(res_shape, dtype=a.dtype.torch_type())
        inverse_pos = torch.empty(inv_shape, dtype=torch.int64)
    else:
        lres, inverse_pos = torch.unique(
            local_data, sorted=sorted, return_inverse=True, dim=unique_axis
        )
    # Share and gather the results with the other processes;
    # uniques_buf[p] holds the number of local uniques found on rank p.
    uniques = torch.tensor([lres.shape[0]]).to(torch.int32)
    uniques_buf = torch.empty((a.comm.Get_size(),), dtype=torch.int32)
    a.comm.Allgather(uniques, uniques_buf)
    if axis is None or axis == a.split:
        is_split = None
        split = a.split
        output_dim = list(lres.shape)
        output_dim[0] = uniques_buf.sum().item()
        # Gather all unique vectors
        counts = list(uniques_buf.tolist())
        displs = list([0] + uniques_buf.cumsum(0).tolist()[:-1])
        gres_buf = torch.empty(output_dim, dtype=a.dtype.torch_type())
        a.comm.Allgatherv(lres, (gres_buf, counts, displs), recv_axis=0)
        if return_inverse:
            # Prepare some information to generate the inverse indices list
            avg_len = a.gshape[a.split] // a.comm.Get_size()
            rem = a.gshape[a.split] % a.comm.Get_size()
            # Share the local reverse indices with other processes
            counts = [avg_len] * a.comm.Get_size()
            add_vec = [1] * rem + [0] * (a.comm.Get_size() - rem)
            inverse_counts = [sum(x) for x in zip(counts, add_vec)]
            inverse_displs = [0] + list(np.cumsum(inverse_counts[:-1]))
            inverse_dim = list(inverse_pos.shape)
            inverse_dim[a.split] = a.gshape[a.split]
            inverse_buf = torch.empty(inverse_dim, dtype=inverse_pos.dtype)
            # Transpose data and buffer so we can use Allgatherv along axis=0 (axis=1 does not work properly yet)
            inverse_pos = inverse_pos.transpose(0, a.split)
            inverse_buf = inverse_buf.transpose(0, a.split)
            a.comm.Allgatherv(
                inverse_pos, (inverse_buf, inverse_counts, inverse_displs), recv_axis=0
            )
            inverse_buf = inverse_buf.transpose(0, a.split)
        # Run unique a second time on the gathered per-rank uniques to deduplicate globally.
        gres = torch.unique(gres_buf, sorted=sorted, return_inverse=return_inverse, dim=unique_axis)
        if return_inverse:
            # Use the previously gathered information to generate global inverse_indices
            g_inverse = gres[1]
            gres = gres[0]
            if axis is None:
                # Calculate how many elements we have in each layer along the split axis
                elements_per_layer = 1
                for num, val in enumerate(a.gshape):
                    if not num == a.split:
                        elements_per_layer *= val
                # Create the displacements for the flattened inverse indices array
                local_elements = [displ * elements_per_layer for displ in inverse_displs][1:] + [
                    float("inf")
                ]
                # Flatten the inverse indices array so every element can be updated to represent a global index
                transposed = inverse_buf.transpose(0, a.split)
                transposed_shape = transposed.shape
                flatten_inverse = transposed.flatten()
                # Update the index elements iteratively
                cur_displ = 0
                inverse_indices = [0] * len(flatten_inverse)
                for num in range(len(inverse_indices)):
                    if num >= local_elements[cur_displ]:
                        cur_displ += 1
                    index = flatten_inverse[num] + displs[cur_displ]
                    inverse_indices[num] = g_inverse[index].tolist()
                # Convert the flattened array back to the correct global shape of a
                inverse_indices = torch.tensor(inverse_indices).reshape(transposed_shape)
                inverse_indices = inverse_indices.transpose(0, a.split)
            else:
                inverse_indices = torch.zeros_like(inverse_buf)
                steps = displs + [None]
                # Algorithm that creates the correct list for the reverse_indices
                for i in range(len(steps) - 1):
                    begin = steps[i]
                    end = steps[i + 1]
                    for num, x in enumerate(inverse_buf[begin:end]):
                        inverse_indices[begin + num] = g_inverse[begin + x]
    else:
        # Tensor is already split and does not need to be redistributed afterward
        split = None
        is_split = a.split
        # The rank with the most local uniques defines the candidate index set.
        max_uniques, max_pos = uniques_buf.max(0)
        # find indices of vectors
        if a.comm.Get_rank() == max_pos.item():
            # Get indices of the unique vectors to share with all other processes
            indices = inverse_pos.reshape(-1).unique()
        else:
            indices = torch.empty((max_uniques.item(),), dtype=inverse_pos.dtype)
        a.comm.Bcast(indices, root=max_pos)
        gres = local_data[indices.tolist()]
        inverse_indices = indices
        if sorted:
            raise ValueError(
                "Sorting with axis != split is not supported yet. "
                "See https://github.com/helmholtz-analytics/heat/issues/363"
            )
    if axis is not None:
        # transpose matrix back
        gres = gres.transpose(0, axis)
    # Drop the split if squeezing/uniquing reduced the dimensionality below it.
    split = split if a.split < len(gres.shape) else None
    result = factories.array(
        gres, dtype=a.dtype, device=a.device, comm=a.comm, split=split, is_split=is_split
    )
    if split is not None:
        result.resplit_(a.split)
    return_value = result
    if return_inverse:
        return_value = [return_value, inverse_indices.to(a.device.torch_device)]
    return return_value
def resplit(arr, axis=None):
    """
    Out-of-place redistribution of the content of the tensor. Allows to "unsplit" (i.e. gather) all values from all
    nodes as well as the definition of new axis along which the tensor is split without changes to the values.
    WARNING: this operation might involve a significant communication overhead. Use it sparingly and preferably for
    small tensors.
    Parameters
    ----------
    arr : ht.DNDarray
        The tensor from which to resplit
    axis : int, None
        The new split axis, None denotes gathering, an int will set the new split axis
    Returns
    -------
    resplit: ht.DNDarray
        A new tensor that is a copy of 'arr', but split along 'axis'
    Examples
    --------
    >>> a = ht.zeros((4, 5,), split=0)
    >>> a.lshape
    (0/2) (2, 5)
    (1/2) (2, 5)
    >>> b = resplit(a, None)
    >>> b.split
    None
    >>> b.lshape
    (0/2) (4, 5)
    (1/2) (4, 5)
    >>> a = ht.zeros((4, 5,), split=0)
    >>> a.lshape
    (0/2) (2, 5)
    (1/2) (2, 5)
    >>> b = resplit(a, 1)
    >>> b.split
    1
    >>> b.lshape
    (0/2) (4, 3)
    (1/2) (4, 2)
    """
    # sanitize the axis to check whether it is in range
    axis = stride_tricks.sanitize_axis(arr.shape, axis)
    # early out for unchanged content
    if axis == arr.split:
        return arr.copy()
    if axis is None:
        # Gather the full tensor onto every process ("unsplit").
        gathered = torch.empty(
            arr.shape, dtype=arr.dtype.torch_type(), device=arr.device.torch_device
        )
        counts, displs, _ = arr.comm.counts_displs_shape(arr.shape, arr.split)
        arr.comm.Allgatherv(arr._DNDarray__array, (gathered, counts, displs), recv_axis=arr.split)
        new_arr = factories.array(gathered, is_split=axis, device=arr.device, dtype=arr.dtype)
        return new_arr
    # tensor needs be split/sliced locally
    if arr.split is None:
        # Every rank already holds the full data; just keep its own chunk.
        temp = arr._DNDarray__array[arr.comm.chunk(arr.shape, axis)[2]]
        new_arr = factories.array(temp, is_split=axis, device=arr.device, dtype=arr.dtype)
        return new_arr
    # General case: tile-based redistribution between two split layouts.
    arr_tiles = tiling.SplitTiles(arr)
    new_arr = factories.empty(arr.gshape, split=axis, dtype=arr.dtype, device=arr.device)
    new_tiles = tiling.SplitTiles(new_arr)
    rank = arr.comm.rank
    waits = []
    rcv_waits = {}
    for rpr in range(arr.comm.size):
        # need to get where the tiles are on the new one first
        # rpr is the destination
        new_locs = torch.where(new_tiles.tile_locations == rpr)
        new_locs = torch.stack([new_locs[i] for i in range(arr.ndim)], dim=1)
        for i in range(new_locs.shape[0]):
            key = tuple(new_locs[i].tolist())
            spr = arr_tiles.tile_locations[key].item()
            to_send = arr_tiles[key]
            if spr == rank and spr != rpr:
                # This rank owns the tile and must ship it to rpr (non-blocking send).
                waits.append(arr.comm.Isend(to_send.clone(), dest=rpr, tag=rank))
            elif spr == rpr and rpr == rank:
                # Source and destination of the tile are both this rank: local copy.
                new_tiles[key] = to_send.clone()
            elif rank == rpr:
                # This rank receives the tile from spr (non-blocking receive).
                buf = torch.zeros_like(new_tiles[key])
                rcv_waits[key] = [arr.comm.Irecv(buf=buf, source=spr, tag=spr), buf]
    # Wait for all outstanding sends, then drain the receives into the new tiles.
    for w in waits:
        w.wait()
    for k in rcv_waits.keys():
        rcv_waits[k][0].wait()
        new_tiles[k] = rcv_waits[k][1]
    return new_arr
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise), i.e. concatenate along axis 0.

    This function makes most sense for arrays with up to 3 dimensions, e.g.
    pixel data with height (first axis), width (second axis) and channels
    (third axis). For more general stacking see `concatenate`.

    NOTE: the split axis will be switched to 1 in the case that both elements
    are 1-D and split=0.

    Parameters
    ----------
    tup : sequence of DNDarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ht.DNDarray
        The array formed by stacking the given arrays, will be at least 2-D.

    Examples
    --------
    >>> a = ht.array([1, 2, 3])
    >>> b = ht.array([2, 3, 4])
    >>> ht.vstack((a, b)).shape
    (2, 3)
    """
    stackable = []
    for arr in tup:
        if len(arr.gshape) == 1:
            # Promote 1-D operands to row vectors, keeping their original split.
            arr = arr.expand_dims(0).resplit_(arr.split)
        stackable.append(arr)
    return concatenate(stackable, axis=0)
def topk(a, k, dim=None, largest=True, sorted=True, out=None):
    """
    Returns the k highest entries in the array.
    (Not Stable for split arrays)
    Parameters:
    -------
    a: DNDarray
        Array to take items from
    k: int
        Number of items to take
    dim: int
        Dimension along which to take, per default the last dimension
    largest: bool
        Return either the k largest or smallest items
    sorted: bool
        Whether to sort the output (descending if largest=True, else ascending)
    out: tuple of ht.DNDarrays
        (items, indices) to put the result in
    Returns
    -------
    items: ht.DNDarray of shape (k,)
        The selected items
    indices: ht.DNDarray of shape (k,)
        The respective indices
    Examples
    --------
    >>> a = ht.array([1, 2, 3])
    >>> ht.topk(a,2)
    (tensor([3, 2]), tensor([2, 1]))
    >>> a = ht.array([[1,2,3],[1,2,3]])
    >>> ht.topk(a,2,dim=1)
    (tensor([[3, 2],
         [3, 2]]),
    tensor([[2, 1],
         [2, 1]]))
    >>> a = ht.array([[1,2,3],[1,2,3]], split=1)
    >>> ht.topk(a,2,dim=1)
    (tensor([[3],
         [3]]), tensor([[1],
         [1]]))
    (tensor([[2],
         [2]]), tensor([[1],
         [1]]))
    """
    if dim is None:
        # Default to the last dimension.
        dim = len(a.shape) - 1
    # Neutral padding value for the reduction; presumably -inf-like for largest
    # and +inf-like for smallest (see constants.sanitize_infinity) — so padded
    # entries can never win the topk.
    if largest:
        neutral_value = -constants.sanitize_infinity(a._DNDarray__array.dtype)
    else:
        neutral_value = constants.sanitize_infinity(a._DNDarray__array.dtype)
    def local_topk(*args, **kwargs):
        # Compute the local topk and pack it with its metadata into a flat
        # float64 send buffer of layout:
        # [k, dim, largest, sorted, len(shape), *shape, values..., indices...]
        shape = a.lshape
        if shape[dim] < k:
            # Fewer than k local entries: take them all ...
            result, indices = torch.topk(args[0], shape[dim], largest=largest, sorted=sorted)
            if dim == a.split:
                # Pad the result with neutral values to fill the buffer
                size = list(result.shape)
                padding_sizes = [
                    k - size[dim] if index == dim else 0
                    for index, item in enumerate(list(result.shape))
                ]
                padding = torch.nn.ConstantPad1d(padding_sizes, neutral_value)
                result = padding(result)
                # Different value for indices padding to prevent type casting issues
                padding = torch.nn.ConstantPad1d(padding_sizes, 0)
                indices = padding(indices)
        else:
            result, indices = torch.topk(args[0], k=k, dim=dim, largest=largest, sorted=sorted)
        # add offset of data chunks if reduction is computed across split axis
        if dim == a.split:
            offset, _, _ = a.comm.chunk(shape, a.split)
            indices = indices.clone()
            indices += torch.tensor(offset * a.comm.rank, dtype=indices.dtype)
        local_shape = list(result.shape)
        local_shape_len = len(shape)
        metadata = torch.tensor([k, dim, largest, sorted, local_shape_len, *local_shape])
        send_buffer = torch.cat(
            (metadata.double(), result.double().flatten(), indices.flatten().double())
        )
        return send_buffer
    # Reduce the per-rank buffers with the custom MPI_TOPK operator (mpi_topk below).
    gres = operations.__reduce_op(
        a,
        local_topk,
        MPI_TOPK,
        axis=dim,
        neutral=neutral_value,
        dim=dim,
        sorted=sorted,
        largest=largest,
    )
    # Split data again to return a tuple: unpack the flat buffer back into
    # metadata, values and indices (see layout above).
    local_result = gres._DNDarray__array
    shape_len = int(local_result[4])
    gres, gindices = local_result[5 + shape_len :].chunk(2)
    gres = gres.reshape(*local_result[5 : 5 + shape_len].int())
    gindices = gindices.reshape(*local_result[5 : 5 + shape_len].int())
    # Create output with correct split
    if dim == a.split:
        is_split = None
        split = a.split
    else:
        is_split = a.split
        split = None
    final_array = factories.array(
        gres, dtype=a.dtype, device=a.device, split=split, is_split=is_split
    )
    final_indices = factories.array(
        gindices, dtype=torch.int64, device=a.device, split=split, is_split=is_split
    )
    if out is not None:
        # Copy the result into the caller-supplied (items, indices) buffers.
        if out[0].shape != final_array.shape or out[1].shape != final_indices.shape:
            raise ValueError(
                "Expecting output buffer tuple of shape ({}, {}), got ({}, {})".format(
                    gres.shape, gindices.shape, out[0].shape, out[1].shape
                )
            )
        out[0]._DNDarray__array.storage().copy_(final_array._DNDarray__array.storage())
        out[1]._DNDarray__array.storage().copy_(final_indices._DNDarray__array.storage())
        out[0]._DNDarray__dtype = a.dtype
        out[1]._DNDarray__dtype = types.int64
    return final_array, final_indices
def mpi_topk(a, b, mpi_type):
    """
    Merge two topk send-buffers during the custom MPI reduction.

    Each buffer is a flat float64 array with the layout
    [k, dim, largest, sorted, len(shape), *shape, values..., indices...].
    The combined topk of both candidate sets is written back into ``b``
    in place, keeping ``a``'s metadata header.
    """
    # Reinterpret the raw MPI buffers as flat float64 tensors.
    buf_a = torch.from_numpy(np.frombuffer(a, dtype=np.float64))
    buf_b = torch.from_numpy(np.frombuffer(b, dtype=np.float64))
    # Unpack the metadata header.
    k = int(buf_a[0].item())
    dim = int(buf_a[1].item())
    largest = bool(buf_a[2].item())
    sorted_flag = bool(buf_a[3].item())
    # Element 4 holds the number of shape entries that follow it.
    rank_a = int(buf_a[4])
    shape_a = buf_a[5 : 5 + rank_a].int().tolist()
    rank_b = int(buf_b[4])
    shape_b = buf_b[5 : 5 + rank_b].int().tolist()
    # The payload is the concatenation of the flattened values and indices.
    values_a, indices_a = buf_a[rank_a + 5 :].chunk(2)
    values_b, indices_b = buf_b[rank_b + 5 :].chunk(2)
    values_a = values_a.reshape(shape_a)
    indices_a = indices_a.reshape(shape_a)
    values_b = values_b.reshape(shape_b)
    indices_b = indices_b.reshape(shape_b)
    # Stack both candidate sets and keep the k best entries of the union.
    merged_values = torch.cat((values_a, values_b), dim=dim)
    merged_indices = torch.cat((indices_a, indices_b), dim=dim)
    top_values, top_pos = torch.topk(merged_values, k, dim=dim, largest=largest, sorted=sorted_flag)
    top_indices = torch.gather(merged_indices, dim, top_pos)
    # Reassemble [header, values, indices] and overwrite b's buffer in place.
    header = buf_a[0 : rank_a + 5]
    buf_b.copy_(torch.cat((header, top_values.double().flatten(), top_indices.double().flatten())))
# Commutative custom MPI reduction operator wrapping mpi_topk; passed to the
# reduce operation inside topk() above.
MPI_TOPK = MPI.Op.Create(mpi_topk, commute=True)
| [
"torch.diagonal",
"torch.empty",
"torch.cat",
"torch.empty_like",
"torch.arange",
"torch.flatten",
"torch.gather",
"numpy.cumsum",
"torch.Tensor",
"torch.zeros",
"torch.topk",
"torch.unique",
"torch.zeros_like",
"torch.where",
"numpy.frombuffer",
"torch.argsort",
"torch.nn.ConstantPa... | [((24842, 24878), 'torch.flip', 'torch.flip', (['a._DNDarray__array', 'axis'], {}), '(a._DNDarray__array, axis)\n', (24852, 24878), False, 'import torch\n'), ((25350, 25440), 'torch.empty', 'torch.empty', (['new_lshape'], {'dtype': 'a._DNDarray__array.dtype', 'device': 'a.device.torch_device'}), '(new_lshape, dtype=a._DNDarray__array.dtype, device=a.device.\n torch_device)\n', (25361, 25440), False, 'import torch\n'), ((32602, 32625), 'torch.argsort', 'torch.argsort', (['recvsort'], {}), '(recvsort)\n', (32615, 32625), False, 'import torch\n'), ((67536, 67576), 'torch.cat', 'torch.cat', (['(a_values, b_values)'], {'dim': 'dim'}), '((a_values, b_values), dim=dim)\n', (67545, 67576), False, 'import torch\n'), ((67591, 67633), 'torch.cat', 'torch.cat', (['(a_indices, b_indices)'], {'dim': 'dim'}), '((a_indices, b_indices), dim=dim)\n', (67600, 67633), False, 'import torch\n'), ((67659, 67721), 'torch.topk', 'torch.topk', (['values', 'k'], {'dim': 'dim', 'largest': 'largest', 'sorted': 'sorted'}), '(values, k, dim=dim, largest=largest, sorted=sorted)\n', (67669, 67721), False, 'import torch\n'), ((67736, 67773), 'torch.gather', 'torch.gather', (['indices', 'dim', 'k_indices'], {}), '(indices, dim, k_indices)\n', (67748, 67773), False, 'import torch\n'), ((21363, 21434), 'torch.diagonal', 'torch.diagonal', (['a._DNDarray__array'], {'offset': 'offset', 'dim1': 'dim1', 'dim2': 'dim2'}), '(a._DNDarray__array, offset=offset, dim1=dim1, dim2=dim2)\n', (21377, 21434), False, 'import torch\n'), ((21555, 21641), 'torch.diagonal', 'torch.diagonal', (['a._DNDarray__array'], {'offset': '(offset + vz * off)', 'dim1': 'dim1', 'dim2': 'dim2'}), '(a._DNDarray__array, offset=offset + vz * off, dim1=dim1,\n dim2=dim2)\n', (21569, 21641), False, 'import torch\n'), ((23836, 23869), 'torch.flatten', 'torch.flatten', (['a._DNDarray__array'], {}), '(a._DNDarray__array)\n', (23849, 23869), False, 'import torch\n'), ((30137, 30187), 'torch.tensor', 'torch.tensor', 
(['shape1'], {'dtype': 'tdtype', 'device': 'tdevice'}), '(shape1, dtype=tdtype, device=tdevice)\n', (30149, 30187), False, 'import torch\n'), ((30206, 30257), 'torch.tensor', 'torch.tensor', (['lshape1'], {'dtype': 'tdtype', 'device': 'tdevice'}), '(lshape1, dtype=tdtype, device=tdevice)\n', (30218, 30257), False, 'import torch\n'), ((30275, 30325), 'torch.tensor', 'torch.tensor', (['shape2'], {'dtype': 'tdtype', 'device': 'tdevice'}), '(shape2, dtype=tdtype, device=tdevice)\n', (30287, 30325), False, 'import torch\n'), ((30362, 30406), 'torch.prod', 'torch.prod', (['lshape1[axis1:]'], {'dtype': 'torch.int'}), '(lshape1[axis1:], dtype=torch.int)\n', (30372, 30406), False, 'import torch\n'), ((30424, 30468), 'torch.prod', 'torch.prod', (['lshape1[:axis1]'], {'dtype': 'torch.int'}), '(lshape1[:axis1], dtype=torch.int)\n', (30434, 30468), False, 'import torch\n'), ((30490, 30516), 'torch.prod', 'torch.prod', (['shape1[axis1:]'], {}), '(shape1[axis1:])\n', (30500, 30516), False, 'import torch\n'), ((30532, 30562), 'torch.prod', 'torch.prod', (['shape2[axis2 + 1:]'], {}), '(shape2[axis2 + 1:])\n', (30542, 30562), False, 'import torch\n'), ((30978, 31033), 'torch.zeros', 'torch.zeros', (['comm.size'], {'dtype': 'torch.int', 'device': 'tdevice'}), '(comm.size, dtype=torch.int, device=tdevice)\n', (30989, 31033), False, 'import torch\n'), ((31051, 31075), 'torch.zeros_like', 'torch.zeros_like', (['counts'], {}), '(counts)\n', (31067, 31075), False, 'import torch\n'), ((31094, 31134), 'torch.empty_like', 'torch.empty_like', (['mask'], {'dtype': 'torch.long'}), '(mask, dtype=torch.long)\n', (31110, 31134), False, 'import torch\n'), ((31409, 31441), 'torch.cumsum', 'torch.cumsum', (['counts[:-1]'], {'dim': '(0)'}), '(counts[:-1], dim=0)\n', (31421, 31441), False, 'import torch\n'), ((34947, 35010), 'torch.sort', 'torch.sort', (['a._DNDarray__array'], {'dim': 'axis', 'descending': 'descending'}), '(a._DNDarray__array, dim=axis, descending=descending)\n', (34957, 35010), False, 
'import torch\n'), ((35271, 35323), 'torch.sort', 'torch.sort', (['transposed'], {'dim': '(0)', 'descending': 'descending'}), '(transposed, dim=0, descending=descending)\n', (35281, 35323), False, 'import torch\n'), ((37100, 37160), 'torch.empty', 'torch.empty', (['((size,) + local_sorted.shape)'], {'dtype': 'torch.int64'}), '((size,) + local_sorted.shape, dtype=torch.int64)\n', (37111, 37160), False, 'import torch\n'), ((37176, 37225), 'torch.zeros_like', 'torch.zeros_like', (['local_sorted'], {'dtype': 'torch.int64'}), '(local_sorted, dtype=torch.int64)\n', (37192, 37225), False, 'import torch\n'), ((37818, 37849), 'torch.sum', 'torch.sum', (['lt_partitions'], {'dim': '(1)'}), '(lt_partitions, dim=1)\n', (37827, 37849), False, 'import torch\n'), ((37878, 37912), 'torch.empty_like', 'torch.empty_like', (['local_partitions'], {}), '(local_partitions)\n', (37894, 37912), False, 'import torch\n'), ((38084, 38133), 'torch.empty_like', 'torch.empty_like', (['local_sorted'], {'dtype': 'torch.int64'}), '(local_sorted, dtype=torch.int64)\n', (38100, 38133), False, 'import torch\n'), ((38287, 38335), 'torch.zeros', 'torch.zeros', (['shape'], {'dtype': 'partition_matrix.dtype'}), '(shape, dtype=partition_matrix.dtype)\n', (38298, 38335), False, 'import torch\n'), ((38358, 38406), 'torch.zeros', 'torch.zeros', (['shape'], {'dtype': 'partition_matrix.dtype'}), '(shape, dtype=partition_matrix.dtype)\n', (38369, 38406), False, 'import torch\n'), ((38753, 38797), 'torch.empty', 'torch.empty', (['shape'], {'dtype': 'local_sorted.dtype'}), '(shape, dtype=local_sorted.dtype)\n', (38764, 38797), False, 'import torch\n'), ((38822, 38852), 'torch.empty_like', 'torch.empty_like', (['first_result'], {}), '(first_result)\n', (38838, 38852), False, 'import torch\n'), ((38940, 38974), 'numpy.ndindex', 'np.ndindex', (['local_sorted.shape[1:]'], {}), '(local_sorted.shape[1:])\n', (38950, 38974), True, 'import numpy as np\n'), ((40070, 40139), 'torch.zeros', 'torch.zeros', 
(['(local_sorted.shape[1:] + (size, size))'], {'dtype': 'torch.int64'}), '(local_sorted.shape[1:] + (size, size), dtype=torch.int64)\n', (40081, 40139), False, 'import torch\n'), ((40164, 40181), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (40173, 40181), True, 'import numpy as np\n'), ((40201, 40235), 'numpy.ndindex', 'np.ndindex', (['local_sorted.shape[1:]'], {}), '(local_sorted.shape[1:])\n', (40211, 40235), True, 'import numpy as np\n'), ((43261, 43291), 'torch.empty_like', 'torch.empty_like', (['local_sorted'], {}), '(local_sorted)\n', (43277, 43291), False, 'import torch\n'), ((43317, 43348), 'torch.empty_like', 'torch.empty_like', (['second_result'], {}), '(second_result)\n', (43333, 43348), False, 'import torch\n'), ((43368, 43402), 'numpy.ndindex', 'np.ndindex', (['local_sorted.shape[1:]'], {}), '(local_sorted.shape[1:])\n', (43378, 43402), True, 'import numpy as np\n'), ((44507, 44539), 'torch.empty_like', 'torch.empty_like', (['second_indices'], {}), '(second_indices)\n', (44523, 44539), False, 'import torch\n'), ((44638, 44667), 'numpy.ndindex', 'np.ndindex', (['tmp_indices.shape'], {}), '(tmp_indices.shape)\n', (44648, 44667), True, 'import numpy as np\n'), ((50059, 50152), 'torch.unique', 'torch.unique', (['a._DNDarray__array'], {'sorted': 'sorted', 'return_inverse': 'return_inverse', 'dim': 'axis'}), '(a._DNDarray__array, sorted=sorted, return_inverse=\n return_inverse, dim=axis)\n', (50071, 50152), False, 'import torch\n'), ((51215, 51256), 'torch.empty', 'torch.empty', (['inv_shape'], {'dtype': 'torch.int64'}), '(inv_shape, dtype=torch.int64)\n', (51226, 51256), False, 'import torch\n'), ((51296, 51373), 'torch.unique', 'torch.unique', (['local_data'], {'sorted': 'sorted', 'return_inverse': '(True)', 'dim': 'unique_axis'}), '(local_data, sorted=sorted, return_inverse=True, dim=unique_axis)\n', (51308, 51373), False, 'import torch\n'), ((53303, 53393), 'torch.unique', 'torch.unique', (['gres_buf'], {'sorted': 'sorted', 
'return_inverse': 'return_inverse', 'dim': 'unique_axis'}), '(gres_buf, sorted=sorted, return_inverse=return_inverse, dim=\n unique_axis)\n', (53315, 53393), False, 'import torch\n'), ((59331, 59375), 'torch.where', 'torch.where', (['(new_tiles.tile_locations == rpr)'], {}), '(new_tiles.tile_locations == rpr)\n', (59342, 59375), False, 'import torch\n'), ((64674, 64744), 'torch.tensor', 'torch.tensor', (['[k, dim, largest, sorted, local_shape_len, *local_shape]'], {}), '([k, dim, largest, sorted, local_shape_len, *local_shape])\n', (64686, 64744), False, 'import torch\n'), ((66565, 66599), 'numpy.frombuffer', 'np.frombuffer', (['a'], {'dtype': 'np.float64'}), '(a, dtype=np.float64)\n', (66578, 66599), True, 'import numpy as np\n'), ((66633, 66667), 'numpy.frombuffer', 'np.frombuffer', (['b'], {'dtype': 'np.float64'}), '(b, dtype=np.float64)\n', (66646, 66667), True, 'import numpy as np\n'), ((4807, 4874), 'torch.cat', 'torch.cat', (['(arr0._DNDarray__array, arr1._DNDarray__array)'], {'dim': 'axis'}), '((arr0._DNDarray__array, arr1._DNDarray__array), dim=axis)\n', (4816, 4874), False, 'import torch\n'), ((18634, 18660), 'torch.arange', 'torch.arange', (['(0)', 'lshape[0]'], {}), '(0, lshape[0])\n', (18646, 18660), False, 'import torch\n'), ((23602, 23635), 'torch.flatten', 'torch.flatten', (['a._DNDarray__array'], {}), '(a._DNDarray__array)\n', (23615, 23635), False, 'import torch\n'), ((29776, 29811), 'torch.tensor', 'torch.tensor', (['shape'], {'device': 'tdevice'}), '(shape, device=tdevice)\n', (29788, 29811), False, 'import torch\n'), ((30602, 30632), 'torch.prod', 'torch.prod', (['shape1[axis1 + 1:]'], {}), '(shape1[axis1 + 1:])\n', (30612, 30632), False, 'import torch\n'), ((30696, 30731), 'torch.arange', 'torch.arange', (['width'], {'device': 'tdevice'}), '(width, device=tdevice)\n', (30708, 30731), False, 'import torch\n'), ((30850, 30880), 'torch.floor_divide', 'torch.floor_divide', (['mask', 'ulen'], {}), '(mask, ulen)\n', (30868, 30880), False, 'import 
torch\n'), ((31584, 31624), 'torch.reshape', 'torch.reshape', (['a._DNDarray__array', 'shape'], {}), '(a._DNDarray__array, shape)\n', (31597, 31624), False, 'import torch\n'), ((31778, 31832), 'torch.empty', 'torch.empty', (['local_shape'], {'dtype': 'tdtype', 'device': 'tdevice'}), '(local_shape, dtype=tdtype, device=tdevice)\n', (31789, 31832), False, 'import torch\n'), ((36792, 36846), 'torch.sort', 'torch.sort', (['pivot_buffer'], {'descending': 'descending', 'dim': '(0)'}), '(pivot_buffer, descending=descending, dim=0)\n', (36802, 36846), False, 'import torch\n'), ((37663, 37710), 'torch.ones_like', 'torch.ones_like', (['local_sorted'], {'dtype': 'last.dtype'}), '(local_sorted, dtype=last.dtype)\n', (37678, 37710), False, 'import torch\n'), ((38520, 38539), 'torch.sum', 'torch.sum', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (38529, 38539), False, 'import torch\n'), ((39523, 39593), 'torch.empty', 'torch.empty', (['((rcv_length,) + s_val.shape[1:])'], {'dtype': 'local_sorted.dtype'}), '((rcv_length,) + s_val.shape[1:], dtype=local_sorted.dtype)\n', (39534, 39593), False, 'import torch\n'), ((39614, 39637), 'torch.empty_like', 'torch.empty_like', (['r_val'], {}), '(r_val)\n', (39630, 39637), False, 'import torch\n'), ((43944, 44016), 'torch.empty', 'torch.empty', (['((counts[rank],) + s_val.shape[1:])'], {'dtype': 'local_sorted.dtype'}), '((counts[rank],) + s_val.shape[1:], dtype=local_sorted.dtype)\n', (43955, 44016), False, 'import torch\n'), ((44037, 44060), 'torch.empty_like', 'torch.empty_like', (['r_val'], {}), '(r_val)\n', (44053, 44060), False, 'import torch\n'), ((51471, 51500), 'torch.tensor', 'torch.tensor', (['[lres.shape[0]]'], {}), '([lres.shape[0]])\n', (51483, 51500), False, 'import torch\n'), ((52774, 52823), 'torch.empty', 'torch.empty', (['inverse_dim'], {'dtype': 'inverse_pos.dtype'}), '(inverse_dim, dtype=inverse_pos.dtype)\n', (52785, 52823), False, 'import torch\n'), ((63483, 63546), 'torch.topk', 'torch.topk', (['args[0]', 'shape[dim]'], 
{'largest': 'largest', 'sorted': 'sorted'}), '(args[0], shape[dim], largest=largest, sorted=sorted)\n', (63493, 63546), False, 'import torch\n'), ((64229, 64294), 'torch.topk', 'torch.topk', (['args[0]'], {'k': 'k', 'dim': 'dim', 'largest': 'largest', 'sorted': 'sorted'}), '(args[0], k=k, dim=dim, largest=largest, sorted=sorted)\n', (64239, 64294), False, 'import torch\n'), ((64519, 64574), 'torch.tensor', 'torch.tensor', (['(offset * a.comm.rank)'], {'dtype': 'indices.dtype'}), '(offset * a.comm.rank, dtype=indices.dtype)\n', (64531, 64574), False, 'import torch\n'), ((5748, 5844), 'torch.cat', 'torch.cat', (['(arr0._DNDarray__array[arr0_slice], arr1._DNDarray__array[arr1_slice])'], {'dim': 'axis'}), '((arr0._DNDarray__array[arr0_slice], arr1._DNDarray__array[\n arr1_slice]), dim=axis)\n', (5757, 5844), False, 'import torch\n'), ((31211, 31270), 'torch.where', 'torch.where', (['((mask >= displs2[i]) & (mask < displs2[i + 1]))'], {}), '((mask >= displs2[i]) & (mask < displs2[i + 1]))\n', (31222, 31270), False, 'import torch\n'), ((36090, 36119), 'numpy.cumsum', 'np.cumsum', (['gather_counts[:-1]'], {}), '(gather_counts[:-1])\n', (36099, 36119), True, 'import numpy as np\n'), ((40426, 40451), 'numpy.cumsum', 'np.cumsum', (['current_counts'], {}), '(current_counts)\n', (40435, 40451), True, 'import numpy as np\n'), ((55118, 55147), 'torch.zeros_like', 'torch.zeros_like', (['inverse_buf'], {}), '(inverse_buf)\n', (55134, 55147), False, 'import torch\n'), ((63896, 63948), 'torch.nn.ConstantPad1d', 'torch.nn.ConstantPad1d', (['padding_sizes', 'neutral_value'], {}), '(padding_sizes, neutral_value)\n', (63918, 63948), False, 'import torch\n'), ((64101, 64141), 'torch.nn.ConstantPad1d', 'torch.nn.ConstantPad1d', (['padding_sizes', '(0)'], {}), '(padding_sizes, 0)\n', (64123, 64141), False, 'import torch\n'), ((39154, 39180), 'numpy.cumsum', 'np.cumsum', (['send_count[:-1]'], {}), '(send_count[:-1])\n', (39163, 39180), True, 'import numpy as np\n'), ((39418, 39444), 
'numpy.cumsum', 'np.cumsum', (['recv_count[:-1]'], {}), '(recv_count[:-1])\n', (39427, 39444), True, 'import numpy as np\n'), ((43124, 43149), 'numpy.cumsum', 'np.cumsum', (['current_counts'], {}), '(current_counts)\n', (43133, 43149), True, 'import numpy as np\n'), ((43562, 43588), 'numpy.cumsum', 'np.cumsum', (['send_count[:-1]'], {}), '(send_count[:-1])\n', (43571, 43588), True, 'import numpy as np\n'), ((43674, 43700), 'numpy.cumsum', 'np.cumsum', (['recv_count[:-1]'], {}), '(recv_count[:-1])\n', (43683, 43700), True, 'import numpy as np\n'), ((52613, 52643), 'numpy.cumsum', 'np.cumsum', (['inverse_counts[:-1]'], {}), '(inverse_counts[:-1])\n', (52622, 52643), True, 'import numpy as np\n'), ((6518, 6585), 'torch.cat', 'torch.cat', (['(arr0._DNDarray__array, arr1._DNDarray__array)'], {'dim': 'axis'}), '((arr0._DNDarray__array, arr1._DNDarray__array), dim=axis)\n', (6527, 6585), False, 'import torch\n'), ((6988, 7013), 'torch.Tensor', 'torch.Tensor', (['arr0.lshape'], {}), '(arr0.lshape)\n', (7000, 7013), False, 'import torch\n'), ((7061, 7086), 'torch.Tensor', 'torch.Tensor', (['arr1.lshape'], {}), '(arr1.lshape)\n', (7073, 7086), False, 'import torch\n'), ((16095, 16162), 'torch.cat', 'torch.cat', (['(arr0._DNDarray__array, arr1._DNDarray__array)'], {'dim': 'axis'}), '((arr0._DNDarray__array, arr1._DNDarray__array), dim=axis)\n', (16104, 16162), False, 'import torch\n'), ((30763, 30799), 'torch.arange', 'torch.arange', (['height'], {'device': 'tdevice'}), '(height, device=tdevice)\n', (30775, 30799), False, 'import torch\n'), ((54937, 54966), 'torch.tensor', 'torch.tensor', (['inverse_indices'], {}), '(inverse_indices)\n', (54949, 54966), False, 'import torch\n'), ((59907, 59939), 'torch.zeros_like', 'torch.zeros_like', (['new_tiles[key]'], {}), '(new_tiles[key])\n', (59923, 59939), False, 'import torch\n'), ((9935, 9991), 'torch.cat', 'torch.cat', (['(arr0._DNDarray__array, data)'], {'dim': 'arr0.split'}), '((arr0._DNDarray__array, data), dim=arr0.split)\n', 
(9944, 9991), False, 'import torch\n'), ((12593, 12643), 'torch.cat', 'torch.cat', (['(data, arr1._DNDarray__array)'], {'dim': 'axis'}), '((data, arr1._DNDarray__array), dim=axis)\n', (12602, 12643), False, 'import torch\n')] |
import sys
sys.path.append('../..')
import os
import json
import logging
import argparse
import numpy as np
from datetime import datetime
from seqeval import metrics
from seqeval.scheme import IOB2
from data_constr.Src.IO import set_logging
# Module-level logger plus run metadata used to build the default log-file name.
logger = logging.getLogger(__name__)

# Timestamp of module import, e.g. "01.31.24-15.04".
_time = datetime.now().strftime("%m.%d.%y-%H.%M")

# Script name with a trailing ".py" suffix stripped (other suffixes are kept).
_current_file_name = os.path.basename(__file__)
_current_file_name = (
    _current_file_name[:-len('.py')]
    if _current_file_name.endswith('.py')
    else _current_file_name
)
def parse_args():
    """Parse command-line options for this preprocessing script.

    Returns:
        argparse.Namespace with ``save_loc`` (directory for the result files)
        and ``log_dir`` (path of the log file; defaults to a timestamped file
        under ``logs/``).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--save_loc', type=str, default='.',
        help='where to save results'
    )
    arg_parser.add_argument(
        '--log_dir', type=str,
        default=os.path.join('logs', f'{_current_file_name}.{_time}.log'),
        help='the directory of the log file'
    )
    return arg_parser.parse_args()
def main(args):
    """Compute dataset statistics and labeling-function (LF) quality for a corpus.

    Reads the train/valid/test splits and the existing ``meta.json`` from
    ``args.save_loc``; records split sizes, maximum sentence length and the
    number of weak-labeling sources and labels; scores every LF against the
    gold labels with a strict IOB2 F1; stores the LFs with F1 > 0.05 as
    ``lf_rec``; and writes the enriched metadata back to ``meta.json``.

    Args:
        args: parsed command-line namespace with ``save_loc`` and ``log_dir``.
    """
    set_logging(args.log_dir)
    logger.setLevel(logging.INFO)
    logger.info(f"Parameters: {args}")

    logger.info('Reading data...')
    with open(os.path.join(args.save_loc, "train.json"), 'r', encoding='utf-8') as f:
        train_data = json.load(f)
    with open(os.path.join(args.save_loc, "valid.json"), 'r', encoding='utf-8') as f:
        valid_data = json.load(f)
    with open(os.path.join(args.save_loc, "test.json"), 'r', encoding='utf-8') as f:
        test_data = json.load(f)

    logger.info('Reading metadata...')
    with open(os.path.join(args.save_loc, "meta.json"), 'r', encoding='utf-8') as f:
        meta = json.load(f)

    logger.info('Getting new metadata')
    # Longest text over all splits (0 for an empty corpus).
    max_length = max(
        (len(v['data']['text'])
         for data in (train_data, valid_data, test_data)
         for v in data.values()),
        default=0,
    )
    meta['train_size'] = len(train_data)
    meta['valid_size'] = len(valid_data)
    # Bug fix: the key used to be written as 'test_size ' (trailing space),
    # which made later lookups of 'test_size' fail.
    meta['test_size'] = len(test_data)
    meta['max_length'] = max_length
    meta['num_lf'] = len(meta['lf'])
    # BIO tagging: B- and I- tags per entity type, plus the single O tag.
    meta['num_labels'] = 2 * len(meta['entity_types']) + 1

    # Gold label sequences and, per LF, that LF's weak-label sequences.
    t_lbs = list()
    w_lbs = [[] for _ in range(meta['num_lf'])]
    for data in (train_data, valid_data, test_data):
        for v in data.values():
            t_lbs.append(v['label'])
            # Transposing v['weak_labels'] yields one row per LF, matching
            # the w_lbs indexing above.
            for i, w_lb in enumerate(np.asarray(v['weak_labels']).T):
                w_lbs[i].append(w_lb.tolist())

    rec_src = list()
    logger.info('Source performance (F1 score)')
    for i, src_name in enumerate(meta['lf']):
        f1 = metrics.f1_score(t_lbs, w_lbs[i], mode='strict', scheme=IOB2)
        logger.info(f'{src_name}: {f1}')
        # The 0.05 threshold filters out effectively uninformative sources.
        if f1 > 0.05:
            rec_src.append(src_name)
    logger.info(f'The following sources are recommended for model evaluation:\n'
                f'\t{rec_src}')
    meta['lf_rec'] = rec_src
    meta['num_lf_rec'] = len(rec_src)

    logger.info('Saving results...')
    with open(os.path.join(args.save_loc, "meta.json"), 'w', encoding='utf-8') as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)

    logger.info('Exit with no error')
if __name__ == '__main__':
    # Parse CLI options and run the metadata-building pipeline.
    argument = parse_args()
    main(argument)
| [
"sys.path.append",
"json.dump",
"json.load",
"argparse.ArgumentParser",
"os.path.join",
"os.path.basename",
"numpy.asarray",
"data_constr.Src.IO.set_logging",
"seqeval.metrics.f1_score",
"datetime.datetime.now",
"logging.getLogger"
] | [((11, 35), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (26, 35), False, 'import sys\n'), ((253, 280), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (270, 280), False, 'import logging\n'), ((352, 378), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (368, 378), False, 'import os\n'), ((567, 592), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (590, 592), False, 'import argparse\n'), ((951, 976), 'data_constr.Src.IO.set_logging', 'set_logging', (['args.log_dir'], {}), '(args.log_dir)\n', (962, 976), False, 'from data_constr.Src.IO import set_logging\n'), ((289, 303), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (301, 303), False, 'from datetime import datetime\n'), ((1194, 1206), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1203, 1206), False, 'import json\n'), ((1315, 1327), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1324, 1327), False, 'import json\n'), ((1434, 1446), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1443, 1446), False, 'import json\n'), ((1587, 1599), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1596, 1599), False, 'import json\n'), ((2895, 2956), 'seqeval.metrics.f1_score', 'metrics.f1_score', (['t_lbs', 'w_lbs[i]'], {'mode': '"""strict"""', 'scheme': 'IOB2'}), "(t_lbs, w_lbs[i], mode='strict', scheme=IOB2)\n", (2911, 2956), False, 'from seqeval import metrics\n'), ((3371, 3419), 'json.dump', 'json.dump', (['meta', 'f'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(meta, f, ensure_ascii=False, indent=2)\n', (3380, 3419), False, 'import json\n'), ((770, 827), 'os.path.join', 'os.path.join', (['"""logs"""', 'f"""{_current_file_name}.{_time}.log"""'], {}), "('logs', f'{_current_file_name}.{_time}.log')\n", (782, 827), False, 'import os\n'), ((1100, 1142), 'os.path.join', 'os.path.join', (['args.save_loc', 'f"""train.json"""'], {}), "(args.save_loc, f'train.json')\n", (1112, 1142), False, 'import 
os\n'), ((1221, 1263), 'os.path.join', 'os.path.join', (['args.save_loc', 'f"""valid.json"""'], {}), "(args.save_loc, f'valid.json')\n", (1233, 1263), False, 'import os\n'), ((1342, 1383), 'os.path.join', 'os.path.join', (['args.save_loc', 'f"""test.json"""'], {}), "(args.save_loc, f'test.json')\n", (1354, 1383), False, 'import os\n'), ((1501, 1541), 'os.path.join', 'os.path.join', (['args.save_loc', '"""meta.json"""'], {}), "(args.save_loc, 'meta.json')\n", (1513, 1541), False, 'import os\n'), ((3292, 3332), 'os.path.join', 'os.path.join', (['args.save_loc', '"""meta.json"""'], {}), "(args.save_loc, 'meta.json')\n", (3304, 3332), False, 'import os\n'), ((2331, 2359), 'numpy.asarray', 'np.asarray', (["v['weak_labels']"], {}), "(v['weak_labels'])\n", (2341, 2359), True, 'import numpy as np\n'), ((2510, 2538), 'numpy.asarray', 'np.asarray', (["v['weak_labels']"], {}), "(v['weak_labels'])\n", (2520, 2538), True, 'import numpy as np\n'), ((2688, 2716), 'numpy.asarray', 'np.asarray', (["v['weak_labels']"], {}), "(v['weak_labels'])\n", (2698, 2716), True, 'import numpy as np\n')] |
"""Matplotlib dotplot."""
import math
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, create_axes_grid, backend_show
from ...plot_utils import plot_point_interval
from ...dotplot import wilkinson_algorithm, layout_stacks
def plot_dot(
    values,
    binwidth,
    dotsize,
    stackratio,
    hdi_prob,
    quartiles,
    rotated,
    dotcolor,
    intervalcolor,
    markersize,
    markercolor,
    marker,
    figsize,
    linewidth,
    point_estimate,
    nquantiles,
    point_interval,
    ax,
    show,
    backend_kwargs,
    plot_kwargs,
):
    """Matplotlib dotplot.

    Draws a Wilkinson dot plot of ``values`` (quantile-binned into
    ``nquantiles`` dots when enough data points exist) on ``ax``, with an
    optional point-estimate / interval overlay.  Returns the matplotlib axes
    drawn on.
    """
    # Merge caller-provided backend kwargs over the backend defaults.
    if backend_kwargs is None:
        backend_kwargs = {}
    backend_kwargs = {**backend_kwarg_defaults(), **backend_kwargs}
    backend_kwargs.setdefault("figsize", figsize)
    backend_kwargs["squeeze"] = True
    # Derive automatic line width / marker size from the figure size.
    (figsize, _, _, _, auto_linewidth, auto_markersize) = _scale_fig_size(figsize, None)
    if plot_kwargs is None:
        plot_kwargs = {}
    plot_kwargs.setdefault("color", dotcolor)
    if linewidth is None:
        linewidth = auto_linewidth
    if markersize is None:
        markersize = auto_markersize
    # Reuse the currently active figure's axes when none were passed in;
    # only create a new axes grid when there is no active figure at all.
    if ax is None:
        fig_manager = _pylab_helpers.Gcf.get_active()
        if fig_manager is not None:
            ax = fig_manager.canvas.figure.gca()
        else:
            _, ax = create_axes_grid(
                1,
                backend_kwargs=backend_kwargs,
            )
    # Optional overlay of the point estimate and HDI/quartile interval.
    if point_interval:
        ax = plot_point_interval(
            ax,
            values,
            point_estimate,
            hdi_prob,
            quartiles,
            linewidth,
            markersize,
            markercolor,
            marker,
            rotated,
            intervalcolor,
            "matplotlib",
        )
    # Bin values into nquantiles representative quantiles.  With fewer data
    # points than requested quantiles, fall back to the raw values.
    # NOTE(review): the fallback branch uses `values` as-is — presumably
    # callers pass it sorted, as the stacking below appears to expect; verify.
    if nquantiles > values.shape[0]:
        warnings.warn(
            "nquantiles must be less than or equal to the number of data points", UserWarning
        )
        nquantiles = values.shape[0]
    else:
        qlist = np.linspace(1 / (2 * nquantiles), 1 - 1 / (2 * nquantiles), nquantiles)
        values = np.quantile(values, qlist)
    # Default bin width grows with the data range and shrinks with dot count.
    if binwidth is None:
        binwidth = math.sqrt((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi))
    ## Wilkinson's Algorithm
    stack_locs, stack_count = wilkinson_algorithm(values, binwidth)
    x, y = layout_stacks(stack_locs, stack_count, binwidth, stackratio, rotated)
    # One circle patch per dot; radius scales with dotsize and the bin width.
    for (x_i, y_i) in zip(x, y):
        dot = plt.Circle((x_i, y_i), dotsize * binwidth / 2, **plot_kwargs)
        ax.add_patch(dot)
    # Hide ticks along the stacking axis (it carries no meaningful scale).
    if rotated:
        ax.tick_params(bottom=False, labelbottom=False)
    else:
        ax.tick_params(left=False, labelleft=False)
    # Equal aspect so the dots render as true circles.
    ax.set_aspect("equal", adjustable="box")
    ax.autoscale()
    if backend_show(show):
        plt.show()
    return ax
| [
"numpy.quantile",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.Circle",
"numpy.linspace",
"warnings.warn",
"matplotlib._pylab_helpers.Gcf.get_active"
] | [((1301, 1332), 'matplotlib._pylab_helpers.Gcf.get_active', '_pylab_helpers.Gcf.get_active', ([], {}), '()\n', (1330, 1332), False, 'from matplotlib import _pylab_helpers\n'), ((1939, 2044), 'warnings.warn', 'warnings.warn', (['"""nquantiles must be less than or equal to the number of data points"""', 'UserWarning'], {}), "(\n 'nquantiles must be less than or equal to the number of data points',\n UserWarning)\n", (1952, 2044), False, 'import warnings\n'), ((2121, 2192), 'numpy.linspace', 'np.linspace', (['(1 / (2 * nquantiles))', '(1 - 1 / (2 * nquantiles))', 'nquantiles'], {}), '(1 / (2 * nquantiles), 1 - 1 / (2 * nquantiles), nquantiles)\n', (2132, 2192), True, 'import numpy as np\n'), ((2210, 2236), 'numpy.quantile', 'np.quantile', (['values', 'qlist'], {}), '(values, qlist)\n', (2221, 2236), True, 'import numpy as np\n'), ((2282, 2353), 'math.sqrt', 'math.sqrt', (['((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi))'], {}), '((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi))\n', (2291, 2353), False, 'import math\n'), ((2581, 2642), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x_i, y_i)', '(dotsize * binwidth / 2)'], {}), '((x_i, y_i), dotsize * binwidth / 2, **plot_kwargs)\n', (2591, 2642), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2915), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2913, 2915), True, 'import matplotlib.pyplot as plt\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for new Kernel-based SVMs"""
import numpy as np
from time import time
from mvpa2.testing import *
from mvpa2.testing.datasets import datasets
skip_if_no_external("shogun")
from mvpa2.kernels.base import CachedKernel
from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel
from mvpa2.misc.data_generators import normal_feature_dataset
from mvpa2.clfs.libsvmc.svm import SVM as lsSVM
from mvpa2.clfs.sg.svm import SVM as sgSVM
from mvpa2.generators.splitters import Splitter
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.measures.base import CrossValidation, TransferMeasure, ProxyMeasure
from mvpa2.mappers.fx import BinaryFxNode
from mvpa2.misc.errorfx import mean_mismatch_error
class SVMKernelTests(unittest.TestCase):
    """Tests for kernel-based SVMs, in particular CachedKernel behavior."""

    @sweepargs(clf=[lsSVM(), sgSVM()])
    def test_basic_clf_train_predict(self, clf):
        # Smoke test: train/predict must run for both libsvm and shogun SVMs.
        d = datasets["uni4medium"]
        clf.train(d)
        clf.predict(d)
        pass
    @reseed_rng()
    def test_cache_speedup(self):
        # Cross-validation with a precomputed (cached) kernel must give the
        # same errors as the plain kernel, and be measurably faster.
        skip_if_no_external("shogun", ver_dep="shogun:rev", min_version=4455)
        ck = sgSVM(kernel=CachedKernel(kernel=RbfSGKernel(sigma=2)), C=1)
        sk = sgSVM(kernel=RbfSGKernel(sigma=2), C=1)
        cv_c = CrossValidation(ck, NFoldPartitioner())
        cv_s = CrossValidation(sk, NFoldPartitioner())
        # data = datasets['uni4large']
        P = 5000
        data = normal_feature_dataset(
            snr=2, perlabel=200, nchunks=10, means=np.random.randn(2, P), nfeatures=P
        )
        # Time the one-off kernel caching step ...
        t0 = time()
        ck.params.kernel.compute(data)
        cachetime = time() - t0
        # ... the CV run on the cached kernel ...
        t0 = time()
        cached_err = cv_c(data)
        ccv_time = time() - t0
        # ... and the CV run that recomputes the kernel every time.
        t0 = time()
        norm_err = cv_s(data)
        ncv_time = time() - t0
        assert_almost_equal(np.asanyarray(cached_err), np.asanyarray(norm_err))
        ok_(cachetime < ncv_time)
        ok_(ccv_time < ncv_time)
        # print 'Regular CV time: %s seconds'%ncv_time
        # print 'Caching time: %s seconds'%cachetime
        # print 'Cached CV time: %s seconds'%ccv_time
        speedup = ncv_time / (ccv_time + cachetime)
        # print 'Speedup factor: %s'%speedup
        # Speedup ideally should be 10, though it's not purely linear
        self.assertFalse(speedup < 2, "Problem caching data - too slow!")
    def test_cached_kernel_different_datasets(self):
        # A cached kernel must recompute when fed a new dataset and then
        # reproduce the plain kernel's CV errors on both datasets.
        skip_if_no_external("shogun", ver_dep="shogun:rev", min_version=4455)
        # Inspired by the problem Swaroop ran into
        k = LinearSGKernel(normalizer_cls=False)
        k_ = LinearSGKernel(normalizer_cls=False)  # to be cached
        ck = CachedKernel(k_)
        clf = sgSVM(svm_impl="libsvm", kernel=k, C=-1)
        clf_ = sgSVM(svm_impl="libsvm", kernel=ck, C=-1)
        cvte = CrossValidation(clf, NFoldPartitioner())
        cvte_ = CrossValidation(clf_, NFoldPartitioner())
        postproc = BinaryFxNode(mean_mismatch_error, "targets")
        te = ProxyMeasure(clf, postproc=postproc)
        te_ = ProxyMeasure(clf_, postproc=postproc)
        for r in range(2):
            # First dataset: caching must kick in on compute(), then CV on
            # the cached kernel must match CV on the plain one.
            ds1 = datasets["uni2medium"]
            errs1 = cvte(ds1)
            ck.compute(ds1)
            ok_(ck._recomputed)
            errs1_ = cvte_(ds1)
            ok_(~ck._recomputed)
            assert_array_equal(errs1, errs1_)
            # Second, different dataset: same invariants must hold.
            ds2 = datasets["uni3small"]
            errs2 = cvte(ds2)
            ck.compute(ds2)
            ok_(ck._recomputed)
            errs2_ = cvte_(ds2)
            ok_(~ck._recomputed)
            assert_array_equal(errs2, errs2_)
            # Transfer errors on a sample selection must also agree.
            ssel = np.round(datasets["uni2large"].samples[:5, 0]).astype(int)
            te.train(datasets["uni3small"][::2])
            terr = np.asscalar(te(datasets["uni3small"][ssel]))
            te_.train(datasets["uni3small"][::2])
            terr_ = np.asscalar(te_(datasets["uni3small"][ssel]))
            ok_(~ck._recomputed)
            ok_(terr == terr_)
    def test_vstack_and_origids_issue(self):
        # Stacking datasets yields duplicate sample origids; CachedKernel must
        # reject them until they are re-initialized, then work as usual.
        # That is actually what swaroop hit
        skip_if_no_external("shogun", ver_dep="shogun:rev", min_version=4455)
        # Inspired by the problem Swaroop ran into
        k = LinearSGKernel(normalizer_cls=False)
        k_ = LinearSGKernel(normalizer_cls=False)  # to be cached
        ck = CachedKernel(k_)
        clf = sgSVM(svm_impl="libsvm", kernel=k, C=-1)
        clf_ = sgSVM(svm_impl="libsvm", kernel=ck, C=-1)
        cvte = CrossValidation(clf, NFoldPartitioner())
        cvte_ = CrossValidation(clf_, NFoldPartitioner())
        ds = datasets["uni2large"].copy(deep=True)
        # NOTE(review): this checks "orig_ids" while the attribute created by
        # ck.compute() below is "origids" — possibly a typo; verify intent.
        ok_(~("orig_ids" in ds.sa))  # assure that there are None
        ck.compute(ds)  # so we initialize origids
        ok_("origids" in ds.sa)
        ds2 = ds.copy(deep=True)
        ds2.samples = np.zeros(ds2.shape)
        from mvpa2.base.dataset import vstack
        ds_vstacked = vstack((ds2, ds))
        # should complaint now since there would not be unique
        # samples' origids
        if __debug__:
            assert_raises(ValueError, ck.compute, ds_vstacked)
        ds_vstacked.init_origids("samples")  # reset origids
        ck.compute(ds_vstacked)
        errs = cvte(ds_vstacked)
        errs_ = cvte_(ds_vstacked)
        # Following test would have failed since origids
        # were just ints, and then non-unique after vstack
        assert_array_equal(errs.samples, errs_.samples)
def suite():  # pragma: no cover
    """Return the unittest suite for this module.

    Uses ``unittest.TestLoader`` directly: ``unittest.makeSuite`` was
    deprecated in Python 3.11 and removed in 3.13.
    """
    return unittest.TestLoader().loadTestsFromTestCase(SVMKernelTests)
if __name__ == "__main__":  # pragma: no cover
    # Delegate to the package-wide test runner when executed as a script.
    from . import runner
    runner.run()
| [
"mvpa2.clfs.sg.svm.SVM",
"numpy.random.randn",
"numpy.asanyarray",
"mvpa2.kernels.sg.LinearSGKernel",
"mvpa2.measures.base.ProxyMeasure",
"numpy.zeros",
"time.time",
"mvpa2.base.dataset.vstack",
"mvpa2.clfs.libsvmc.svm.SVM",
"mvpa2.generators.partition.NFoldPartitioner",
"mvpa2.mappers.fx.Binary... | [((1902, 1908), 'time.time', 'time', ([], {}), '()\n', (1906, 1908), False, 'from time import time\n'), ((1994, 2000), 'time.time', 'time', ([], {}), '()\n', (1998, 2000), False, 'from time import time\n'), ((2078, 2084), 'time.time', 'time', ([], {}), '()\n', (2082, 2084), False, 'from time import time\n'), ((2895, 2931), 'mvpa2.kernels.sg.LinearSGKernel', 'LinearSGKernel', ([], {'normalizer_cls': '(False)'}), '(normalizer_cls=False)\n', (2909, 2931), False, 'from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n'), ((2945, 2981), 'mvpa2.kernels.sg.LinearSGKernel', 'LinearSGKernel', ([], {'normalizer_cls': '(False)'}), '(normalizer_cls=False)\n', (2959, 2981), False, 'from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n'), ((3011, 3027), 'mvpa2.kernels.base.CachedKernel', 'CachedKernel', (['k_'], {}), '(k_)\n', (3023, 3027), False, 'from mvpa2.kernels.base import CachedKernel\n'), ((3043, 3083), 'mvpa2.clfs.sg.svm.SVM', 'sgSVM', ([], {'svm_impl': '"""libsvm"""', 'kernel': 'k', 'C': '(-1)'}), "(svm_impl='libsvm', kernel=k, C=-1)\n", (3048, 3083), True, 'from mvpa2.clfs.sg.svm import SVM as sgSVM\n'), ((3099, 3140), 'mvpa2.clfs.sg.svm.SVM', 'sgSVM', ([], {'svm_impl': '"""libsvm"""', 'kernel': 'ck', 'C': '(-1)'}), "(svm_impl='libsvm', kernel=ck, C=-1)\n", (3104, 3140), True, 'from mvpa2.clfs.sg.svm import SVM as sgSVM\n'), ((3276, 3320), 'mvpa2.mappers.fx.BinaryFxNode', 'BinaryFxNode', (['mean_mismatch_error', '"""targets"""'], {}), "(mean_mismatch_error, 'targets')\n", (3288, 3320), False, 'from mvpa2.mappers.fx import BinaryFxNode\n'), ((3334, 3370), 'mvpa2.measures.base.ProxyMeasure', 'ProxyMeasure', (['clf'], {'postproc': 'postproc'}), '(clf, postproc=postproc)\n', (3346, 3370), False, 'from mvpa2.measures.base import CrossValidation, TransferMeasure, ProxyMeasure\n'), ((3385, 3422), 'mvpa2.measures.base.ProxyMeasure', 'ProxyMeasure', (['clf_'], {'postproc': 'postproc'}), '(clf_, postproc=postproc)\n', (3397, 3422), False, 
'from mvpa2.measures.base import CrossValidation, TransferMeasure, ProxyMeasure\n'), ((4539, 4575), 'mvpa2.kernels.sg.LinearSGKernel', 'LinearSGKernel', ([], {'normalizer_cls': '(False)'}), '(normalizer_cls=False)\n', (4553, 4575), False, 'from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n'), ((4589, 4625), 'mvpa2.kernels.sg.LinearSGKernel', 'LinearSGKernel', ([], {'normalizer_cls': '(False)'}), '(normalizer_cls=False)\n', (4603, 4625), False, 'from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n'), ((4655, 4671), 'mvpa2.kernels.base.CachedKernel', 'CachedKernel', (['k_'], {}), '(k_)\n', (4667, 4671), False, 'from mvpa2.kernels.base import CachedKernel\n'), ((4687, 4727), 'mvpa2.clfs.sg.svm.SVM', 'sgSVM', ([], {'svm_impl': '"""libsvm"""', 'kernel': 'k', 'C': '(-1)'}), "(svm_impl='libsvm', kernel=k, C=-1)\n", (4692, 4727), True, 'from mvpa2.clfs.sg.svm import SVM as sgSVM\n'), ((4743, 4784), 'mvpa2.clfs.sg.svm.SVM', 'sgSVM', ([], {'svm_impl': '"""libsvm"""', 'kernel': 'ck', 'C': '(-1)'}), "(svm_impl='libsvm', kernel=ck, C=-1)\n", (4748, 4784), True, 'from mvpa2.clfs.sg.svm import SVM as sgSVM\n'), ((5156, 5175), 'numpy.zeros', 'np.zeros', (['ds2.shape'], {}), '(ds2.shape)\n', (5164, 5175), True, 'import numpy as np\n'), ((5245, 5262), 'mvpa2.base.dataset.vstack', 'vstack', (['(ds2, ds)'], {}), '((ds2, ds))\n', (5251, 5262), False, 'from mvpa2.base.dataset import vstack\n'), ((1621, 1639), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (1637, 1639), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((1676, 1694), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (1692, 1694), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((1968, 1974), 'time.time', 'time', ([], {}), '()\n', (1972, 1974), False, 'from time import time\n'), ((2052, 2058), 'time.time', 'time', ([], {}), '()\n', (2056, 2058), False, 'from time import time\n'), ((2134, 2140), 
'time.time', 'time', ([], {}), '()\n', (2138, 2140), False, 'from time import time\n'), ((2175, 2200), 'numpy.asanyarray', 'np.asanyarray', (['cached_err'], {}), '(cached_err)\n', (2188, 2200), True, 'import numpy as np\n'), ((2202, 2225), 'numpy.asanyarray', 'np.asanyarray', (['norm_err'], {}), '(norm_err)\n', (2215, 2225), True, 'import numpy as np\n'), ((3178, 3196), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (3194, 3196), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((3236, 3254), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (3252, 3254), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((4822, 4840), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (4838, 4840), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((4880, 4898), 'mvpa2.generators.partition.NFoldPartitioner', 'NFoldPartitioner', ([], {}), '()\n', (4896, 4898), False, 'from mvpa2.generators.partition import NFoldPartitioner\n'), ((1166, 1173), 'mvpa2.clfs.libsvmc.svm.SVM', 'lsSVM', ([], {}), '()\n', (1171, 1173), True, 'from mvpa2.clfs.libsvmc.svm import SVM as lsSVM\n'), ((1175, 1182), 'mvpa2.clfs.sg.svm.SVM', 'sgSVM', ([], {}), '()\n', (1180, 1182), True, 'from mvpa2.clfs.sg.svm import SVM as sgSVM\n'), ((1558, 1578), 'mvpa2.kernels.sg.RbfSGKernel', 'RbfSGKernel', ([], {'sigma': '(2)'}), '(sigma=2)\n', (1569, 1578), False, 'from mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n'), ((1843, 1864), 'numpy.random.randn', 'np.random.randn', (['(2)', 'P'], {}), '(2, P)\n', (1858, 1864), True, 'import numpy as np\n'), ((3955, 4001), 'numpy.round', 'np.round', (["datasets['uni2large'].samples[:5, 0]"], {}), "(datasets['uni2large'].samples[:5, 0])\n", (3963, 4001), True, 'import numpy as np\n'), ((1504, 1524), 'mvpa2.kernels.sg.RbfSGKernel', 'RbfSGKernel', ([], {'sigma': '(2)'}), '(sigma=2)\n', (1515, 1524), False, 'from 
mvpa2.kernels.sg import RbfSGKernel, LinearSGKernel\n')] |
#!python
# External dependencies
import numpy as np
import pandas as pd
"""
Usage :
data = {
"data": {
"candles": [
["05-09-2013", 5553.75, 5625.75, 5552.700195, 5592.950195, 274900],
["06-09-2013", 5617.450195, 5688.600098, 5566.149902, 5680.399902, 253000],
["10-09-2013", 5738.5, 5904.850098, 5738.200195, 5896.75, 275200],
["11-09-2013", 5887.25, 5924.350098, 5832.700195, 5913.149902, 265000],
["12-09-2013", 5931.149902, 5932, 5815.799805, 5850.700195, 273000],
...
["27-01-2014", 6186.299805, 6188.549805, 6130.25, 6135.850098, 190400],
["28-01-2014", 6131.850098, 6163.600098, 6085.950195, 6126.25, 184100],
["29-01-2014", 6161, 6170.450195, 6109.799805, 6120.25, 146700],
["30-01-2014", 6067, 6082.850098, 6027.25, 6073.700195, 208100],
["31-01-2014", 6082.75, 6097.850098, 6067.350098, 6089.5, 146700]
]
}
}
    # Data must be provided as a Pandas DataFrame with ['date', 'open', 'high', 'low', 'close', 'volume'] as columns
df = pd.DataFrame(data["data"]["candles"], columns=['date', 'open', 'high', 'low', 'close', 'volume'])
# Columns as added by each function specific to their computations
EMA(df, 'close', 'ema_5', 5)
ATR(df, 14)
SuperTrend(df, 10, 3)
MACD(df)
"""
def HA(df, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute Heiken Ashi Candles (HA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            Heiken Ashi Close (HA_$ohlc[3])
            Heiken Ashi Open (HA_$ohlc[0])
            Heiken Ashi High (HA_$ohlc[1])
            Heiken Ashi Low (HA_$ohlc[2])
    """
    ha_open = 'HA_' + ohlc[0]
    ha_high = 'HA_' + ohlc[1]
    ha_low = 'HA_' + ohlc[2]
    ha_close = 'HA_' + ohlc[3]

    # HA close is the plain OHLC average of the current candle.
    close_vals = (df[ohlc[0]] + df[ohlc[1]] + df[ohlc[2]] + df[ohlc[3]]) / 4
    df[ha_close] = close_vals

    # HA open is recursive (midpoint of the previous HA open/close), so it is
    # built in a plain numpy array and assigned once.  The previous version
    # wrote through chained indexing (df[col].iat[i] = ...), which is a no-op
    # under pandas copy-on-write.
    open_vals = np.zeros(len(df))
    for i in range(len(df)):
        if i == 0:
            # Seed with the midpoint of the first real open/close.
            open_vals[0] = (df[ohlc[0]].iat[0] + df[ohlc[3]].iat[0]) / 2
        else:
            open_vals[i] = (open_vals[i - 1] + close_vals.iat[i - 1]) / 2
    df[ha_open] = open_vals

    # HA high/low extend the real high/low with the HA open/close.
    df[ha_high] = df[[ha_open, ha_close, ohlc[1]]].max(axis=1)
    df[ha_low] = df[[ha_open, ha_close, ohlc[2]]].min(axis=1)
    return df
def SMA(df, base, target, period):
    """
    Function to compute Simple Moving Average (SMA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the SMA needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # Assign the NaN-filled result in a single step: calling
    # fillna(inplace=True) on a column selection (df[target].fillna(...))
    # does not write back to df under pandas copy-on-write.
    df[target] = df[base].rolling(window=period).mean().fillna(0)
    return df
def STDDEV(df, base, target, period):
    """
    Function to compute rolling Standard Deviation (STDDEV)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the STDDEV needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # Assign the NaN-filled result directly; fillna(inplace=True) on a column
    # selection does not write back under pandas copy-on-write.
    df[target] = df[base].rolling(window=period).std().fillna(0)
    return df
def EMA(df, base, target, period, alpha=False):
    """
    Function to compute Exponential Moving Average (EMA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the EMA needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles
        alpha : Boolean if True indicates to use the formula for computing EMA using alpha (default is False)

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # Seed the EMA with an SMA over the first `period` candles, then feed the
    # raw values for the rest of the series.
    con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]])
    if alpha == True:
        # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period
        ema = con.ewm(alpha=1 / period, adjust=False).mean()
    else:
        # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1)
        ema = con.ewm(span=period, adjust=False).mean()
    # Assign the NaN-filled result directly; fillna(inplace=True) on a column
    # selection does not write back under pandas copy-on-write.
    df[target] = ema.fillna(0)
    return df
def ATR(df, period, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute Average True Range (ATR)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        period : Integer indicates the period of computation in terms of number of candles
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            True Range (TR)
            ATR (ATR_$period)
    """
    atr = 'ATR_' + str(period)

    # The true range is computed (and kept in the 'TR' column) only once per
    # DataFrame; subsequent calls reuse it.
    if 'TR' not in df.columns:
        high = df[ohlc[1]]
        low = df[ohlc[2]]
        prev_close = df[ohlc[3]].shift()
        # Widest of: today's range, and the gaps to yesterday's close.
        df['h-l'] = high - low
        df['h-yc'] = abs(high - prev_close)
        df['l-yc'] = abs(low - prev_close)
        df['TR'] = df[['h-l', 'h-yc', 'l-yc']].max(axis=1)
        df.drop(['h-l', 'h-yc', 'l-yc'], inplace=True, axis=1)

    # ATR is an alpha-smoothed EMA (alpha = 1/period) of the true range.
    EMA(df, 'TR', atr, period, alpha=True)
    return df
def SuperTrend(df, period, multiplier, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute SuperTrend

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        period : Integer indicates the period of computation in terms of number of candles
        multiplier : Integer indicates value to multiply the ATR
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            True Range (TR), ATR (ATR_$period)
            SuperTrend (ST_$period_$multiplier)
            SuperTrend Direction (STX_$period_$multiplier)
    """
    ATR(df, period, ohlc=ohlc)
    atr = 'ATR_' + str(period)
    st = 'ST_' + str(period) + '_' + str(multiplier)
    stx = 'STX_' + str(period) + '_' + str(multiplier)

    # SuperTrend algorithm:
    #   basic upper band = (high + low) / 2 + multiplier * ATR
    #   basic lower band = (high + low) / 2 - multiplier * ATR
    #   final upper band = current basic upper band if it is tighter than the
    #       previous final upper band, or if the previous close broke above it;
    #       otherwise the previous final upper band (mirrored for the lower band)
    #   SUPERTREND flips between the two final bands when the close crosses them.
    hl2 = (df[ohlc[1]] + df[ohlc[2]]) / 2
    basic_ub = (hl2 + multiplier * df[atr]).to_numpy()
    basic_lb = (hl2 - multiplier * df[atr]).to_numpy()
    close = df[ohlc[3]].to_numpy()
    n = len(df)

    # Work in plain numpy arrays: the band recursion is inherently O(n), and
    # per-row chained .iat assignment is both slow and a no-op under pandas
    # copy-on-write.  The throw-away basic/final band columns are gone too.
    final_ub = np.zeros(n)
    final_lb = np.zeros(n)
    for i in range(period, n):
        final_ub[i] = (
            basic_ub[i]
            if basic_ub[i] < final_ub[i - 1] or close[i - 1] > final_ub[i - 1]
            else final_ub[i - 1]
        )
        final_lb[i] = (
            basic_lb[i]
            if basic_lb[i] > final_lb[i - 1] or close[i - 1] < final_lb[i - 1]
            else final_lb[i - 1]
        )

    st_vals = np.zeros(n)
    for i in range(period, n):
        prev = st_vals[i - 1]
        if prev == final_ub[i - 1]:
            # Riding the upper band: flip to the lower band on an upside break.
            st_vals[i] = final_ub[i] if close[i] <= final_ub[i] else final_lb[i]
        elif prev == final_lb[i - 1]:
            # Riding the lower band: flip to the upper band on a downside break.
            st_vals[i] = final_lb[i] if close[i] >= final_lb[i] else final_ub[i]
        else:
            st_vals[i] = 0.00
    df[st] = st_vals

    # Mark the trend direction up/down.  np.nan replaces np.NaN, which was
    # removed in NumPy 2.0.
    df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.nan)

    df.fillna(0, inplace=True)
    return df
def MACD(df, fastEMA=12, slowEMA=26, signal=9, base='Close'):
    """
    Function to compute Moving Average Convergence Divergence (MACD)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        fastEMA : Integer indicates faster EMA
        slowEMA : Integer indicates slower EMA
        signal : Integer indicates the signal generator for MACD
        base : String indicating the column name from which the MACD needs to be computed from (Default Close)

    Returns :
        df : Pandas DataFrame with new columns added for
            Fast EMA (ema_$fastEMA)
            Slow EMA (ema_$slowEMA)
            MACD (macd_$fastEMA_$slowEMA_$signal)
            MACD Signal (signal_$fastEMA_$slowEMA_$signal)
            MACD Histogram (hist_$fastEMA_$slowEMA_$signal)
    """
    tag = f"{fastEMA}_{slowEMA}_{signal}"
    fE = f"ema_{fastEMA}"
    sE = f"ema_{slowEMA}"
    macd = f"macd_{tag}"
    sig = f"signal_{tag}"
    hist = f"hist_{tag}"

    # Fast and slow EMAs of the base column.
    EMA(df, base, fE, fastEMA)
    EMA(df, base, sE, slowEMA)

    # MACD line: fast - slow wherever both EMAs are non-zero (zero marks the
    # unseeded head of the series), else 0.
    both_seeded = (df[fE] != 0) & (df[sE] != 0)
    df[macd] = np.where(both_seeded, df[fE] - df[sE], 0)

    # Signal line: EMA of the MACD line itself.
    EMA(df, macd, sig, signal)

    # Histogram: MACD - signal, again masked to the seeded region.
    hist_seeded = (df[macd] != 0) & (df[sig] != 0)
    df[hist] = np.where(hist_seeded, df[macd] - df[sig], 0)
    return df
def BBand(df, base='Close', period=20, multiplier=2):
    """
    Function to compute Bollinger Band (BBand)
    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the band needs to be computed from (Default Close)
        period : Integer indicates the period of computation in terms of number of candles
        multiplier : Integer indicates value to multiply the SD
    Returns :
        df : Pandas DataFrame with new columns added for
            Upper Band (UpperBB_$period_$multiplier)
            Lower Band (LowerBB_$period_$multiplier)
    """
    upper = 'UpperBB_' + str(period) + '_' + str(multiplier)
    lower = 'LowerBB_' + str(period) + '_' + str(multiplier)
    # NOTE: the mean tolerates one missing candle (min_periods=period-1) while
    # the std requires a full window, so early rows fall back to 0 below.
    sma = df[base].rolling(window=period, min_periods=period - 1).mean()
    sd = df[base].rolling(window=period).std()
    df[upper] = sma + (multiplier * sd)
    df[lower] = sma - (multiplier * sd)
    # Assign instead of chained ``df[col].fillna(..., inplace=True)``: chained
    # inplace mutation is deprecated and silently stops working under pandas
    # Copy-on-Write.
    df[upper] = df[upper].fillna(0)
    df[lower] = df[lower].fillna(0)
    return df
def RSI(df, base="Close", period=21):
    """
    Function to compute Relative Strength Index (RSI)
    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the RSI needs to be computed from (Default Close)
        period : Integer indicates the period of computation in terms of number of candles
    Returns :
        df : Pandas DataFrame with new columns added for
            Relative Strength Index (RSI_$period)
    """
    delta = df[base].diff()
    up, down = delta.copy(), delta.copy()
    up[up < 0] = 0
    down[down > 0] = 0
    # Wilder-style smoothing via exponentially weighted means of the
    # separated gains and (absolute) losses.
    rUp = up.ewm(com=period - 1, adjust=False).mean()
    rDown = down.ewm(com=period - 1, adjust=False).mean().abs()
    rsi_col = 'RSI_' + str(period)
    df[rsi_col] = 100 - 100 / (1 + rUp / rDown)
    # Assign instead of chained ``fillna(..., inplace=True)``: chained inplace
    # mutation is deprecated and breaks under pandas Copy-on-Write.
    df[rsi_col] = df[rsi_col].fillna(0)
    return df
def Ichimoku(df, ohlc=['Open', 'High', 'Low', 'Close'], param=[9, 26, 52, 26]):
    """
    Function to compute Ichimoku Cloud parameter (Ichimoku)
    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])
        param: Periods to be used in computation (default [tenkan_sen_period, kijun_sen_period, senkou_span_period, chikou_span_period] = [9, 26, 52, 26])
    Returns :
        df : Pandas DataFrame with new columns added for ['Tenkan Sen', 'Kijun Sen', 'Senkou Span A', 'Senkou Span B', 'Chikou Span']
    """
    # NOTE(review): the mutable list defaults (ohlc/param) are read-only here,
    # so sharing across calls is harmless, but None defaults would be safer.
    high = df[ohlc[1]]
    low = df[ohlc[2]]
    close = df[ohlc[3]]
    tenkan_sen_period = param[0]
    kijun_sen_period = param[1]
    senkou_span_period = param[2]
    chikou_span_period = param[3]
    tenkan_sen_column = 'Tenkan Sen'
    kijun_sen_column = 'Kijun Sen'
    senkou_span_a_column = 'Senkou Span A'
    senkou_span_b_column = 'Senkou Span B'
    chikou_span_column = 'Chikou Span'
    # Tenkan-sen (Conversion Line): midpoint of the high/low range over the short period.
    tenkan_sen_high = high.rolling(window=tenkan_sen_period).max()
    tenkan_sen_low = low.rolling(window=tenkan_sen_period).min()
    df[tenkan_sen_column] = (tenkan_sen_high + tenkan_sen_low) / 2
    # Kijun-sen (Base Line): same midpoint over the longer base period.
    kijun_sen_high = high.rolling(window=kijun_sen_period).max()
    kijun_sen_low = low.rolling(window=kijun_sen_period).min()
    df[kijun_sen_column] = (kijun_sen_high + kijun_sen_low) / 2
    # Senkou Span A (Leading Span A), plotted kijun_sen_period candles ahead.
    df[senkou_span_a_column] = ((df[tenkan_sen_column] + df[kijun_sen_column]) / 2).shift(kijun_sen_period)
    # Senkou Span B (Leading Span B), also shifted forward by the kijun period.
    senkou_span_high = high.rolling(window=senkou_span_period).max()
    senkou_span_low = low.rolling(window=senkou_span_period).min()
    df[senkou_span_b_column] = ((senkou_span_high + senkou_span_low) / 2).shift(kijun_sen_period)
    # The most current closing price plotted chikou_span_period time periods behind
    df[chikou_span_column] = close.shift(-1 * chikou_span_period)
return df | [
"numpy.logical_not",
"numpy.where"
] | [((10551, 10595), 'numpy.where', 'np.where', (['(df[ohlc[3]] < df[st])', '"""down"""', '"""up"""'], {}), "(df[ohlc[3]] < df[st], 'down', 'up')\n", (10559, 10595), True, 'import numpy as np\n'), ((12155, 12182), 'numpy.logical_not', 'np.logical_not', (['(df[fE] == 0)'], {}), '(df[fE] == 0)\n', (12169, 12182), True, 'import numpy as np\n'), ((12184, 12211), 'numpy.logical_not', 'np.logical_not', (['(df[sE] == 0)'], {}), '(df[sE] == 0)\n', (12198, 12211), True, 'import numpy as np\n'), ((12375, 12404), 'numpy.logical_not', 'np.logical_not', (['(df[macd] == 0)'], {}), '(df[macd] == 0)\n', (12389, 12404), True, 'import numpy as np\n'), ((12406, 12434), 'numpy.logical_not', 'np.logical_not', (['(df[sig] == 0)'], {}), '(df[sig] == 0)\n', (12420, 12434), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
import yaml
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
from .transformer import ScaleTransformer
class LaparoDataset(data.Dataset):
    """Dataset of rendered laparoscope images with min/max-scaled pose targets.

    Reads ``{phase}.csv`` plus per-frame JPEGs from the dataset directory and
    scales the 9 pose parameters into [0, 1] using bounds derived from the
    rendering configuration (``ds_config.yaml``).
    """

    def __init__(self, ds_num, phase, transform):
        # self.ds_dir = os.getcwd() + '/Database/ds_{:03d}'.format(ds_num)
        # expanduser() is required: pandas expands "~" internally, but the
        # builtin open() used for the yaml config below does not, which made
        # the original path fail with FileNotFoundError.
        self.ds_dir = os.path.expanduser("~/workspace/Database/ds_{:03d}".format(ds_num))
        self.phase = phase
        self.transform = transform
        df = pd.read_csv(self.ds_dir + "/{}.csv".format(phase))
        # Flip the z axis and the z component of the view normal.
        df["z"] *= -1
        df["nz"] *= -1
        self.dataframe = df
        self.PARAMS = ["x", "y", "z", "nx", "ny", "nz", "gamma_s", "gamma_c", "phi"]
        with open(self.ds_dir + "/ds_config.yaml") as f:
            config = yaml.load(f, Loader=yaml.SafeLoader)
        camera = config["camera"]
        # Visible extent of the scene at the far plane, from the camera FOV.
        x = camera["z_max"] * np.tan(np.radians(camera["fov"] / 2))
        y = x * camera["aspect"]
        # Range of each parameter
        X = [-x, x]
        Y = [-y, y]
        Z = [camera["z_min"], camera["z_max"]]
        N = [-1.0, 1.0]
        NZ = [0.25, 0.95]
        GAMMA = [-1.0, 1.0]
        PHI = [0.0, config["articulation"]["phi_max"]]
        RANGE = np.stack([X, Y, Z, N, N, NZ, GAMMA, GAMMA, PHI], 0)
        # Fit the scaler on the parameter bounds so transform() maps into [0, 1].
        self.scaler = MinMaxScaler()
        self.scaler.fit(RANGE.T)

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        # Frames are 1-indexed on disk (img_00001.jpg, ...).
        img = Image.open(
            os.path.join(self.ds_dir, self.phase, "img_{:05d}.jpg".format(idx + 1))
        )
        img = self.transform(img)
        t = np.array([self.dataframe.loc[idx, self.PARAMS]])
        target = torch.Tensor(self.scaler.transform(t)).squeeze()
        return img, target
class NpLaparoDataset(data.Dataset):
    """Dataset serving pre-resized numpy image arrays with scaled pose targets."""

    def __init__(self, ds_num, phase, input_size):
        self.input_size = input_size
        self.ds_dir = os.getcwd() + "/Database/ds_{:03d}".format(ds_num)
        self.phase = phase
        frame = pd.read_csv(self.ds_dir + "/{}.csv".format(phase))
        # Flip the z axis and the z component of the view normal.
        frame["z"] *= -1
        frame["nz"] *= -1
        self.dataframe = frame
        self.PARAMS = ["x", "y", "z", "nx", "ny", "nz", "gamma_s", "gamma_c", "phi"]
        self.scaler = ScaleTransformer(ds_num)

    def __len__(self):
        return self.dataframe.shape[0]

    def __getitem__(self, idx):
        # Arrays live under "<W>x<H>/numpy/<phase>/" and are 1-indexed on disk.
        npy_path = os.path.join(
            self.ds_dir,
            "{}x{}/numpy".format(self.input_size[0], self.input_size[1]),
            self.phase,
            "img_{:05d}.npy".format(idx + 1),
        )
        img = torch.tensor(np.load(npy_path))
        row = np.array([self.dataframe.loc[idx, self.PARAMS]])
        target = torch.Tensor(self.scaler.transform(row)).squeeze()
        return img, target
| [
"numpy.stack",
"numpy.radians",
"yaml.load",
"os.getcwd",
"sklearn.preprocessing.MinMaxScaler",
"numpy.array",
"torch.tensor"
] | [((1267, 1318), 'numpy.stack', 'np.stack', (['[X, Y, Z, N, N, NZ, GAMMA, GAMMA, PHI]', '(0)'], {}), '([X, Y, Z, N, N, NZ, GAMMA, GAMMA, PHI], 0)\n', (1275, 1318), True, 'import numpy as np\n'), ((1342, 1356), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1354, 1356), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1649, 1697), 'numpy.array', 'np.array', (['[self.dataframe.loc[idx, self.PARAMS]]'], {}), '([self.dataframe.loc[idx, self.PARAMS]])\n', (1657, 1697), True, 'import numpy as np\n'), ((2654, 2671), 'torch.tensor', 'torch.tensor', (['img'], {}), '(img)\n', (2666, 2671), False, 'import torch\n'), ((2685, 2733), 'numpy.array', 'np.array', (['[self.dataframe.loc[idx, self.PARAMS]]'], {}), '([self.dataframe.loc[idx, self.PARAMS]])\n', (2693, 2733), True, 'import numpy as np\n'), ((822, 858), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (831, 858), False, 'import yaml\n'), ((1941, 1952), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1950, 1952), False, 'import os\n'), ((931, 960), 'numpy.radians', 'np.radians', (["(camera['fov'] / 2)"], {}), "(camera['fov'] / 2)\n", (941, 960), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from .backbones import *
class Chomp1d(nn.Module):
    """Trim the trailing ``chomp_size`` timesteps introduced by causal padding."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Drop the last chomp_size steps along the time (last) axis.
        trimmed = x[:, :, :-self.chomp_size]
        return trimmed.contiguous()
class TemporalBlock(nn.Module):
    """Residual TCN block: two weight-normalised causal dilated 1-d convolutions.

    Each conv is padded and then chomped on the right so the output stays
    causal and keeps the input length; a 1x1 conv aligns the channel count
    on the skip path when input and output channels differ.
    """
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)  # remove trailing padding -> causal conv
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv on the residual path only when channel counts differ.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()
    def init_weights(self):
        # Small-variance normal init, as in the reference TCN implementation.
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)
    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (1, 2, 4, ...)."""

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        prev_channels = num_inputs
        for level, channels in enumerate(num_channels):
            dilation = 2 ** level
            # Padding keeps the sequence length constant before chomping.
            blocks.append(
                TemporalBlock(
                    prev_channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=dilation,
                    padding=(kernel_size - 1) * dilation,
                    dropout=dropout,
                )
            )
            prev_channels = channels
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
class ImageTCN(nn.Module):
    """Frozen CNN encoder followed by a temporal convolutional network.

    Each frame of a clip (B, C, N, H, W) is encoded independently, the
    per-frame features are mixed by the TCN over the temporal axis, and a
    1x1 conv plus a linear head produce one logit per clip.
    """
    def __init__(self,
                 # Encoder
                 backbone,
                 num_classes,
                 dropout,
                 pretrained,
                 # TCN
                 num_inputs,
                 num_channels,
                 kernel_size=3,
                 tcn_dropout=0.5):
        super().__init__()
        # NOTE(review): eval() assumes `backbone` names a constructor brought in
        # by ``from .backbones import *`` -- fragile and unsafe for untrusted input.
        self.encoder, dim_feats = eval(backbone)(pretrained=pretrained)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.tcn = TemporalConvNet(num_inputs, num_channels, kernel_size, tcn_dropout)
        self.linear1 = nn.Conv1d(num_inputs, 1, kernel_size=1)
        self.linear2 = nn.Linear(dim_feats, num_classes)
        self.num_inputs = num_inputs
        self._freeze_encoder()
    def _freeze_encoder(self):
        # The encoder acts as a fixed feature extractor.
        for param in self.encoder.parameters():
            param.requires_grad = False
    def train(self, mode=True):
        # Re-freeze after every mode switch so optimizers never see encoder grads.
        super().train(mode=mode)
        self._freeze_encoder()
    def forward_train(self, x):
        # x: (B, C, N, H, W); encode each of the N frames separately.
        features = []
        for i in range(x.size(2)):
            features.append(self.dropout1(self.encoder(x[:,:,i])))
        features = torch.stack(features, dim=1)
        features = self.tcn(features)
        out = self.linear1(features)
        out = self.dropout2(out)
        out = self.linear2(out[:,0])
        return out[:,0]
    def forward_test(self, x):
        # x.shape = (1, C, N, H, W)
        if x.size(0) != 1: raise Exception('Batch size must be 1 for inference')
        # Zero-pad short clips to one full window of num_inputs frames.
        # NOTE(review): ``np`` is not imported at module level in this file;
        # this relies on ``from .backbones import *`` re-exporting numpy -- confirm.
        if x.size(2) < self.num_inputs:
            x = torch.cat([x, torch.from_numpy(np.zeros((x.shape[0],x.shape[1],self.num_inputs-x.shape[2],x.shape[3],x.shape[4]))).float()], dim=2)
        preds = []
        # Slide a non-overlapping window of num_inputs frames over the clip.
        for i in range(0, x.size(2)-self.num_inputs+1, self.num_inputs):
            preds.append(self.forward_train(x[:,:,i:i+self.num_inputs]))
        # Cover the ragged tail with one extra (overlapping) window.
        if x.size(2) > self.num_inputs and x.size(2) % self.num_inputs != 0:
            preds.append(self.forward_train(x[:,:,x.size(2)-self.num_inputs:x.size(2)]))
        preds = torch.stack(preds, dim=1)
        # Average the sigmoid probabilities over all windows.
        return torch.mean(torch.sigmoid(preds), dim=1)
    def forward(self, x):
        if self.training:
            return self.forward_train(x)
        else:
            return self.forward_test(x)
if __name__ == '__main__':
    # Smoke test: run a 60-frame clip (batch size 1) through the eval-mode model.
    import torch, numpy as np
    from factory.models.tcn import ImageTCN
    model = ImageTCN('se_resnext50', 1, 0.5, None, 50, [50,50,50,50])
    model.eval()
    X = torch.from_numpy(np.ones((1,3,60,64,64))).float()
    out = model(X)
| [
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.stack",
"factory.models.tcn.ImageTCN",
"torch.nn.Sequential",
"torch.nn.Conv1d",
"numpy.zeros",
"numpy.ones",
"torch.sigmoid",
"torch.nn.Linear"
] | [((4956, 5016), 'factory.models.tcn.ImageTCN', 'ImageTCN', (['"""se_resnext50"""', '(1)', '(0.5)', 'None', '(50)', '[50, 50, 50, 50]'], {}), "('se_resnext50', 1, 0.5, None, 50, [50, 50, 50, 50])\n", (4964, 5016), False, 'from factory.models.tcn import ImageTCN\n'), ((766, 775), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (773, 775), True, 'import torch.nn as nn\n'), ((800, 819), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (810, 819), True, 'import torch.nn as nn\n'), ((1054, 1063), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1061, 1063), True, 'import torch.nn as nn\n'), ((1088, 1107), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1098, 1107), True, 'import torch.nn as nn\n'), ((1128, 1250), 'torch.nn.Sequential', 'nn.Sequential', (['self.conv1', 'self.chomp1', 'self.relu1', 'self.dropout1', 'self.conv2', 'self.chomp2', 'self.relu2', 'self.dropout2'], {}), '(self.conv1, self.chomp1, self.relu1, self.dropout1, self.\n conv2, self.chomp2, self.relu2, self.dropout2)\n', (1141, 1250), True, 'import torch.nn as nn\n'), ((1394, 1403), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1401, 1403), True, 'import torch.nn as nn\n'), ((2446, 2468), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2459, 2468), True, 'import torch.nn as nn\n'), ((2996, 3015), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3006, 3015), True, 'import torch.nn as nn\n'), ((3040, 3059), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3050, 3059), True, 'import torch.nn as nn\n'), ((3170, 3209), 'torch.nn.Conv1d', 'nn.Conv1d', (['num_inputs', '(1)'], {'kernel_size': '(1)'}), '(num_inputs, 1, kernel_size=1)\n', (3179, 3209), True, 'import torch.nn as nn\n'), ((3233, 3266), 'torch.nn.Linear', 'nn.Linear', (['dim_feats', 'num_classes'], {}), '(dim_feats, num_classes)\n', (3242, 3266), True, 'import torch.nn as nn\n'), ((3729, 3757), 'torch.stack', 'torch.stack', (['features'], 
{'dim': '(1)'}), '(features, dim=1)\n', (3740, 3757), False, 'import torch, numpy as np\n'), ((4611, 4636), 'torch.stack', 'torch.stack', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (4622, 4636), False, 'import torch, numpy as np\n'), ((567, 665), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_inputs', 'n_outputs', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation'}), '(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding,\n dilation=dilation)\n', (576, 665), True, 'import torch.nn as nn\n'), ((854, 953), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_outputs', 'n_outputs', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation'}), '(n_outputs, n_outputs, kernel_size, stride=stride, padding=padding,\n dilation=dilation)\n', (863, 953), True, 'import torch.nn as nn\n'), ((1305, 1338), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_inputs', 'n_outputs', '(1)'], {}), '(n_inputs, n_outputs, 1)\n', (1314, 1338), True, 'import torch.nn as nn\n'), ((4663, 4683), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (4676, 4683), False, 'import torch, numpy as np\n'), ((5056, 5083), 'numpy.ones', 'np.ones', (['(1, 3, 60, 64, 64)'], {}), '((1, 3, 60, 64, 64))\n', (5063, 5083), True, 'import torch, numpy as np\n'), ((4163, 4255), 'numpy.zeros', 'np.zeros', (['(x.shape[0], x.shape[1], self.num_inputs - x.shape[2], x.shape[3], x.shape[4])'], {}), '((x.shape[0], x.shape[1], self.num_inputs - x.shape[2], x.shape[3],\n x.shape[4]))\n', (4171, 4255), True, 'import torch, numpy as np\n')] |
import numpy as np
import dgramCreate as dc
import numpy as np
import dgramCreate as dc
import os, pickle, json, base64
from psana.dgrammanager import DgramManager
from psana import DataSource
def load_json(filename):
    """Read a JSON event list and decode base64-packed numpy arrays in place.

    Array values are stored as [base64_bytes, shape, dtype] triples; any
    value that does not match that layout is kept unchanged.
    """
    with open(filename, 'r') as f:
        data = json.load(f)
    event_dict = []
    for event in data:
        for key, val in event.items():
            try:
                raw = base64.b64decode(val[0])
                event[key] = np.frombuffer(raw, dtype=np.dtype(val[2])).reshape(val[1])
            except TypeError:
                # Not an encoded-array triple (e.g. a plain scalar) -- keep as is.
                pass
        event_dict.append(event)
    return event_dict
def translate_xtc_demo(det_type, offset=1):
    """Convert a JSON dump of LCLS1 events into an LCLS2 ``.xtc2`` file.

    det_type: base name of the ``<det_type>.json`` input and the
        ``<det_type>.xtc2`` output file.
    offset: index of the LCLS1 event used as the LCLS2 configure dgram
        (see the explanatory comment at the bottom of the file).
    """
    xtcfile = '%s.xtc2' % det_type
    try:
        os.remove(xtcfile)
    except:
        # NOTE(review): bare except swallows everything; except OSError
        # (or FileNotFoundError) would be more precise here.
        pass
    lcls1_xtc = load_json(det_type+'.json')
    cfg_namesId = 0
    cfg_alg = dc.alg('cfg', [1, 2, 3])
    cfg_ninfo = dc.nameinfo('my'+det_type, det_type, 'serialnum1234', cfg_namesId)
    evt_namesId = 1
    evt_alg = dc.alg('raw', [4, 5, 6])
    evt_ninfo = dc.nameinfo('my'+det_type, det_type, 'serialnum1234', evt_namesId)
    # Rename 'version' -> 'version.': likely a collision between the lcls1
    # xtc "version" field and its lcls2 counterpart.
    try:
        lcls1_xtc[0]['version.'] = lcls1_xtc[0]['version']
        del lcls1_xtc[0]['version']
    except KeyError:
        pass
    cydgram = dc.CyDgram()
    with open(xtcfile, 'wb') as f:
        # the order of these two addDet calls must match the namesId's above
        cydgram.addDet(cfg_ninfo, cfg_alg, lcls1_xtc[0])
        cydgram.addDet(evt_ninfo, evt_alg, lcls1_xtc[offset])
        df = cydgram.get(0,0,0)
        f.write(df)
        # this currently duplicates the first dgram
        for event_dgram in lcls1_xtc[offset:]:
            cydgram.addDet(evt_ninfo, evt_alg, event_dgram)
            df = cydgram.get(0,0,0)
            f.write(df)
# Examples
# The LCLS1 dgrams are organized differently than the LCLS2 version:
# in LCLS2 the configure dgram contains all the names used by the subsequent
# event dgrams, which is not the case for LCLS1. The first event is therefore
# used as a pseudo-configure and the second event as the configure for the
# remaining events. Edge case: for the crystal data the second event is blank,
# which segfaults DgramManager when the LCLS2 xtc file is loaded; the
# workaround is to pass offset=2 to translate_xtc_demo so a later event
# serves as the configure.
translate_xtc_demo('jungfrau')
#translate_xtc_demo('epix')
#translate_xtc_demo('crystal_dark', 2)
#translate_xtc_demo('crystal_xray', 2)
| [
"os.remove",
"json.load",
"numpy.dtype",
"dgramCreate.alg",
"dgramCreate.nameinfo",
"base64.b64decode",
"dgramCreate.CyDgram"
] | [((822, 846), 'dgramCreate.alg', 'dc.alg', (['"""cfg"""', '[1, 2, 3]'], {}), "('cfg', [1, 2, 3])\n", (828, 846), True, 'import dgramCreate as dc\n'), ((863, 931), 'dgramCreate.nameinfo', 'dc.nameinfo', (["('my' + det_type)", 'det_type', '"""serialnum1234"""', 'cfg_namesId'], {}), "('my' + det_type, det_type, 'serialnum1234', cfg_namesId)\n", (874, 931), True, 'import dgramCreate as dc\n'), ((965, 989), 'dgramCreate.alg', 'dc.alg', (['"""raw"""', '[4, 5, 6]'], {}), "('raw', [4, 5, 6])\n", (971, 989), True, 'import dgramCreate as dc\n'), ((1006, 1074), 'dgramCreate.nameinfo', 'dc.nameinfo', (["('my' + det_type)", 'det_type', '"""serialnum1234"""', 'evt_namesId'], {}), "('my' + det_type, det_type, 'serialnum1234', evt_namesId)\n", (1017, 1074), True, 'import dgramCreate as dc\n'), ((1337, 1349), 'dgramCreate.CyDgram', 'dc.CyDgram', ([], {}), '()\n', (1347, 1349), True, 'import dgramCreate as dc\n'), ((271, 283), 'json.load', 'json.load', (['f'], {}), '(f)\n', (280, 283), False, 'import os, pickle, json, base64\n'), ((698, 716), 'os.remove', 'os.remove', (['xtcfile'], {}), '(xtcfile)\n', (707, 716), False, 'import os, pickle, json, base64\n'), ((425, 449), 'base64.b64decode', 'base64.b64decode', (['val[0]'], {}), '(val[0])\n', (441, 449), False, 'import os, pickle, json, base64\n'), ((459, 475), 'numpy.dtype', 'np.dtype', (['val[2]'], {}), '(val[2])\n', (467, 475), True, 'import numpy as np\n')] |
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn import datasets
from scipy.io import loadmat
import pandas as pd
'''
The Principal Component Analysis gives us the directions which holds the most
of the variation in the dataset
Works by finding the eigenvectors of the covariance matrix
Unsupervised dimension reduction algorithm
'''
# NOTE(review): '\m' is not a valid escape sequence; it survives as a literal
# backslash today but emits a SyntaxWarning on newer Pythons -- a raw string
# r'Datasets\mnist_train.mat' would be safer.
df = loadmat('Datasets\mnist_train.mat')
y = df.get('train_labels')
X = df.get('train_X')
# Keep only the odd digits 1, 3, 5, 7, 9.
indexes = [i in [1,3,5,7,9] for i in y]
y = y[indexes]
X = X[indexes]
def pca_plot(X, y=[]):
    """Scatter the first two dimensions of X, coloured by label when given.

    Args:
        X ([n,d]): input data (only columns 0 and 1 are drawn)
        y ([n,1], optional): class labels used for colouring
    """
    title = "PCA plot "
    plt.figure(figsize=(5, 3))
    if len(y) == len(X):
        # One scatter call per class so each gets its own colour and legend entry.
        for cls in np.unique(y):
            mask = np.where(y == cls)
            plt.scatter(X[mask, 0], X[mask, 1], label=cls)
        title += 'of classes ' + ' ,'.join(str(c) for c in np.unique(y))
        plt.legend()
    else:
        plt.scatter(X[:, 0], X[:, 1])
    plt.title(title)
    plt.xlabel('PCA_1')
    plt.ylabel('PCA_2')
    plt.show()
def pca(X, dim_kept=2, var_kept=0.8):
    """Principal Component Analysis (PCA) as outlined in Alpaydin.

    Args:
        X ([n,d]): input data
        dim_kept (int): upper bound on the output dimensionality
        var_kept (float, optional): once this fraction of variance is
            retained, no further components are kept

    Returns:
        out ([n, out_dim]): data projected onto the leading principal
            components, out_dim = min(dim_kept, components needed for var_kept)
    """
    # 1. Standardise each feature (the +0.01 guards against zero variance).
    mu = np.sum(X, axis=0) / X.shape[0]
    std = np.sqrt(np.sum(np.power(X - mu, 2), axis=0) / X.shape[0])
    X = (X - mu) / (std + 0.01)
    # 2. Eigendecompose the covariance matrix. eigh (not eig) is used: the
    #    covariance is symmetric, eigh returns real values, and its eigenvalues
    #    come out sorted ascending -- we flip to descending so the leading
    #    components really are first. np.linalg.eig gives NO ordering
    #    guarantee, which silently broke the component selection below.
    #    (The original also passed bias=True together with ddof=1; ddof wins,
    #    so only ddof=1 is kept.)
    eigval, eigvec = np.linalg.eigh(np.cov(X.T, ddof=1))
    order = np.argsort(eigval)[::-1]
    eigval, eigvec = eigval[order], eigvec[:, order]
    # 3. Decide the output dimension from both restrictions.
    cumvar = np.cumsum(eigval / eigval.sum())
    var_restriction = np.searchsorted(cumvar, var_kept) + 1
    out_dim = min(dim_kept, var_restriction)
    # 4. Project onto the leading components.
    return X.dot(eigvec[:, :out_dim])
# load_iris's return_X_y flag must be passed by keyword: the positional form
# was deprecated in scikit-learn 0.23 and removed in 1.0.
X, y = datasets.load_iris(return_X_y=True)
X = pca(X,2)
pca_plot(X,y) | [
"matplotlib.pyplot.title",
"sklearn.datasets.load_iris",
"matplotlib.pyplot.show",
"numpy.sum",
"scipy.io.loadmat",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.power",
"numpy.searchsorted",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.cov",
"matplotlib.pyplot.ylabel"... | [((372, 408), 'scipy.io.loadmat', 'loadmat', (['"""Datasets\\\\mnist_train.mat"""'], {}), "('Datasets\\\\mnist_train.mat')\n", (379, 408), False, 'from scipy.io import loadmat\n'), ((1973, 1997), 'sklearn.datasets.load_iris', 'datasets.load_iris', (['(True)'], {}), '(True)\n', (1991, 1997), False, 'from sklearn import datasets\n'), ((749, 775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (759, 775), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1055), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1048, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1076), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""PCA_1"""'], {}), "('PCA_1')\n", (1067, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1097), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PCA_2"""'], {}), "('PCA_2')\n", (1088, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1109, 1111), True, 'import matplotlib.pyplot as plt\n'), ((987, 999), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (997, 999), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1038), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {}), '(X[:, 0], X[:, 1])\n', (1020, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1467, 1484), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1473, 1484), True, 'import numpy as np\n'), ((1685, 1715), 'numpy.cov', 'np.cov', (['X.T'], {'bias': '(True)', 'ddof': '(1)'}), '(X.T, bias=True, ddof=1)\n', (1691, 1715), True, 'import numpy as np\n'), ((1813, 1846), 'numpy.searchsorted', 'np.searchsorted', (['cumvar', 'var_kept'], {}), '(cumvar, var_kept)\n', (1828, 1846), True, 'import numpy as np\n'), ((821, 833), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (830, 833), True, 'import numpy as np\n'), ((849, 865), 'numpy.where', 'np.where', (['(y 
== v)'], {}), '(y == v)\n', (857, 865), True, 'import numpy as np\n'), ((869, 919), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[indices, 0]', 'X[indices, 1]'], {'label': 'v'}), '(X[indices, 0], X[indices, 1], label=v)\n', (880, 919), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1539), 'numpy.power', 'np.power', (['(X - mu)', '(2)'], {}), '(X - mu, 2)\n', (1528, 1539), True, 'import numpy as np\n'), ((970, 982), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (979, 982), True, 'import numpy as np\n')] |
import unittest
import hylite
from hylite import HyFeature
from hylite.reference.features import Minerals
import numpy as np
class MyTestCase(unittest.TestCase):
    """Smoke tests for hylite's reference mineral features."""

    def test_construct(self):
        # Reference mineral features should be importable and populated.
        assert Minerals.Mica_K is not None
        assert Minerals.Chlorite_Fe is not None

    def test_multigauss(self):
        x = np.linspace(2100., 2400., 500)
        y = HyFeature.gaussian(x, 2200., 200., 0.5)
        # assertAlmostEquals is a deprecated alias removed in Python 3.12;
        # assertAlmostEqual is the supported spelling.
        self.assertAlmostEqual(np.max(y), 1.0, 2)
        self.assertAlmostEqual(np.min(y), 0.5, 2)
        y = HyFeature.multi_gauss(x, [2200., 2340.], [200., 200.], [0.5, 0.5])
        self.assertAlmostEqual(np.max(y), 1.0, 2)
        self.assertAlmostEqual(np.min(y), 0.5, 2)

    def test_plotting(self):
        # Only checks that plotting runs without raising.
        Minerals.Mica_K[0].quick_plot()
        Minerals.Mica_K[0].quick_plot(method='')

    def test_fitting(self):
        pass
# Run the test suite when the module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"hylite.HyFeature.gaussian",
"hylite.HyFeature.multi_gauss",
"numpy.max",
"numpy.min",
"numpy.linspace"
] | [((934, 949), 'unittest.main', 'unittest.main', ([], {}), '()\n', (947, 949), False, 'import unittest\n'), ((328, 360), 'numpy.linspace', 'np.linspace', (['(2100.0)', '(2400.0)', '(500)'], {}), '(2100.0, 2400.0, 500)\n', (339, 360), True, 'import numpy as np\n'), ((372, 413), 'hylite.HyFeature.gaussian', 'HyFeature.gaussian', (['x', '(2200.0)', '(200.0)', '(0.5)'], {}), '(x, 2200.0, 200.0, 0.5)\n', (390, 413), False, 'from hylite import HyFeature\n'), ((530, 600), 'hylite.HyFeature.multi_gauss', 'HyFeature.multi_gauss', (['x', '[2200.0, 2340.0]', '[200.0, 200.0]', '[0.5, 0.5]'], {}), '(x, [2200.0, 2340.0], [200.0, 200.0], [0.5, 0.5])\n', (551, 600), False, 'from hylite import HyFeature\n'), ((739, 771), 'numpy.linspace', 'np.linspace', (['(2100.0)', '(2400.0)', '(500)'], {}), '(2100.0, 2400.0, 500)\n', (750, 771), True, 'import numpy as np\n'), ((446, 455), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (452, 455), True, 'import numpy as np\n'), ((498, 507), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (504, 507), True, 'import numpy as np\n'), ((627, 636), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (633, 636), True, 'import numpy as np\n'), ((678, 687), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (684, 687), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from os.path import split, dirname, abspath
from scipy.optimize import minimize
import sys
sys.path.append(split(split(dirname(abspath(__file__)))[0])[0])
import jobmanager as jm
from calculations import *
class FitFunc_Client(jm.JobManager_Client):
    """Job-manager client: each job is a BFGS fit of the (g, w) expansion
    against the exact bath correlation function (helpers from calculations)."""
    def __init__(self):
        # '<PASSWORD>' is a redacted placeholder; must match the server's authkey.
        super(FitFunc_Client, self).__init__(server="localhost",
                                          authkey='<PASSWORD>',
                                          port = 42524,
                                          nproc = 0,
                                          nice=19,
                                          no_warnings=True,
                                          verbose=2)
    @staticmethod
    def func(args, const_args):
        # Unpack the shared problem definition distributed by the server.
        eta, Gamma, s, p, tau_max, tau_n = const_args
        tau = np.linspace(0, tau_max, tau_n)
        # Exact correlation function to fit against.
        alpha_true = alpha_func(tau, eta, Gamma, s)
        f_min = lambda x: diff(x, tau, alpha_true, p)
        # args is the random starting vector supplied by the server.
        res = minimize(fun=f_min,
                       x0=np.array(args),
                       method="BFGS")
        return res.x, res.fun
class FitFunc_Server(jm.JobManager_Server):
    """Job-manager server that seeds random fit starting points and tracks
    the best (lowest objective value) fit returned by the clients."""
    def __init__(self, const_args, num_samples, n, g_max, w_max):
        # setup init parameters for the ancestor class
        # ('<PASSWORD>' is a redacted placeholder; must match the clients)
        authkey = '<PASSWORD>'
        fname_dump = None
        port = 42524
        verbose = 1
        msg_interval = 1
        # init ancestor class
        super(FitFunc_Server, self).__init__(authkey=authkey,
                                             const_arg = const_args,
                                             port = port,
                                             verbose = verbose,
                                             msg_interval = msg_interval,
                                             fname_dump = fname_dump)
        self.final_result = None
        # Seed num_samples random starting vectors (Re(g), Im(g), w), each
        # drawn uniformly from [-max, max].
        for i in range(num_samples):
            g_re = (np.random.rand(n)*2 - 1)*g_max
            g_im = (np.random.rand(n)*2 - 1)*g_max
            w = (np.random.rand(n)*2 - 1)*w_max
            x0 = tuple(g_re) + tuple(g_im) + tuple(w) # need tuple instead if numpy ndarray
                                                      # here because it needs to be hashable
            self.put_arg(x0)
    def process_new_result(self, arg, result):
        """Keep the result with the smallest objective value seen so far."""
        x, fun = result
        g, w, n = x_to_g_w(x)
        # 'is None' instead of '== None': identity is the correct, idiomatic
        # way to test for None (PEP 8).
        if self.final_result is None or self.final_result[0] > fun:
            print("\nnew result {}".format(fun))
            self.final_result = (fun, g, w)
    def process_final_result(self):
        """Plot the best approximation against the true correlation function."""
        print("final Res")
        if self.final_result is not None:
            eta, Gamma, s, p, tau_max, tau_n = self.const_arg
            fun, g, w = self.final_result
            tau = np.linspace(0, tau_max, tau_n)
            alpha_true = alpha_func(tau, eta, Gamma, s)
            alpha = alpha_apprx(tau, g, w)
            plt.plot(tau, np.real(alpha_true), c='k', label='true')
            plt.plot(tau, np.imag(alpha_true), c='k', ls='--')
            plt.plot(tau, np.real(alpha), c='r', label='approx')
            plt.plot(tau, np.imag(alpha), c='r', ls='--')
            plt.legend()
            plt.grid()
            plt.show()
# Shared problem definition: bath spectral density parameters (eta, Gamma, s),
# weight p, the time grid (tau_max, tau_n), the number of random restarts and
# the expansion order / sampling bounds for the starting vectors.
args = {}
args['eta'] = 1
args['Gamma'] = 1
args['s'] = 0.7
args['p'] = 0.99
args['tau_max'] = 2
args['tau_n'] = 500
args['num_samples'] = 300
args['n'] = 5
args['g_max'] = 10
args['w_max'] = 5
# Tuple handed unchanged to every client job.
const_args = (args['eta'],
              args['Gamma'],
              args['s'],
              args['p'],
              args['tau_max'],
              args['tau_n'])
def FitFunc_Server_from_args():
    """Construct a FitFunc_Server from the module-level configuration dict."""
    return FitFunc_Server(const_args = const_args,
                          num_samples = args['num_samples'],
                          n = args['n'],
                          g_max = args['g_max'],
                          w_max = args['w_max'])
if __name__ == "__main__":
    fitfunc_server = FitFunc_Server_from_args()
    # note the server does not get started, but as the init
    # function of the subclass generates the arguments
    # we can check if they can be processed by the
    # client's static function func
    arg0 = fitfunc_server.job_q.get()
    x, fun = FitFunc_Client.func(arg0, const_args=const_args)
    print("arg0 :", arg0)
    print("x :", x)
    print("fmin :", fun)
| [
"os.path.abspath",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.imag",
"numpy.array",
"numpy.linspace",
"numpy.real",
"numpy.random.rand",
"matplotlib.pyplot.grid"
] | [((978, 1008), 'numpy.linspace', 'np.linspace', (['(0)', 'tau_max', 'tau_n'], {}), '(0, tau_max, tau_n)\n', (989, 1008), True, 'import numpy as np\n'), ((3069, 3099), 'numpy.linspace', 'np.linspace', (['(0)', 'tau_max', 'tau_n'], {}), '(0, tau_max, tau_n)\n', (3080, 3099), True, 'import numpy as np\n'), ((3491, 3503), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3501, 3503), True, 'import matplotlib.pyplot as plt\n'), ((3516, 3526), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3524, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3547, 3549), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1208), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (1202, 1208), True, 'import numpy as np\n'), ((3238, 3257), 'numpy.real', 'np.real', (['alpha_true'], {}), '(alpha_true)\n', (3245, 3257), True, 'import numpy as np\n'), ((3306, 3325), 'numpy.imag', 'np.imag', (['alpha_true'], {}), '(alpha_true)\n', (3313, 3325), True, 'import numpy as np\n'), ((3369, 3383), 'numpy.real', 'np.real', (['alpha'], {}), '(alpha)\n', (3376, 3383), True, 'import numpy as np\n'), ((3434, 3448), 'numpy.imag', 'np.imag', (['alpha'], {}), '(alpha)\n', (3441, 3448), True, 'import numpy as np\n'), ((284, 301), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (291, 301), False, 'from os.path import split, dirname, abspath\n'), ((2154, 2171), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2168, 2171), True, 'import numpy as np\n'), ((2205, 2222), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2219, 2222), True, 'import numpy as np\n'), ((2253, 2270), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2267, 2270), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import math
import numpy as np
def direction(d):
    """Standard deviation of the heading angles computed over consecutive
    triples of points in *d*; an incomplete trailing triple is ignored."""
    angles = []
    for start in range(0, len(d), 3):
        triple = d[start:start + 3]
        # Only complete triples contribute an angle.
        if len(triple) == 3:
            angles.append(calculate_direction(triple))
    return np.std(angles)
def calculate_direction(list):
    """Angle in radians between the vector (first point - third point) and
    the positive x-axis, clamped into [0, pi] by acos.

    NOTE: the parameter name shadows the builtin ``list``; it is kept for
    backward compatibility with keyword callers.
    """
    dx = list[0][0] - list[2][0]
    dy = list[0][1] - list[2][1]
    length = math.sqrt(dx * dx + dy * dy)
    # Degenerate (zero-length) vectors get cos = 0, i.e. an angle of pi/2.
    cos_angle = dx / length if length != 0 else 0.0
    alpha = math.acos(min(max(cos_angle, -1.0), 1.0))
    # acos always returns a value in [0, pi], so the guard below is
    # effectively always satisfied; kept for parity with the original.
    return alpha if 0 <= alpha < 2 * math.pi else 0
| [
"numpy.std",
"math.sqrt"
] | [((399, 413), 'numpy.std', 'np.std', (['direct'], {}), '(direct)\n', (405, 413), True, 'import numpy as np\n'), ((545, 573), 'math.sqrt', 'math.sqrt', (['(x1 * x1 + y1 * y1)'], {}), '(x1 * x1 + y1 * y1)\n', (554, 573), False, 'import math\n')] |
"""Exercise object that can effectively be utilized in workout class"""
import random
from statistics import mean
from copy import deepcopy
from typing import List, Tuple
import numpy as np
from rengine.config import EXERCISE_CATEGORY_DATA, EquipmentAvailable, MuscleGroup
from rengine.config import ExerciseLoad, ExerciseType, EXERCISE_DF
from rengine.config import ExperienceLevel
def pick_random_exercise(
    muscle_groups_targeted: List[str],
    exercise_type: ExerciseType,
    allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT],
    experience_levels = [ExperienceLevel.BEGINNER, ExperienceLevel.INTERMEDIATE, ExperienceLevel.EXPERIENCED],
    equipment_available = EquipmentAvailable.ALL,
    excluded_exercise_names: List[str] = []
):
    """Pick a random exercise matching all of the given constraints.

    Filters the exercise catalogue by equipment, exclusion list, targeted
    muscle groups, exercise type and experience level, then picks one row
    uniformly at random.

    NOTE: the list defaults are mutable but only ever read, never mutated.

    Returns
    -------
    ExerciseFromTypePreset or None
        ``None`` when no exercise satisfies every filter.
    """
    # EXERCISE_DF is only read, so no ``global`` declaration is needed;
    # copy it so the filtering below never touches the shared catalogue.
    df = EXERCISE_DF.copy()
    if equipment_available != EquipmentAvailable.ALL:
        df = df[df["Equipment"].isin(equipment_available)]
    df = df[
        (~df["EXERCISE"].isin(excluded_exercise_names)) &
        (df["Muscle Group"].isin(muscle_groups_targeted)) &
        (df[exercise_type] == 1) &
        # Keep rows valid for at least one of the requested experience levels.
        (df.loc[:, experience_levels].sum(axis=1) > 0)
    ]
    df.index = range(len(df))
    if len(df) == 0:
        return None
    exercise_ind = random.randint(0, len(df) - 1)
    exercise_chose = df.iloc[exercise_ind, :]
    return ExerciseFromTypePreset(exercise_chose["EXERCISE"], exercise_type, allowed_loads)
def listify_if_non_iterable(obj):
    """Deep-copy *obj*; wrap the copy in a one-element list unless it is
    already a list or tuple."""
    copied = deepcopy(obj)
    return copied if type(copied) in (tuple, list) else [copied]
def get_variables_based_on_exercise_type_and_load(exercise_type: ExerciseType, exercise_load: ExerciseLoad):
    """Look up the sets/rep-range/rest-time preset for a type/load pair."""
    preset = EXERCISE_CATEGORY_DATA[exercise_type][exercise_load]
    return {key: preset[key] for key in ("sets", "rep_range", "rest_time_range")}
def get_muscle_group(exercise_name):
    """Finds muscle group based on exercise name. If does not exist returns 'UNKNOWN'"""
    matching = EXERCISE_DF[EXERCISE_DF["EXERCISE"] == exercise_name]
    return matching["Muscle Group"].values[0]
class Exercise:
    """Basic implementation of an exercise.

    Holds the name, set count, rep range, rest-time range and (optionally)
    the targeted muscle group.
    """
    def __init__(self, exercise_name: str, sets, rep_range: Tuple[int], rest_time_range: Tuple[float], muscle_group: MuscleGroup = None):
        self.exercise_name = exercise_name
        self.muscle_group = muscle_group
        self.sets = sets
        self.rep_range = rep_range
        self.rest_time_range = rest_time_range
    @property
    def length(self):
        """Length in minutes. Currently with assumption that each set takes 1 minute"""
        rest_times = listify_if_non_iterable(self.rest_time_range)
        minutes_per_set = 1 + mean(rest_times)
        return self.sets * minutes_per_set
    def __str__(self) -> str:
        fields = ", ".join([
            f"exercise_name: {self.exercise_name}",
            f"muscle_group: {self.muscle_group}",
            f"sets: {str(self.sets)}",
            f"rep_range: {str(self.rep_range)}",
            f"rest_time_range: {str(self.rest_time_range)}",
        ])
        return "{" + fields + "}"
class ExerciseFromTypePreset(Exercise):
    """Similar to Exercise class but sets, rep_range and rest_time determined by ExerciseType"""
    def __init__(self, exercise_name: str, exercise_type: ExerciseType, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
        self.exercise_type = exercise_type
        # ``or`` treats any falsy exercise_load as "not given" and draws one
        # at random from the allowed loads.
        self.exercise_load = exercise_load or self.pick_random_load(allowed_loads)
        super().__init__(exercise_name = exercise_name, muscle_group = get_muscle_group(exercise_name),**get_variables_based_on_exercise_type_and_load(self.exercise_type, self.exercise_load))
    def pick_random_load(self, allowed_loads):
        """Picks randomly the load based on ExerciseType and valid ExerciseLoad"""
        # Per-load "chance" weights from the config, renormalised to sum to 1
        # so np.random.choice accepts them as probabilities.
        # NOTE(review): sum(initial_probabilities) is recomputed per element;
        # hoisting it would be a micro-cleanup.
        initial_probabilities = [EXERCISE_CATEGORY_DATA[self.exercise_type][load]["chance"] for load in allowed_loads]
        normalized_probabilities = [prob/sum(initial_probabilities) for prob in initial_probabilities]
        return np.random.choice(allowed_loads, p = normalized_probabilities)
    def __str__(self):
        # Reopen the parent's "{...}" string and append the preset fields.
        return Exercise.__str__(self).rstrip("}") + f", exercise_type: {self.exercise_type}, exercise_load: {self.exercise_load}}}"
class StrengthExercise(ExerciseFromTypePreset):
    """Exercise whose sets/reps/rest come from the STRENGTH presets."""
    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
        super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.STRENGTH, allowed_loads=allowed_loads, exercise_load=exercise_load)
class EnduranceExercise(ExerciseFromTypePreset):
    """Exercise whose sets/reps/rest come from the ENDURANCE presets."""
    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
        super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.ENDURANCE, allowed_loads=allowed_loads, exercise_load=exercise_load)
class HypertExercise(ExerciseFromTypePreset):
    """Exercise whose sets/reps/rest come from the HYPERTROPHY presets."""
    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
        super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.HYPERTROPHY, allowed_loads=allowed_loads, exercise_load=exercise_load)
| [
"rengine.config.EXERCISE_DF.copy",
"copy.deepcopy",
"statistics.mean",
"numpy.random.choice"
] | [((893, 911), 'rengine.config.EXERCISE_DF.copy', 'EXERCISE_DF.copy', ([], {}), '()\n', (909, 911), False, 'from rengine.config import ExerciseLoad, ExerciseType, EXERCISE_DF\n'), ((1587, 1600), 'copy.deepcopy', 'deepcopy', (['obj'], {}), '(obj)\n', (1595, 1600), False, 'from copy import deepcopy\n'), ((4157, 4216), 'numpy.random.choice', 'np.random.choice', (['allowed_loads'], {'p': 'normalized_probabilities'}), '(allowed_loads, p=normalized_probabilities)\n', (4173, 4216), True, 'import numpy as np\n'), ((2858, 2873), 'statistics.mean', 'mean', (['rest_time'], {}), '(rest_time)\n', (2862, 2873), False, 'from statistics import mean\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[53]:
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Flatten, Reshape
high = 100000000
digits = 12 #len(str(high))
pad = 12
from num2words import num2words
import random
def index_list(pos):
    """Return a 10-element one-hot list with the 1 at index *pos*."""
    return [0] * pos + [1] + [0] * (10 - pos - 1)
def create_data(low, high, num):
    """Return ``num`` random (words, one-hot digits) training pairs.

    x_data: spelled-out integers with hyphens/commas/" and " normalised away.
    y_data: array of shape (num, digits, 10) -- a one-hot row per decimal
    digit of the zero-padded number.
    """
    x_data = []
    y_data = []
    for i in range(num):
        a = random.randrange(low, high)
        b = a  # NOTE(review): redundant alias of ``a``; kept as-is.
        words = num2words(b)
        c = str(b).zfill(digits)
        x_data.append(words.replace("-", " ").replace(",", "").replace(" and "," "))
        num_list = []
        # NOTE(review): this inner ``i`` shadows the outer loop variable;
        # harmless because the outer ``i`` is unused, but fragile.
        for i in range(digits):
            num_list.append(index_list(int(c[i])))
        y_data.append(num_list)
    return x_data, np.array(y_data)
# appends some data to dataset
def append_data(x, y, x_data, y_data):
    """Append one (words ``x``, number ``y``) example to the dataset.

    ``x_data`` is mutated in place; a new ``y_data`` array is created by
    ``np.concatenate``, so callers must rebind both return values.
    """
    # Same wording normalisation as create_data.
    x_data.append(x.replace("-", " ").replace(",", "").replace(" and "," "))
    c = str(y).zfill(digits)
    num_list = [[]]
    for i in range(digits):
        num_list[0].append(index_list(int(c[i])))
    num_list = np.array(num_list)
    y_data = np.concatenate((y_data, num_list), axis=0)
    return x_data, y_data
# In[54]:
# Build the training/test corpora of spelled-out random integers.
x_train, y_train = create_data(0, high, 600000)
x_test, y_test = create_data(0, high, 400000)
# change some "one thousand six hundred" to "sixteen hundred" etc.
for i in range(10):
    for j in range(1000, 1500, 100):
        x_train, y_train = append_data(num2words(j//100) + " hundred", j, x_train, y_train)
# In[55]:
# Tokenize the word sequences and left-pad every sequence to `pad` tokens.
num_words = 0
for i in x_train:
    num_words += len(i.split(" "))
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(x_train)
x_train = tokenizer.texts_to_sequences(x_train)
x_train = np.array([[0]*(pad-len(i)) + i for i in x_train])
x_test = tokenizer.texts_to_sequences(x_test)
x_test = np.array([[0]*(pad-len(i)) + i for i in x_test])
# Spot-check one sample round-trips through the tokenizer.
print(tokenizer.sequences_to_texts(x_train)[342])
print(x_train[342])
print(y_train[342])
vocab_size = len(tokenizer.word_index) + 1
# In[56]:
# Embedding -> Flatten -> softmax over digits*10 units, reshaped so each of
# the `digits` positions gets its own 10-way digit distribution.
model = tf.keras.models.Sequential()
model.add(Embedding(vocab_size, pad, input_length=pad))
model.add(Flatten())
model.add(Dense(digits*10, activation=tf.nn.softmax))
model.add(Reshape((digits,10)))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.fit(x_train, y_train, epochs=3)
# In[57]:
val_loss, val_acc = model.evaluate(x_test, y_test)
model.save('count2.model')
# In[58]:
# Reload the saved model to verify the serialized artifact works.
new_model = tf.keras.models.load_model('count2.model')
# In[59]:
x = tokenizer.texts_to_sequences(["twelve"
                                 ,"thirteen"
                                 ,"one hundred twenty three"
                                 ,"four hundred seventy two thousand two hundred twenty two"
                                 ,"two hundred thirty seven thousand one hundred forty"
                                 ,"spongebob"
                                 ,"forty two million two hundred thousand one hundred thirteen"])
x = np.array([[0]*(pad-len(i)) + i for i in x])
predictions = new_model.predict(np.array(x))
# Decode each prediction by taking the argmax digit per position.
for prediction in predictions:
    num = ""
    for i in prediction:
        num += str(i.argmax())
    print(int(num))
# In[ ]:
# In[ ]:
| [
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"numpy.array",
"tensorflow.keras.models.Sequential",
"random.randrange",
"num2words.num2words",
"tensorflow.keras.layers.Embedding",
"numpy.con... | [((1738, 1796), 'tensorflow.keras.preprocessing.text.Tokenizer', 'tf.keras.preprocessing.text.Tokenizer', ([], {'num_words': 'num_words'}), '(num_words=num_words)\n', (1775, 1796), True, 'import tensorflow as tf\n'), ((2199, 2227), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (2225, 2227), True, 'import tensorflow as tf\n'), ((2646, 2688), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""count2.model"""'], {}), "('count2.model')\n", (2672, 2688), True, 'import tensorflow as tf\n'), ((1219, 1237), 'numpy.array', 'np.array', (['num_list'], {}), '(num_list)\n', (1227, 1237), True, 'import numpy as np\n'), ((1251, 1293), 'numpy.concatenate', 'np.concatenate', (['(y_data, num_list)'], {'axis': '(0)'}), '((y_data, num_list), axis=0)\n', (1265, 1293), True, 'import numpy as np\n'), ((2238, 2282), 'tensorflow.keras.layers.Embedding', 'Embedding', (['vocab_size', 'pad'], {'input_length': 'pad'}), '(vocab_size, pad, input_length=pad)\n', (2247, 2282), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Flatten, Reshape\n'), ((2294, 2303), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2301, 2303), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Flatten, Reshape\n'), ((2315, 2359), 'tensorflow.keras.layers.Dense', 'Dense', (['(digits * 10)'], {'activation': 'tf.nn.softmax'}), '(digits * 10, activation=tf.nn.softmax)\n', (2320, 2359), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Flatten, Reshape\n'), ((2369, 2390), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(digits, 10)'], {}), '((digits, 10))\n', (2376, 2390), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Flatten, Reshape\n'), ((3257, 3268), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3265, 3268), True, 'import numpy as np\n'), ((564, 591), 'random.randrange', 'random.randrange', (['low', 'high'], 
{}), '(low, high)\n', (580, 591), False, 'import random\n'), ((622, 634), 'num2words.num2words', 'num2words', (['b'], {}), '(b)\n', (631, 634), False, 'from num2words import num2words\n'), ((911, 927), 'numpy.array', 'np.array', (['y_data'], {}), '(y_data)\n', (919, 927), True, 'import numpy as np\n'), ((1592, 1611), 'num2words.num2words', 'num2words', (['(j // 100)'], {}), '(j // 100)\n', (1601, 1611), False, 'from num2words import num2words\n')] |
from sciapp.action import Filter, Simple
from pystackreg import StackReg
import numpy as np
import pandas as pd
from skimage import transform as tf
import scipy.ndimage as ndimg
class Register(Simple):
    """sciapp plugin: register an image stack with pystackreg's StackReg.

    Optionally downsamples (``diag``) and blurs (``sigma``) before
    registration, then applies the recovered transforms to the original
    stack.
    """
    title = "Stack Register"
    note = ["8-bit", "16-bit", "int", "float", "stack"]
    # Default parameter values shown in the plugin dialog.
    para = {
        "trans": "RIGID_BODY",
        "ref": "previous",
        "tab": False,
        "new": "Inplace",
        "diag": 0,
        "sigma": 0,
    }
    # Widget layout for the plugin dialog (type, key, choices/range, ...).
    view = [
        (
            list,
            "trans",
            ["TRANSLATION", "RIGID_BODY", "SCALED_ROTATION", "AFFINE", "BILINEAR"],
            str,
            "transform",
            "",
        ),
        (list, "ref", ["previous", "first", "mean"], str, "reference", ""),
        (list, "new", ["Inplace", "New", "None"], str, "image", ""),
        (int, "diag", (0, 2048), 0, "diagonal", "scale"),
        (float, "sigma", (0, 30), 1, "sigma", "blur"),
        (bool, "tab", "show table"),
    ]
    def run(self, ips, imgs, para=None):
        # Scale factor so the image diagonal becomes para["diag"] pixels;
        # k == 0 (diag 0) disables downsampling entirely.
        k = para["diag"] / np.sqrt((np.array(ips.img.shape) ** 2).sum())
        size = tuple((np.array(ips.img.shape) * k).astype(np.int16))
        # NOTE(review): ``IPy`` is not imported in this module's visible
        # imports -- confirm it is injected by the host app, otherwise this
        # raises NameError at runtime.
        IPy.info("down sample...")
        news = []
        for img in imgs:
            if k != 0:
                img = tf.resize(img, size)
            if para["sigma"] != 0:
                img = ndimg.gaussian_filter(img, para["sigma"])
            news.append(img)
        IPy.info("register...")
        # NOTE(review): ``eval`` on a parameter value; the choices come from
        # the fixed ``view`` list above, but getattr(StackReg, para["trans"])
        # would avoid eval entirely.
        sr = StackReg(eval("StackReg.%s" % para["trans"]))
        sr.register_stack(np.array(news), reference=para["ref"])
        mats = sr._tmats.reshape((sr._tmats.shape[0], -1))
        # Rescale the transform matrices back to full-resolution coordinates.
        if k != 0:
            mats[:, [0, 1, 3, 4, 6, 7]] *= k
        if k != 0:
            mats[:, [0, 1, 2, 3, 4, 5]] /= k
        if para["tab"]:
            IPy.show_table(
                pd.DataFrame(
                    mats, columns=["A%d" % (i + 1) for i in range(mats.shape[1])]
                ),
                title="%s-Tmats" % ips.title,
            )
        if para["new"] == "None":
            return
        IPy.info("transform...")
        for i in range(sr._tmats.shape[0]):
            tform = tf.ProjectiveTransform(matrix=sr._tmats[i])
            img = tf.warp(imgs[i], tform)
            # NOTE(review): this looks like an intensity rescale of warp's
            # [0, 1] output back to the source range, but it subtracts the
            # source min from the warped data -- confirm the intent.
            img -= imgs[i].min()
            img *= imgs[i].max() - imgs[i].min()
            if para["new"] == "Inplace":
                imgs[i][:] = img
            if para["new"] == "New":
                news[i] = img.astype(ips.img.dtype)
            self.progress(i, len(imgs))
        if para["new"] == "New":
            IPy.show_img(news, "%s-reg" % ips.title)
class Transform(Simple):
    """sciapp plugin: apply a table of 3x3 projective matrices to a stack."""
    title = "Register By Mats"
    note = ["all"]
    para = {"mat": None, "new": True}
    view = [("tab", "mat", "transfrom", "matrix"), (bool, "new", "new image")]
    def run(self, ips, imgs, para=None):
        # NOTE(review): ``TableManager`` and ``IPy`` are not imported in this
        # module's visible imports -- confirm the host app provides them.
        mats = TableManager.get(para["mat"]).data.values
        if len(imgs) != len(mats):
            IPy.alert("image stack must has the same length as transfrom mats!")
            return
        newimgs = []
        # Single scratch buffer reused for every frame.
        img = np.zeros_like(ips.img, dtype=np.float64)
        for i in range(len(mats)):
            tform = tf.ProjectiveTransform(matrix=mats[i].reshape((3, 3)))
            if imgs[i].ndim == 2:
                img[:] = tf.warp(imgs[i], tform)
            else:
                # Warp each channel separately for multi-channel images.
                for c in range(img.shape[2]):
                    img[:, :, c] = tf.warp(imgs[i][:, :, c], tform)
            img -= imgs[i].min()
            img *= imgs[i].max() - imgs[i].min()
            if para["new"]:
                newimgs.append(img.astype(ips.img.dtype))
            else:
                # NOTE(review): this rebinds every slot to the SAME scratch
                # buffer, so all frames end up aliasing the last result --
                # confirm whether a copy was intended here.
                imgs[i] = img
            self.progress(i, len(mats))
        if para["new"]:
            IPy.show_img(newimgs, "%s-trans" % ips.title)
# Plugin list picked up by the sciapp framework when loading this module.
plgs = [Register, Transform]
| [
"numpy.zeros_like",
"scipy.ndimage.gaussian_filter",
"skimage.transform.ProjectiveTransform",
"skimage.transform.resize",
"numpy.array",
"skimage.transform.warp"
] | [((3087, 3127), 'numpy.zeros_like', 'np.zeros_like', (['ips.img'], {'dtype': 'np.float64'}), '(ips.img, dtype=np.float64)\n', (3100, 3127), True, 'import numpy as np\n'), ((1544, 1558), 'numpy.array', 'np.array', (['news'], {}), '(news)\n', (1552, 1558), True, 'import numpy as np\n'), ((2166, 2209), 'skimage.transform.ProjectiveTransform', 'tf.ProjectiveTransform', ([], {'matrix': 'sr._tmats[i]'}), '(matrix=sr._tmats[i])\n', (2188, 2209), True, 'from skimage import transform as tf\n'), ((2228, 2251), 'skimage.transform.warp', 'tf.warp', (['imgs[i]', 'tform'], {}), '(imgs[i], tform)\n', (2235, 2251), True, 'from skimage import transform as tf\n'), ((1277, 1297), 'skimage.transform.resize', 'tf.resize', (['img', 'size'], {}), '(img, size)\n', (1286, 1297), True, 'from skimage import transform as tf\n'), ((1355, 1396), 'scipy.ndimage.gaussian_filter', 'ndimg.gaussian_filter', (['img', "para['sigma']"], {}), "(img, para['sigma'])\n", (1376, 1396), True, 'import scipy.ndimage as ndimg\n'), ((3297, 3320), 'skimage.transform.warp', 'tf.warp', (['imgs[i]', 'tform'], {}), '(imgs[i], tform)\n', (3304, 3320), True, 'from skimage import transform as tf\n'), ((3420, 3452), 'skimage.transform.warp', 'tf.warp', (['imgs[i][:, :, c]', 'tform'], {}), '(imgs[i][:, :, c], tform)\n', (3427, 3452), True, 'from skimage import transform as tf\n'), ((1107, 1130), 'numpy.array', 'np.array', (['ips.img.shape'], {}), '(ips.img.shape)\n', (1115, 1130), True, 'import numpy as np\n'), ((1048, 1071), 'numpy.array', 'np.array', (['ips.img.shape'], {}), '(ips.img.shape)\n', (1056, 1071), True, 'import numpy as np\n')] |
"""Defines `DesignMatrix` and `DesignMatrixCollection`.
These classes are intended to make linear regression problems with a large
design matrix more easy.
"""
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .. import MPLSTYLE
from ..utils import LightkurveWarning, plot_image
__all__ = ['DesignMatrix', 'DesignMatrixCollection']
class DesignMatrix():
    """A matrix of column vectors for use in linear regression.

    The purpose of this class is to provide a convenient method to interact
    with a set of one or more regressors which are known to correlate with
    trends or systematic noise signals which we want to remove from a light
    curve. Specifically, this class is designed to provide the design matrix
    for use by Lightkurve's `.RegressionCorrector` class.

    Parameters
    ----------
    df : dict, array, or `pandas.DataFrame` object
        Columns to include in the design matrix. If this object is not a
        `~pandas.DataFrame` then it will be passed to the DataFrame constructor.
    columns : iterable of str (optional)
        Column names, if not already provided via ``df``.
    name : str
        Name of the matrix.
    prior_mu : array
        Prior means of the coefficients associated with each column in a linear
        regression problem.
    prior_sigma : array
        Prior standard deviations of the coefficients associated with each
        column in a linear regression problem.
    """
    def __init__(self, df, columns=None, name='unnamed_matrix', prior_mu=None,
                 prior_sigma=None):
        if not isinstance(df, pd.DataFrame):
            df = pd.DataFrame(df)
        if columns is not None:
            df.columns = columns
        self.df = df
        self.name = name
        # Default priors are uninformative: zero mean, infinite sigma.
        if prior_mu is None:
            prior_mu = np.zeros(len(df.T))
        if prior_sigma is None:
            prior_sigma = np.ones(len(df.T)) * np.inf
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma

    def plot(self, ax=None, **kwargs):
        """Visualize the design matrix values as an image.

        Uses Matplotlib's `~lightkurve.utils.plot_image` to visualize the
        matrix values.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            A matplotlib axes object to plot into. If no axes is provided,
            a new one will be created.
        **kwargs : dict
            Extra parameters to be passed to `.plot_image`.

        Returns
        -------
        `~matplotlib.axes.Axes`
            The matplotlib axes object.
        """
        with plt.style.context(MPLSTYLE):
            ax = plot_image(self.values, ax=ax, xlabel='Component', ylabel='X',
                            clabel='Component Value', title=self.name, **kwargs)
            ax.set_aspect(self.shape[1]/(1.6*self.shape[0]))
            # Per-column tick labels only stay legible up to ~40 columns.
            if self.shape[1] <= 40:
                ax.set_xticks(np.arange(self.shape[1]))
                ax.set_xticklabels([r'${}$'.format(i) for i in self.columns],
                                   rotation=90, fontsize=8)
        return ax

    def plot_priors(self, ax=None):
        """Visualize the coefficient priors.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            A matplotlib axes object to plot into. If no axes is provided,
            a new one will be created.

        Returns
        -------
        `~matplotlib.axes.Axes`
            The matplotlib axes object.
        """
        def gauss(x, mu=0, sigma=1):
            return np.exp(-(x - mu)**2/(2*sigma**2))
        with plt.style.context(MPLSTYLE):
            if ax is None:
                _, ax = plt.subplots()
            for m, s in zip(self.prior_mu, self.prior_sigma):
                # Infinite sigma means an uninformative prior: draw a flat line.
                if not np.isfinite(s):
                    ax.axhline(1, color='k')
                else:
                    x = np.linspace(m - 5*s, m + 5*s, 1000)
                    ax.plot(x, gauss(x, m, s), c='k')
            ax.set_xlabel('Value')
            ax.set_title('{} Priors'.format(self.name))
        return ax

    def _get_prior_sample(self):
        """Returns a random sample from the prior distribution."""
        return np.random.normal(self.prior_mu, self.prior_sigma)

    def split(self, row_indices):
        """Returns a new `.DesignMatrix` with regressors split into multiple
        columns.

        This method will return a new design matrix containing
        n_columns * len(row_indices) regressors. This is useful in situations
        where the linear regression can be improved by fitting separate
        coefficients for different contiguous parts of the regressors.

        Parameters
        ----------
        row_indices : iterable of integers
            Every regressor (i.e. column) in the design matrix will be split
            up over multiple columns separated at the indices provided.

        Returns
        -------
        `.DesignMatrix`
            A new design matrix with shape (n_rows, len(row_indices)*n_columns).
        """
        if isinstance(row_indices, int):
            row_indices = [row_indices]
        if (len(row_indices) == 0) or (row_indices == [0]) or (row_indices is None):
            return self
        # Where do the submatrices begin and end?
        lower_idx = np.append(0, row_indices)
        upper_idx = np.append(row_indices, len(self.df))
        dfs = []
        for idx, a, b in zip(range(len(lower_idx)), lower_idx, upper_idx):
            # Suffix each column name with the 1-based submatrix index.
            new_columns = dict(
                ('{}'.format(val), '{}'.format(val) + ' {}'.format(idx + 1))
                for val in list(self.df.columns))
            dfs.append(self.df[a:b].rename(columns=new_columns))
        new_df = pd.concat(dfs, axis=1).fillna(0)
        prior_mu = np.hstack([self.prior_mu for idx in range(len(dfs))])
        prior_sigma = np.hstack([self.prior_sigma for idx in range(len(dfs))])
        return DesignMatrix(new_df, name=self.name, prior_mu=prior_mu,
                            prior_sigma=prior_sigma)

    def standardize(self):
        """Returns a new `.DesignMatrix` in which the columns have been
        median-subtracted and sigma-divided.

        For each column in the matrix, this method will subtract the median of
        the column and divide by the column's standard deviation, i.e. it
        will compute the column's so-called "standard scores" or "z-values".

        This operation is useful because it will make the matrix easier to
        visualize and makes fitted coefficients easier to interpret.

        Notes:
        * Standardizing a spline design matrix will break the splines.
        * Columns with constant values (i.e. zero standard deviation) will be
          left unchanged.

        Returns
        -------
        `.DesignMatrix`
            A new design matrix with median-subtracted & sigma-divided columns.
        """
        # Cast to float up front: the previous int-dtype path raised a
        # ValueError when NaN was written into an integer array and silently
        # truncated the standardized scores otherwise.
        ar = np.asarray(np.copy(self.df), dtype=float)
        ar[ar == 0] = np.nan
        # If a column has zero standard deviation, it will not change!
        is_const = np.nanstd(ar, axis=0) == 0
        median = np.atleast_2d(np.nanmedian(ar, axis=0)[~is_const])
        std = np.atleast_2d(np.nanstd(ar, axis=0)[~is_const])
        ar[:, ~is_const] = (ar[:, ~is_const] - median) / std
        new_df = pd.DataFrame(ar, columns=self.columns).fillna(0)
        return DesignMatrix(new_df, name=self.name)

    def pca(self, nterms=6):
        """Returns a new `.DesignMatrix` with a smaller number of regressors.

        This method will use Principal Components Analysis (PCA) to reduce
        the number of columns in the matrix.

        Parameters
        ----------
        nterms : int
            Number of columns in the new matrix.

        Returns
        -------
        `.DesignMatrix`
            A new design matrix with PCA applied.
        """
        # nterms cannot be larger than the number of columns in the matrix
        if nterms > self.shape[1]:
            nterms = self.shape[1]
        # We use `fbpca.pca` instead of `np.linalg.svd` because it is faster.
        # Note that fbpca is randomized, and has n_iter=2 as default,
        # we find this to be too few, and that n_iter=10 is still fast but
        # produces more stable results.
        from fbpca import pca  # local import because not used elsewhere
        new_values, _, _ = pca(self.values, nterms, n_iter=10)
        return DesignMatrix(new_values, name=self.name)

    def append_constant(self, prior_mu=0, prior_sigma=np.inf):
        """Returns a new `.DesignMatrix` with a column of ones appended.

        Returns
        -------
        `.DesignMatrix`
            New design matrix with a column of ones appended. This column is
            named "offset".
        """
        extra_df = pd.DataFrame(np.atleast_2d(np.ones(self.shape[0])).T, columns=['offset'])
        new_df = pd.concat([self.df, extra_df], axis=1)
        prior_mu = np.append(self.prior_mu, prior_mu)
        prior_sigma = np.append(self.prior_sigma, prior_sigma)
        return DesignMatrix(new_df, name=self.name,
                            prior_mu=prior_mu, prior_sigma=prior_sigma)

    def _validate(self):
        """Raises a `LightkurveWarning` if the matrix has a low rank."""
        # Matrix rank shouldn't be significantly smaller than the # of columns
        if self.rank < (0.5*self.shape[1]):
            warnings.warn("The design matrix has low rank ({}) compared to the "
                          "number of columns ({}), which suggests that the "
                          "matrix contains duplicate or correlated columns. "
                          "This may prevent the regression from succeeding. "
                          "Consider reducing the dimensionality by calling the "
                          "`pca()` method.".format(self.rank, self.shape[1]),
                          LightkurveWarning)

    @property
    def rank(self):
        """Matrix rank computed using `numpy.linalg.matrix_rank`."""
        return np.linalg.matrix_rank(self.values)

    @property
    def columns(self):
        """List of column names."""
        return list(self.df.columns)

    @property
    def shape(self):
        """Tuple specifying the shape of the matrix as (n_rows, n_columns)."""
        return self.df.shape

    @property
    def values(self):
        """2D numpy array containing the matrix values."""
        return self.df.values

    def __getitem__(self, key):
        return self.df[key]

    def __repr__(self):
        return '{} DesignMatrix {}'.format(self.name, self.shape)
class DesignMatrixCollection():
    """A set of design matrices."""
    def __init__(self, matrices):
        self.matrices = matrices

    @property
    def values(self):
        """2D numpy array containing the matrix values."""
        return np.hstack(tuple(m.values for m in self.matrices))

    @property
    def prior_mu(self):
        """Coefficient prior means."""
        return np.hstack([m.prior_mu for m in self])

    @property
    def prior_sigma(self):
        """Coefficient prior standard deviations."""
        return np.hstack([m.prior_sigma for m in self])

    def plot(self, ax=None, **kwargs):
        """Visualize the design matrix values as an image.

        Uses Matplotlib's `~lightkurve.utils.plot_image` to visualize the
        matrix values.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            A matplotlib axes object to plot into. If no axes is provided,
            a new one will be created.
        **kwargs : dict
            Extra parameters to be passed to `.plot_image`.

        Returns
        -------
        `~matplotlib.axes.Axes`
            The matplotlib axes object.
        """
        temp_dm = DesignMatrix(pd.concat([d.df for d in self], axis=1))
        ax = temp_dm.plot(**kwargs)
        ax.set_title("Design Matrix Collection")
        return ax

    def plot_priors(self, ax=None):
        """Visualize the `prior_mu` and `prior_sigma` attributes.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            A matplotlib axes object to plot into. If no axes is provided,
            a new one will be created.

        Returns
        -------
        `~matplotlib.axes.Axes`
            The matplotlib axes object.
        """
        [dm.plot_priors(ax=ax) for dm in self]
        return ax

    def _get_prior_sample(self):
        """Returns a random sample from the prior distribution."""
        # Bug fix: this previously called `dm.sample_priors()`, a method that
        # does not exist on DesignMatrix (it is named `_get_prior_sample`),
        # so it always raised AttributeError.
        return np.hstack([dm._get_prior_sample() for dm in self])

    def split(self, row_indices):
        """Returns a new `.DesignMatrixCollection` with regressors split into
        multiple columns.

        This method will return a new design matrix collection by calling
        `DesignMatrix.split` on each matrix in the collection.

        Parameters
        ----------
        row_indices : iterable of integers
            Every regressor (i.e. column) in the design matrix will be split
            up over multiple columns separated at the indices provided.

        Returns
        -------
        `.DesignMatrixCollection`
            A new design matrix collection.
        """
        return DesignMatrixCollection([d.split(row_indices) for d in self])

    def standardize(self):
        """Returns a new `.DesignMatrixCollection` in which all the
        matrices have been standardized using the `DesignMatrix.standardize`
        method.

        Returns
        -------
        `.DesignMatrixCollection`
            The new design matrix collection.
        """
        return DesignMatrixCollection([d.standardize() for d in self])

    @property
    def columns(self):
        """List of column names."""
        return np.hstack([d.columns for d in self])

    def __getitem__(self, key):
        # Integer keys index the underlying list; any failure (e.g. a string
        # key) falls back to a lookup by matrix name.
        try:
            return self.matrices[key]
        except Exception:
            arg = np.argwhere([m.name == key for m in self.matrices])
            return self.matrices[arg[0][0]]

    def _validate(self):
        [d._validate() for d in self]

    def __repr__(self):
        return 'DesignMatrixCollection:\n' + \
                    ''.join(['\t{}\n'.format(i.__repr__()) for i in self])
| [
"pandas.DataFrame",
"numpy.copy",
"numpy.nanmedian",
"numpy.nanstd",
"matplotlib.pyplot.style.context",
"fbpca.pca",
"numpy.hstack",
"numpy.isfinite",
"numpy.append",
"numpy.ones",
"numpy.linalg.matrix_rank",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"numpy.linspace",
"numpy.... | [((4218, 4267), 'numpy.random.normal', 'np.random.normal', (['self.prior_mu', 'self.prior_sigma'], {}), '(self.prior_mu, self.prior_sigma)\n', (4234, 4267), True, 'import numpy as np\n'), ((5324, 5349), 'numpy.append', 'np.append', (['(0)', 'row_indices'], {}), '(0, row_indices)\n', (5333, 5349), True, 'import numpy as np\n'), ((8370, 8405), 'fbpca.pca', 'pca', (['self.values', 'nterms'], {'n_iter': '(10)'}), '(self.values, nterms, n_iter=10)\n', (8373, 8405), False, 'from fbpca import pca\n'), ((8883, 8921), 'pandas.concat', 'pd.concat', (['[self.df, extra_df]'], {'axis': '(1)'}), '([self.df, extra_df], axis=1)\n', (8892, 8921), True, 'import pandas as pd\n'), ((8941, 8975), 'numpy.append', 'np.append', (['self.prior_mu', 'prior_mu'], {}), '(self.prior_mu, prior_mu)\n', (8950, 8975), True, 'import numpy as np\n'), ((8998, 9038), 'numpy.append', 'np.append', (['self.prior_sigma', 'prior_sigma'], {}), '(self.prior_sigma, prior_sigma)\n', (9007, 9038), True, 'import numpy as np\n'), ((10022, 10056), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.values'], {}), '(self.values)\n', (10043, 10056), True, 'import numpy as np\n'), ((10981, 11018), 'numpy.hstack', 'np.hstack', (['[m.prior_mu for m in self]'], {}), '([m.prior_mu for m in self])\n', (10990, 11018), True, 'import numpy as np\n'), ((11129, 11169), 'numpy.hstack', 'np.hstack', (['[m.prior_sigma for m in self]'], {}), '([m.prior_sigma for m in self])\n', (11138, 11169), True, 'import numpy as np\n'), ((13752, 13788), 'numpy.hstack', 'np.hstack', (['[d.columns for d in self]'], {}), '([d.columns for d in self])\n', (13761, 13788), True, 'import numpy as np\n'), ((1669, 1685), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (1681, 1685), True, 'import pandas as pd\n'), ((2628, 2655), 'matplotlib.pyplot.style.context', 'plt.style.context', (['MPLSTYLE'], {}), '(MPLSTYLE)\n', (2645, 2655), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3613), 'numpy.exp', 'np.exp', (['(-(x 
- mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sigma ** 2))\n', (3578, 3613), True, 'import numpy as np\n'), ((3619, 3646), 'matplotlib.pyplot.style.context', 'plt.style.context', (['MPLSTYLE'], {}), '(MPLSTYLE)\n', (3636, 3646), True, 'import matplotlib.pyplot as plt\n'), ((6932, 6948), 'numpy.copy', 'np.copy', (['self.df'], {}), '(self.df)\n', (6939, 6948), True, 'import numpy as np\n'), ((7069, 7090), 'numpy.nanstd', 'np.nanstd', (['ar'], {'axis': '(0)'}), '(ar, axis=0)\n', (7078, 7090), True, 'import numpy as np\n'), ((11789, 11828), 'pandas.concat', 'pd.concat', (['[d.df for d in self]'], {'axis': '(1)'}), '([d.df for d in self], axis=1)\n', (11798, 11828), True, 'import pandas as pd\n'), ((3699, 3713), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3711, 3713), True, 'import matplotlib.pyplot as plt\n'), ((5740, 5762), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (5749, 5762), True, 'import pandas as pd\n'), ((7127, 7151), 'numpy.nanmedian', 'np.nanmedian', (['ar'], {'axis': '(0)'}), '(ar, axis=0)\n', (7139, 7151), True, 'import numpy as np\n'), ((7192, 7213), 'numpy.nanstd', 'np.nanstd', (['ar'], {'axis': '(0)'}), '(ar, axis=0)\n', (7201, 7213), True, 'import numpy as np\n'), ((7304, 7342), 'pandas.DataFrame', 'pd.DataFrame', (['ar'], {'columns': 'self.columns'}), '(ar, columns=self.columns)\n', (7316, 7342), True, 'import pandas as pd\n'), ((13917, 13970), 'numpy.argwhere', 'np.argwhere', (['[(m.name == key) for m in self.matrices]'], {}), '([(m.name == key) for m in self.matrices])\n', (13928, 13970), True, 'import numpy as np\n'), ((2945, 2969), 'numpy.arange', 'np.arange', (['self.shape[1]'], {}), '(self.shape[1])\n', (2954, 2969), True, 'import numpy as np\n'), ((3796, 3810), 'numpy.isfinite', 'np.isfinite', (['s'], {}), '(s)\n', (3807, 3810), True, 'import numpy as np\n'), ((3903, 3942), 'numpy.linspace', 'np.linspace', (['(m - 5 * s)', '(m + 5 * s)', '(1000)'], {}), '(m - 5 * s, m + 5 * 
s, 1000)\n', (3914, 3942), True, 'import numpy as np\n'), ((8819, 8841), 'numpy.ones', 'np.ones', (['self.shape[0]'], {}), '(self.shape[0])\n', (8826, 8841), True, 'import numpy as np\n')] |
import glob
import os
import numpy as np
import pytest
from sklearn.datasets import load_breast_cancer
from fedot.core.composer.cache import OperationsCache
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
@pytest.fixture()
def data_setup():
    """Provide a small (100-sample) classification train/test split for cache tests.

    Built from sklearn's breast-cancer dataset and wrapped into FEDOT InputData.
    NOTE(review): predictors and response are shuffled with two separate
    np.random.shuffle calls, so the feature/label pairing is not preserved —
    presumably irrelevant for cache-behavior tests, but confirm it is intended.
    """
    task = Task(TaskTypesEnum.classification)
    predictors, response = load_breast_cancer(return_X_y=True)
    np.random.seed(1)
    np.random.shuffle(predictors)
    np.random.shuffle(response)
    # Keep the fixture small and fast: only the first 100 shuffled samples.
    response = response[:100]
    predictors = predictors[:100]
    input_data = InputData(idx=np.arange(0, len(predictors)),
                          features=predictors,
                          target=response,
                          task=task,
                          data_type=DataTypesEnum.table)
    train_data, test_data = train_test_data_setup(data=input_data)
    train_data_x = train_data.features
    test_data_x = test_data.features
    train_data_y = train_data.target
    test_data_y = test_data.target
    # Re-wrap the split arrays with fresh zero-based indices.
    train_data = InputData(features=train_data_x, target=train_data_y,
                           idx=np.arange(0, len(train_data_y)),
                           task=task, data_type=DataTypesEnum.table)
    test_data = InputData(features=test_data_x, target=test_data_y,
                          idx=np.arange(0, len(test_data_y)),
                          task=task, data_type=DataTypesEnum.table)
    return train_data, test_data
@pytest.fixture
def cache_cleanup():
    """Guarantee an empty operations cache both before and after the test."""
    OperationsCache().reset()
    yield
    # Post-test cleanup so later tests never see stale cached operations.
    OperationsCache().reset()
def create_func_delete_files(paths):
    """
    Build a finalizer that removes every file matched by the given glob patterns.

    Args:
        paths: iterable of glob patterns (e.g. ['*.bak']) to delete on call.

    Returns:
        A zero-argument function performing the best-effort deletion.
    """
    def wrapper():
        for pattern in paths:
            for file_path in glob.glob(pattern):
                try:
                    os.remove(file_path)
                except OSError:
                    # Best effort: the file may already be gone or be locked.
                    pass
    return wrapper
@pytest.fixture(scope='session', autouse=True)
def preprocessing_files_before_and_after_tests(request):
    """Remove stray shelve/cache artifacts before the session and again at its end."""
    cleanup = create_func_delete_files(['*.bak', '*.dat', '*.dir'])
    cleanup()
    request.addfinalizer(cleanup)
def pipeline_first():
    """Build the reference three-level pipeline used by the cache tests.

    Structure (root at top):
        rf
        |  \
        rf   knn
        |\    |\
    logit lda logit lda
    """
    pipeline = Pipeline()
    root_of_tree, root_child_first, root_child_second = \
        [SecondaryNode(model) for model in ('rf', 'rf', 'knn')]
    for root_node_child in (root_child_first, root_child_second):
        for requirement_model in ('logit', 'lda'):
            new_node = PrimaryNode(requirement_model)
            root_node_child.nodes_from.append(new_node)
            pipeline.add_node(new_node)
        pipeline.add_node(root_node_child)
        root_of_tree.nodes_from.append(root_node_child)
    pipeline.add_node(root_of_tree)
    return pipeline
def pipeline_second():
    """Variant of pipeline_first whose left subtree is replaced by dt(knn, knn)."""
    pipeline = pipeline_first()
    dt_node = SecondaryNode('dt')
    dt_node.nodes_from.extend(PrimaryNode(model_type) for model_type in ('knn', 'knn'))
    pipeline.update_subtree(pipeline.root_node.nodes_from[0], dt_node)
    return pipeline
def pipeline_third():
    """Build a two-level pipeline: a qda root fed by two rf primary nodes.

    Structure:
        qda
        |  \
        rf  rf
    """
    pipeline = Pipeline()
    root = SecondaryNode('qda')
    for model_type in ('rf', 'rf'):
        root.nodes_from.append(PrimaryNode(model_type))
    pipeline.add_node(root)
    # Plain loop instead of a list comprehension used only for its side effects.
    for node_from in root.nodes_from:
        pipeline.add_node(node_from)
    return pipeline
def pipeline_fourth():
    """pipeline_first with both children of its left subtree replaced.

    Structure (root at top):
        rf
        |   \
        rf    knn
        |\     |\
      knn qda logit lda
      |\   |\
    knn knn rf rf
    """
    pipeline = pipeline_first()
    # Replace the right child of the left subtree with qda(rf, rf).
    new_node = SecondaryNode('qda')
    for model_type in ('rf', 'rf'):
        new_node.nodes_from.append(PrimaryNode(model_type))
    pipeline.update_subtree(pipeline.root_node.nodes_from[0].nodes_from[1], new_node)
    # Replace the left child of the left subtree with knn(knn, knn).
    new_node = SecondaryNode('knn')
    for model_type in ('knn', 'knn'):
        new_node.nodes_from.append(PrimaryNode(model_type))
    pipeline.update_subtree(pipeline.root_node.nodes_from[0].nodes_from[0], new_node)
    return pipeline
def pipeline_fifth():
    """pipeline_first with a knn root and the right subtree's primaries set to knn.

    Structure (root at top):
        knn
        |  \
        rf   knn
        |\    |\
    logit lda knn knn
    """
    pipeline = pipeline_first()
    new_node = SecondaryNode('knn')
    pipeline.update_node(pipeline.root_node, new_node)
    new_node1 = PrimaryNode('knn')
    new_node2 = PrimaryNode('knn')
    pipeline.update_node(pipeline.root_node.nodes_from[1].nodes_from[0], new_node1)
    pipeline.update_node(pipeline.root_node.nodes_from[1].nodes_from[1], new_node2)
    return pipeline
def test_cache_actuality_after_model_change(data_setup, cache_cleanup):
    """Non-affected nodes keep an actual cache after one node's model is changed."""
    cache = OperationsCache()
    pipeline = pipeline_first()
    train, _ = data_setup
    pipeline.fit(input_data=train)
    cache.save_pipeline(pipeline)
    # Swap the root's first child for a logit node: this invalidates the cache
    # of the replaced node and of everything downstream of it (the root).
    new_node = SecondaryNode(operation_type='logit')
    pipeline.update_node(old_node=pipeline.root_node.nodes_from[0],
                         new_node=new_node)
    root_parent_first = pipeline.root_node.nodes_from[0]
    nodes_with_non_actual_cache = [pipeline.root_node, root_parent_first]
    nodes_with_actual_cache = [node for node in pipeline.nodes if node not in nodes_with_non_actual_cache]
    # non-affected nodes are actual
    cache.try_load_nodes(nodes_with_actual_cache)
    assert all(node.fitted_operation is not None for node in nodes_with_actual_cache)
    # affected nodes and their childs has no any actual cache
    cache.try_load_nodes(nodes_with_non_actual_cache)
    assert all(node.fitted_operation is None for node in nodes_with_non_actual_cache)
def test_cache_actuality_after_subtree_change_to_identical(data_setup, cache_cleanup):
    """Non-affected nodes keep an actual cache after a subtree is swapped for a pre-fitted one."""
    cache = OperationsCache()
    train, _ = data_setup
    pipeline = pipeline_first()
    other_pipeline = pipeline_second()
    pipeline.fit(input_data=train)
    cache.save_pipeline(pipeline)
    other_pipeline.fit(input_data=train)
    # Cache only the subtree that will be transplanted into `pipeline`.
    cache.save_pipeline(Pipeline(other_pipeline.root_node.nodes_from[0]))
    pipeline.update_subtree(pipeline.root_node.nodes_from[0],
                            other_pipeline.root_node.nodes_from[0])
    nodes_with_actual_cache = [node for node in pipeline.nodes if node not in [pipeline.root_node]]
    # non-affected nodes of initial pipeline and fitted nodes of new subtree are actual
    cache.try_load_nodes(nodes_with_actual_cache)
    assert all(node.fitted_operation is not None for node in nodes_with_actual_cache)
    # affected root node has no any actual cache
    cache.try_load_nodes(pipeline.root_node)
    assert pipeline.root_node.fitted_operation is None
def test_cache_actuality_after_primary_node_changed_to_subtree(data_setup, cache_cleanup):
    """Non-affected nodes keep an actual cache after a primary node becomes a pre-fitted subtree."""
    cache = OperationsCache()
    train, _ = data_setup
    pipeline = pipeline_first()
    other_pipeline = pipeline_second()
    pipeline.fit(input_data=train)
    cache.save_pipeline(pipeline)
    other_pipeline.fit(input_data=train)
    # Graft a fitted subtree from other_pipeline in place of a primary node.
    pipeline.update_subtree(pipeline.root_node.nodes_from[0].nodes_from[0],
                            other_pipeline.root_node.nodes_from[0])
    cache.save_pipeline(Pipeline(other_pipeline.root_node.nodes_from[0]))
    root_parent_first = pipeline.root_node.nodes_from[0]
    nodes_with_non_actual_cache = [pipeline.root_node, root_parent_first]
    nodes_with_actual_cache = [node for node in pipeline.nodes if node not in nodes_with_non_actual_cache]
    # non-affected nodes of initial pipeline and fitted nodes of new subtree are actual
    cache.try_load_nodes(nodes_with_actual_cache)
    assert all(node.fitted_operation is not None for node in nodes_with_actual_cache)
    # affected root nodes and their childs has no any actual cache
    cache.try_load_nodes(nodes_with_non_actual_cache)
    assert all(node.fitted_operation is None for node in nodes_with_non_actual_cache)
def test_cache_historical_state_using_with_cv(data_setup, cache_cleanup):
    """A model cached for a CV fold stays retrievable after the node is changed and restored."""
    cv_fold = 1
    cache = OperationsCache()
    train, _ = data_setup
    pipeline = pipeline_first()
    # pipeline fitted, model goes to cache
    pipeline.fit(input_data=train)
    cache.save_pipeline(pipeline, fold_id=cv_fold)
    new_node = SecondaryNode(operation_type='logit')
    old_node = pipeline.root_node.nodes_from[0]
    # change child node to new one
    pipeline.update_node(old_node=old_node,
                         new_node=new_node)
    # cache is not actual
    cache.try_load_nodes(pipeline.root_node)
    assert pipeline.root_node.fitted_operation is None
    # fit modified pipeline
    pipeline.fit(input_data=train)
    cache.save_pipeline(pipeline, fold_id=cv_fold)
    # cache is actual now
    cache.try_load_nodes(pipeline.root_node, fold_id=cv_fold)
    assert pipeline.root_node.fitted_operation is not None
    # change node back
    pipeline.update_node(old_node=pipeline.root_node.nodes_from[0],
                         new_node=old_node)
    # cache is actual without new fitting,
    # because the cached model was saved after first fit
    cache.try_load_nodes(pipeline.root_node, fold_id=cv_fold)
    assert pipeline.root_node.fitted_operation is not None
def test_multi_pipeline_caching_with_cache(data_setup, cache_cleanup):
    """Fitting one pipeline makes the structurally identical parts of another pipeline cached."""
    train, _ = data_setup
    cache = OperationsCache()
    main_pipeline = pipeline_second()
    other_pipeline = pipeline_first()
    # fit other_pipeline that contains the parts identical to main_pipeline
    other_pipeline.fit(input_data=train)
    cache.save_pipeline(other_pipeline)
    nodes_with_non_actual_cache = [main_pipeline.root_node, main_pipeline.root_node.nodes_from[0]] + \
                                  [_ for _ in main_pipeline.root_node.nodes_from[0].nodes_from]
    nodes_with_actual_cache = [node for node in main_pipeline.nodes if node not in nodes_with_non_actual_cache]
    # check that using of other_pipeline make identical of the main_pipeline fitted,
    # despite the main_pipeline.fit() was not called
    cache.try_load_nodes(nodes_with_actual_cache)
    assert all(node.fitted_operation is not None for node in nodes_with_actual_cache)
    # the non-identical parts are still not fitted
    cache.try_load_nodes(nodes_with_non_actual_cache)
    assert all(node.fitted_operation is None for node in nodes_with_non_actual_cache)
    # check the same case with another pipelines
    cache.reset()
    main_pipeline = pipeline_fourth()
    prev_pipeline_first = pipeline_third()
    prev_pipeline_second = pipeline_fifth()
    prev_pipeline_first.fit(input_data=train)
    cache.save_pipeline(prev_pipeline_first)
    prev_pipeline_second.fit(input_data=train)
    cache.save_pipeline(prev_pipeline_second)
    nodes_with_non_actual_cache = [main_pipeline.root_node, main_pipeline.root_node.nodes_from[1]]
    nodes_with_actual_cache = [child for child in main_pipeline.root_node.nodes_from[0].nodes_from]
    cache.try_load_nodes(nodes_with_non_actual_cache)
    assert all(node.fitted_operation is None for node in nodes_with_non_actual_cache)
    cache.try_load_nodes(nodes_with_actual_cache)
    assert all(node.fitted_operation is not None for node in nodes_with_actual_cache)
# TODO Add changed data case for cache
| [
"fedot.core.data.data_split.train_test_data_setup",
"os.remove",
"numpy.random.seed",
"fedot.core.repository.tasks.Task",
"fedot.core.pipelines.pipeline.Pipeline",
"fedot.core.composer.cache.OperationsCache",
"pytest.fixture",
"sklearn.datasets.load_breast_cancer",
"glob.glob",
"fedot.core.pipelin... | [((504, 520), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (518, 520), False, 'import pytest\n'), ((2259, 2304), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (2273, 2304), False, 'import pytest\n'), ((550, 584), 'fedot.core.repository.tasks.Task', 'Task', (['TaskTypesEnum.classification'], {}), '(TaskTypesEnum.classification)\n', (554, 584), False, 'from fedot.core.repository.tasks import Task, TaskTypesEnum\n'), ((612, 647), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (630, 647), False, 'from sklearn.datasets import load_breast_cancer\n'), ((652, 669), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (666, 669), True, 'import numpy as np\n'), ((674, 703), 'numpy.random.shuffle', 'np.random.shuffle', (['predictors'], {}), '(predictors)\n', (691, 703), True, 'import numpy as np\n'), ((708, 735), 'numpy.random.shuffle', 'np.random.shuffle', (['response'], {}), '(response)\n', (725, 735), True, 'import numpy as np\n'), ((1079, 1117), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'input_data'}), '(data=input_data)\n', (1100, 1117), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((2636, 2646), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (2644, 2646), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((3352, 3371), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""dt"""'], {}), "('dt')\n", (3365, 3371), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((3645, 3655), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (3653, 3655), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((3671, 3691), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""qda"""'], {}), 
"('qda')\n", (3684, 3691), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4151, 4171), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""qda"""'], {}), "('qda')\n", (4164, 4171), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4369, 4389), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""knn"""'], {}), "('knn')\n", (4382, 4389), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4752, 4772), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""knn"""'], {}), "('knn')\n", (4765, 4772), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4844, 4862), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['"""knn"""'], {}), "('knn')\n", (4855, 4862), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4879, 4897), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['"""knn"""'], {}), "('knn')\n", (4890, 4897), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((5249, 5266), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (5264, 5266), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((5410, 5447), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', ([], {'operation_type': '"""logit"""'}), "(operation_type='logit')\n", (5423, 5447), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((6381, 6398), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (6396, 6398), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((7496, 7513), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (7511, 7513), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((8714, 8731), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (8729, 8731), False, 'from 
fedot.core.composer.cache import OperationsCache\n'), ((8935, 8972), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', ([], {'operation_type': '"""logit"""'}), "(operation_type='logit')\n", (8948, 8972), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((10000, 10017), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (10015, 10017), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((2715, 2735), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['model'], {}), '(model)\n', (2728, 2735), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((6630, 6678), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['other_pipeline.root_node.nodes_from[0]'], {}), '(other_pipeline.root_node.nodes_from[0])\n', (6638, 6678), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((7889, 7937), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['other_pipeline.root_node.nodes_from[0]'], {}), '(other_pipeline.root_node.nodes_from[0])\n', (7897, 7937), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((1745, 1762), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (1760, 1762), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((1785, 1802), 'fedot.core.composer.cache.OperationsCache', 'OperationsCache', ([], {}), '()\n', (1800, 1802), False, 'from fedot.core.composer.cache import OperationsCache\n'), ((1992, 2007), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (2001, 2007), False, 'import glob\n'), ((2911, 2941), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['requirement_model'], {}), '(requirement_model)\n', (2922, 2941), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((3445, 3468), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['model_type'], {}), '(model_type)\n', (3456, 3468), False, 'from 
fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((3763, 3786), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['model_type'], {}), '(model_type)\n', (3774, 3786), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4243, 4266), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['model_type'], {}), '(model_type)\n', (4254, 4266), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((4463, 4486), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['model_type'], {}), '(model_type)\n', (4474, 4486), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((2158, 2178), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (2167, 2178), False, 'import os\n')] |
import os, sys, shutil, copy #, argparse, re
import cv2
import numpy as np
# Optical-flow backend to use throughout this script.
OPTFLOW_METHOD = "Brox" # Farneback, Brox, TODO: Horn-Schunck
USE_RENDER = True
# GPU to expose via CUDA_VISIBLE_DEVICES; None triggers auto-selection below.
CUDA_ID = "3"
if USE_RENDER:
    if __name__=="__main__":
        if CUDA_ID is None:
            from tools.GPUmonitor import getAvailableGPU
            gpu_available = getAvailableGPU() #active_mem_threshold=0.05
            if gpu_available:
                CUDA_ID = str(gpu_available[0])
            if CUDA_ID is None:
                print('No GPU available.')
                sys.exit(4)
        # Must be set before TensorFlow is imported below.
        os.environ["CUDA_VISIBLE_DEVICES"]=CUDA_ID
    import tensorflow as tf
    if __name__=="__main__":
        # TF1-style eager execution (pre-TF2 API).
        tf.enable_eager_execution()
    from phitest.render import *
import json
# image format: RGBA, FP16, [0,1]
if OPTFLOW_METHOD=="Brox":
    # pyflow provides the Brox coarse2fine_flow implementation.
    sys.path.append("path/to/pyflow")
    import pyflow
def img_to_UINT8(img):
    """Convert a float image in [0,1] to uint8 in [0,255]; uint8 passes through unchanged.

    Raises:
        TypeError: for any dtype other than float32, float64 or uint8.
    """
    dtype = img.dtype
    if dtype in (np.float64, np.float32):
        return (img * 255.).astype(np.uint8)
    if dtype == np.uint8:
        return img
    raise TypeError("Unknown image type %s" % dtype)
def img_to_FP32(img):
    """Return the image as float32; uint8 input is additionally rescaled to [0,1].

    Raises:
        TypeError: for any dtype other than float32, float64 or uint8.
    """
    dtype = img.dtype
    if dtype == np.float64:
        return img.astype(np.float32)
    if dtype == np.float32:
        return img
    if dtype == np.uint8:
        return img.astype(np.float32) / 255.
    raise TypeError("Unknown image type %s" % dtype)
def img_to_FP64(img):
    """Return the image as float64; uint8 input is additionally rescaled to [0,1].

    Raises:
        TypeError: for any dtype other than float32, float64 or uint8.
    """
    dtype = img.dtype
    if dtype == np.float64:
        return img
    if dtype == np.float32:
        return img.astype(np.float64)
    if dtype == np.uint8:
        return img.astype(np.float64) / 255.
    raise TypeError("Unknown image type %s" % dtype)
scalarFlow_path_mask = 'data/ScalarFlow/sim_{sim:06d}/input/cam/imgsUnproc_{frame:06d}.npz'

def load_scalarFlow_images(sim, frame, cams=(0,1,2,3,4)):
    """Load the raw ScalarFlow capture of one frame as float32 grayscale images.

    Args:
        sim: simulation index used to format the archive path.
        frame: frame index used to format the archive path.
        cams: camera indices selecting (and ordering) the returned images.
              Fix: default changed from a mutable list to a tuple to avoid the
              shared-mutable-default pitfall; callers may still pass a list.

    Returns:
        A list of float32 images with an explicit channel axis, flipped along
        the vertical axis.
    """
    path = scalarFlow_path_mask.format(sim=sim, frame=frame)
    with np.load(path) as np_data:
        images = np_data["data"]
    for image in images:
        print("loaded image stats:", np.amin(image), np.mean(image), np.amax(image), image.dtype)
    images = [np.array(np.flip(normalize_image_shape(img_to_FP32(images[_]), "GRAY"), axis=0)) for _ in cams]
    return images
def normalize_image_shape(image, out_format="RGB"):
    """Bring an image to rank 3 with the requested channel layout.

    A rank-2 (H, W) image gets a trailing channel axis. For out_format "RGB" a
    single-channel image is repeated to 3 channels; for "GRAY" a multi-channel
    image is converted to grayscale with OpenCV (cvtColor returns a rank-2 array).

    Args:
        image: numpy array of rank 2 or 3.
        out_format: "RGB" or "GRAY".
    """
    image_rank = len(image.shape)
    assert image_rank < 4
    if image_rank == 2:
        image = image[..., np.newaxis]
    image_channels = image.shape[-1]
    if image_channels == 1 and out_format == "RGB":
        image = np.repeat(image, 3, axis=-1)
    elif image_channels > 1 and out_format == "GRAY":
        # Fix: COLOR_RGBA2GRAY requires 4 input channels and fails on RGB data;
        # select the conversion code by the actual channel count.
        code = cv2.COLOR_RGBA2GRAY if image_channels == 4 else cv2.COLOR_RGB2GRAY
        image = cv2.cvtColor(image, code)
    return image
def get_max_mip_level(image):
    """Return log2 of the shorter spatial dimension (max usable mip level) as int32."""
    assert len(image.shape) == 3
    shortest_side = min(image.shape[:-1])
    return np.log2(shortest_side).astype(np.int32)
def write_images(path_mask, images):
    """Write each image as a uint8 PNG, numbering files via path_mask's {frame} field.

    Images are assumed to be RGB(A); channels are swapped to BGR(A) because
    cv2.imwrite expects OpenCV's BGR channel order.
    """
    for i, image in enumerate(images):
        path = path_mask.format(frame=i)
        print("write image with shape", image.shape, "to", path)
        image = img_to_UINT8(image)
        if image.shape[-1]==3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if image.shape[-1]==4:
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)
        cv2.imwrite(path, image)
def flow_to_image(flow, mode="MAG_ANG"):
    """Visualize a 2-channel flow field as an RGB image.

    Modes:
        "ABS_NORM": absolute values normalized by the global maximum, padded
            with a zero blue channel; float output in [0,1].
        "MAG_ANG": HSV encoding — hue from the flow angle, value from the
            normalized magnitude — converted to RGB float32 in [0,1].
    """
    flow = flow.astype(np.float32)
    print("flow stats: ", np.amin(flow), np.mean(flow), np.amax(flow))
    if mode=="ABS_NORM":
        flow = np.abs(flow)
        # Fix: guard against an all-zero field, which previously divided by
        # zero and produced a NaN image.
        flow_max = np.amax(flow)
        if flow_max > 0:
            flow /= flow_max
        flow = np.pad(flow, ((0,0),(0,0),(0,1)))
    if mode=="MAG_ANG":
        hsv = np.ones(list(flow.shape[:-1])+[3], dtype=np.uint8)*255
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        # OpenCV hue range is [0,180); map the angle from [0, 2*pi).
        hsv[..., 0] = ang * (180 / (2*np.pi))
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        flow = img_to_FP32(cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB))
    print("flow image stats: ", np.amin(flow), np.mean(flow), np.amax(flow))
    return flow
# Brox flow hyper-parameters (smoothness weight and minimum pyramid width).
alpha = 0.045 # 0.012
window = 30 # 20
def get_dense_optical_flow(image1, image2):
    """Compute a dense 2D flow field describing how image1 moves towards image2.

    Multi-channel inputs are converted to single-channel grayscale first.
    The backend is chosen by the module-level OPTFLOW_METHOD ("Farneback" via
    OpenCV, or "Brox" via pyflow); any other value leaves `flow` undefined.

    Returns:
        float32 array of shape (H, W, 2) with per-pixel (u, v) displacement.
    """
    if image1.shape[-1]>1:
        image1 = cv2.cvtColor(image1, cv2.COLOR_RGBA2GRAY)[...,np.newaxis]
    if image2.shape[-1]>1:
        image2 = cv2.cvtColor(image2, cv2.COLOR_RGBA2GRAY)[...,np.newaxis]
    if OPTFLOW_METHOD=="Farneback":
        flow = cv2.calcOpticalFlowFarneback(img_to_UINT8(image1), img_to_UINT8(image2), None, \
            pyr_scale=0.5, levels=15, #min(get_max_mip_level(image1), get_max_mip_level(image2)), \
            winsize=11, iterations=5, poly_n=5, poly_sigma=1.1, flags=0)
    elif OPTFLOW_METHOD=="Brox":
        u, v, _ = pyflow.coarse2fine_flow(
            img_to_FP64(image1), img_to_FP64(image2), alpha=alpha, ratio=0.875, minWidth=window, nOuterFPIterations=12, nInnerFPIterations=1,
            nSORIterations=40, colType=1) #alpha 0.012
        flow = np.concatenate((u[...,np.newaxis], v[...,np.newaxis]), axis=-1).astype(np.float32)
    #print("flow_shape", flow.shape, "dtype", flow.dtype)
    return flow
def warp_image(image, flow):
    """Warp an image by a relative flow field.

    flow: inverse relative lookup position for every pixel in the output
    (channel 0 = x offset, channel 1 = y offset).
    """
    # Unary minus allocates a new array, so the caller's flow is not mutated
    # by the in-place additions below.
    flow = -flow
    # Turn the relative displacements into the absolute sampling map cv2.remap expects.
    flow[...,0] += np.arange(flow.shape[1])
    flow[...,1] += np.arange(flow.shape[0])[:, np.newaxis]
    return cv2.remap(image, flow, None, cv2.INTER_LINEAR)
def lerp_image(image1, image2, t, optical_flow=None):
    """Blend two images at parameter t after warping both along one shared flow field."""
    if optical_flow is None:
        optical_flow = get_dense_optical_flow(image1, image2)
    warped_a = warp_image(image1, optical_flow*(t))
    warped_b = warp_image(image2, optical_flow*(-(1.-t)))
    return warped_a*(1.-t) + warped_b*t
def lerp_image_2(image1, image2, t, optical_flow1=None, optical_flow2=None):
    """Two-way warped blend: each image is warped along its own flow field before mixing."""
    if optical_flow1 is None:
        optical_flow1 = get_dense_optical_flow(image1, image2)
    if optical_flow2 is None:
        optical_flow2 = get_dense_optical_flow(image2, image1)
    warped_a = warp_image(image1, optical_flow1*(t))
    warped_b = warp_image(image2, optical_flow2*(1.-t))
    return warped_a*(1.-t) + warped_b*t
def lerp(a, b, t):
    """Linear interpolation: returns a at t=0 and b at t=1."""
    return (1 - t) * a + t * b

def lerp_vector(v1, v2, t):
    """Element-wise lerp of two vectors (coerced to numpy arrays)."""
    start, end = np.asarray(v1), np.asarray(v2)
    return lerp(start, end, t)
def slerp_vector(v1, v2, t, normalized=True):
    """Spherical linear interpolation between two vectors.

    https://en.wikipedia.org/wiki/Slerp

    Directions are interpolated along the great circle between v1 and v2.
    With normalized=False, the magnitude is additionally lerped between
    |v1| and |v2|.

    Raises:
        ValueError: if v1 and v2 are (anti-)parallel, where slerp is undefined.
    """
    l1 = np.linalg.norm(v1)
    l2 = np.linalg.norm(v2)
    v1 = np.asarray(v1)/l1
    v2 = np.asarray(v2)/l2
    # Fix: clamp the dot product to the arccos domain. Floating-point rounding
    # can yield values slightly outside [-1, 1], which previously slipped past
    # the exact-equality parallel check and made np.arccos return NaN.
    cos_angle = np.clip(np.dot(v1, v2), -1.0, 1.0)
    if np.abs(cos_angle)==1:
        raise ValueError("Can't interpolate {} and {}".format(v1, v2))
    angle = np.arccos(cos_angle)
    direction = (v1 * np.sin((1-t)*angle) + v2 * np.sin(t*angle))/np.sin(angle)
    if not normalized:
        direction *= lerp(l1, l2, t)
    return direction
def interpolate_camera_calibration(cal1, cal2, t, focus_slerp=None):
    """Blend two camera calibrations at parameter t.

    Orientation vectors are slerped. The position is lerped unless focus_slerp
    (a focus point) is given, in which case the position is slerped around it.
    """
    calib = {
        key: slerp_vector(cal1[key], cal2[key], t, normalized=True)
        for key in ("forward", "up", "right")
    }
    if focus_slerp is None:
        calib["position"] = lerp_vector(cal1["position"], cal2["position"], t)
    else:
        offset1 = np.subtract(cal1["position"], focus_slerp)
        offset2 = np.subtract(cal2["position"], focus_slerp)
        calib["position"] = np.add(slerp_vector(offset1, offset2, t, normalized=False), focus_slerp)
    calib["fov_horizontal"] = lerp(cal1["fov_horizontal"], cal2["fov_horizontal"], t)
    return calib
if USE_RENDER:
    # Mirror the z axis — presumably converts between the ScalarFlow and
    # renderer coordinate handedness; TODO confirm against phitest.render.
    flip_z = lambda v: np.asarray(v)*np.asarray([1,1,-1])
    invert_v = lambda v: np.asarray(v)*(-1)
    # NOTE(review): this function only type-checks its arguments and returns
    # None — it looks like an unfinished stub.
    def lerp_transform(T1, T2, t):
        assert isinstance(T1, Transform)
        assert isinstance(T2, Transform)
def make_camera(calib, focus, focus_depth_clip=1.0):
    """Build a renderer Camera from a calibration dict, oriented at the flipped-z focus.

    The near/far planes are centered on the distance to the focus point with a
    total depth range of focus_depth_clip.
    """
    # Small accessors converting calibration vectors into renderer coordinates.
    def pos():
        return flip_z(calib["position"])
    def fwd():
        return invert_v(flip_z(calib["forward"]))
    def up():
        return flip_z(calib["up"])
    def right():
        return flip_z(calib["right"])
    #fwd, up, right, pos
    cam_focus = flip_z(focus)
    # Quarter-resolution 1920x1080 frustum with 256 depth samples.
    train_cam_resolution = (256, 1920//4, 1080//4)
    aspect = train_cam_resolution[2]/train_cam_resolution[1]
    cam_dh = focus_depth_clip*0.5 #depth half
    position = pos()
    dist = np.linalg.norm(cam_focus-position)
    cam = Camera(MatrixTransform.from_fwd_up_right_pos(fwd(), up(), right(), position), nearFar=[dist-cam_dh,dist+cam_dh], fov=calib["fov_horizontal"], aspect=aspect, static=None)
    cam.transform.grid_size = copy.copy(train_cam_resolution)
    return cam
if __name__=="__main__":
    # Output directory encodes the Brox parameters for easy comparison of runs.
    out_path = "./view_interpolation_tests/view_interpolation_synth_1-3_B_%.2e_%d_2-way-warp"%(alpha, window)
    #if os.path.exists(out_path):
    #    shutil.rmtree(out_path)
    os.makedirs(out_path, exist_ok=True)
    print("output path:", out_path)
    # Branch 1 (active): render synthetic views from a reconstructed density and
    # compare camera interpolation against optical-flow image interpolation.
    if True: #USE_RENDER:
        print("interpolation test with rendered images")
        camId_1 = 1
        camId_2 = 3
        n_subdivisions = 5
        #slerp_position = True
        print("load camera calibration for 2 cameras")
        with open("scalaFlow_cameras.json", "r") as file:
            calib = json.load(file)
        cal1 = calib[str(camId_1)]
        cal2 = calib[str(camId_2)]
        # Fall back to the dataset-wide average FOV when a camera has none.
        if cal1["fov_horizontal"] is None: cal1["fov_horizontal"] =calib["fov_horizontal_average"]
        if cal2["fov_horizontal"] is None: cal2["fov_horizontal"] =calib["fov_horizontal_average"]
        focus = calib["focus"]
        print("interpolate cameras")
        # for cal in calibrations:
        #     print(cal)
        # exit(0)
        print("load density")
        sf_dens_transform = GridTransform([100,178,100], translation=flip_z(calib["volume_offset"] + np.asarray([0,0,calib["marker_width"]])), scale=[calib["marker_width"]]*3, normalize='MIN')
        with np.load("data/ScalarFlow/sim_000000/reconstruction/density_000140.npz") as np_data:
            # Reverse the depth axis and add a batch dimension.
            density = np_data["data"][np.newaxis,::-1,...]
        print(density.shape)
        density = tf.constant(density, dtype=tf.float32)
        sf_dens_transform.set_data(density)
        # Run both position-interpolation variants: plain lerp and slerp around the focus.
        for slerp_position in [False, True]:
            calibrations = [cal1] + [interpolate_camera_calibration(cal1, cal2, (_+1)/(n_subdivisions+1), focus if slerp_position else None) for _ in range( n_subdivisions)] + [cal2]
            print("prepare rendering")
            cameras = [make_camera(_, focus) for _ in calibrations]
            renderer = Renderer(None,
                filter_mode="LINEAR",
                mipmapping="LINEAR",
                num_mips=3,
                blend_mode="BEER_LAMBERT",
                )
            print("render interpolated cameras")
            images = [renderer.render_density(sf_dens_transform, [Light(intensity=1.0)], [cam], cut_alpha=True)[0].numpy()[0] for cam in cameras]
            print(images[0].shape)
            write_images(os.path.join(out_path, "render-sub-" + ("slerp" if slerp_position else "lerp") + "_image_{frame:04d}.png"), images)
            #optical flow interpolation for comparison
            print("optical flow")
            flow = get_dense_optical_flow(images[0], images[-1])
            write_images(os.path.join(out_path, "warp-sub_flow_{frame:04d}.png"), [flow_to_image(flow), flow_to_image(-flow)])
            print("backwards optical flow")
            flow_b = get_dense_optical_flow(images[-1], images[0])
            write_images(os.path.join(out_path, "warp-sub_back-flow_{frame:04d}.png"), [flow_to_image(flow_b), flow_to_image(-flow_b)])
            print("warp")
            i_warp_sub = [images[0]]
            i_warp_sub.extend(lerp_image(images[0], images[-1], (s+1)/(n_subdivisions+1), optical_flow=flow) for s in range(n_subdivisions))
            i_warp_sub.append(images[-1])
            print("write images")
            write_images(os.path.join(out_path, "warp-sub_image_{frame:04d}.png"), i_warp_sub)
            print("warp 2-way")
            i_warp_sub = [images[0]]
            i_warp_sub.extend(lerp_image_2(images[0], images[-1], (s+1)/(n_subdivisions+1), optical_flow1=flow, optical_flow2=flow_b) for s in range(n_subdivisions))
            i_warp_sub.append(images[-1])
            print("write images")
            write_images(os.path.join(out_path, "warp-2-sub_image_{frame:04d}.png"), i_warp_sub)
    # Branch 2 (currently unreachable): flow interpolation on real captures at
    # several step sizes between neighboring ScalarFlow cameras.
    else:
        step_2 = True
        step_4 = True
        step_1_subdivsion = True
        n_subdivisions = 5
        print("load images")
        images = load_scalarFlow_images(0,130, cams=[2,1,0,4,3]) #
        #images = [(_*255.) for _ in images] #.astype(np.uint8)
        print("write original images")
        write_images(os.path.join(out_path, "input_image_{frame:04d}.png"), images)
        if step_2:
            print("step 2 optical flow (%s)"%OPTFLOW_METHOD)
            flow1_1 = get_dense_optical_flow(images[0], images[2])
            flow2_1 = get_dense_optical_flow(images[2], images[4])
            write_images(os.path.join(out_path, "warp1_flow_{frame:04d}.png"), [flow_to_image(flow1_1), flow_to_image(flow2_1)])
            print("step 2 warp")
            i_warp_1 = [
                images[0],
                lerp_image(images[0], images[2], 0.5, optical_flow=flow1_1),
                images[2],
                lerp_image(images[2], images[4], 0.5, optical_flow=flow2_1),
                images[4],
            ]
            write_images(os.path.join(out_path, "warp1_image_{frame:04d}.png"), i_warp_1)
        if step_4:
            print("step 4 optical flow (%s)"%OPTFLOW_METHOD)
            flow_2 = get_dense_optical_flow(images[0], images[4])
            write_images(os.path.join(out_path, "warp2_flow_{frame:04d}.png"), [flow_to_image(flow_2)])
            print("step 4 warp")
            i_warp_2 = [
                images[0],
                lerp_image(images[0], images[4], 0.25, optical_flow=flow_2),
                lerp_image(images[0], images[4], 0.5 , optical_flow=flow_2),
                lerp_image(images[0], images[4], 0.75, optical_flow=flow_2),
                images[4],
            ]
            write_images(os.path.join(out_path, "warp2_image_{frame:04d}.png"), i_warp_2)
        if step_1_subdivsion:
            print("step 1 - subdivision optical flow (%s)"%OPTFLOW_METHOD)
            flows = [get_dense_optical_flow(images[i], images[i+1]) for i in range(0, len(images)-1)]
            write_images(os.path.join(out_path, "warp1sub_flow_{frame:04d}.png"), [flow_to_image(_) for _ in flows])
            print("step 1 - subdivision warp")
            i_warp_sub = []
            for i in range(0, len(images)-1):
                i_warp_sub.append(images[i])
                i_warp_sub.extend(lerp_image(images[i], images[i+1], (s+1)/(n_subdivisions+1), optical_flow=flows[i]) for s in range(n_subdivisions))
            i_warp_sub.append(images[-1])
            write_images(os.path.join(out_path, "warp1sub_image_{frame:04d}.png"), i_warp_sub)
print("Done.") | [
"numpy.load",
"numpy.abs",
"numpy.amin",
"cv2.remap",
"numpy.mean",
"numpy.linalg.norm",
"numpy.arange",
"numpy.sin",
"cv2.normalize",
"os.path.join",
"sys.path.append",
"numpy.pad",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.arccos",
"numpy.repeat",
"numpy.log2",
"numpy.asarray",
"te... | [((739, 772), 'sys.path.append', 'sys.path.append', (['"""path/to/pyflow"""'], {}), "('path/to/pyflow')\n", (754, 772), False, 'import os, sys, shutil, copy\n'), ((4869, 4893), 'numpy.arange', 'np.arange', (['flow.shape[1]'], {}), '(flow.shape[1])\n', (4878, 4893), True, 'import numpy as np\n'), ((4960, 5006), 'cv2.remap', 'cv2.remap', (['image', 'flow', 'None', 'cv2.INTER_LINEAR'], {}), '(image, flow, None, cv2.INTER_LINEAR)\n', (4969, 5006), False, 'import cv2\n'), ((5919, 5937), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (5933, 5937), True, 'import numpy as np\n'), ((5945, 5963), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (5959, 5963), True, 'import numpy as np\n'), ((6024, 6038), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (6030, 6038), True, 'import numpy as np\n'), ((6138, 6154), 'numpy.arccos', 'np.arccos', (['angle'], {}), '(angle)\n', (6147, 6154), True, 'import numpy as np\n'), ((8258, 8294), 'os.makedirs', 'os.makedirs', (['out_path'], {'exist_ok': '(True)'}), '(out_path, exist_ok=True)\n', (8269, 8294), False, 'import os, sys, shutil, copy\n'), ((595, 622), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (620, 622), True, 'import tensorflow as tf\n'), ((1750, 1763), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1757, 1763), True, 'import numpy as np\n'), ((2305, 2333), 'numpy.repeat', 'np.repeat', (['image', '(3)'], {'axis': '(-1)'}), '(image, 3, axis=-1)\n', (2314, 2333), True, 'import numpy as np\n'), ((2955, 2979), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (2966, 2979), False, 'import cv2\n'), ((3081, 3094), 'numpy.amin', 'np.amin', (['flow'], {}), '(flow)\n', (3088, 3094), True, 'import numpy as np\n'), ((3096, 3109), 'numpy.mean', 'np.mean', (['flow'], {}), '(flow)\n', (3103, 3109), True, 'import numpy as np\n'), ((3111, 3124), 'numpy.amax', 'np.amax', (['flow'], {}), '(flow)\n', (3118, 3124), True, 'import numpy 
as np\n'), ((3162, 3174), 'numpy.abs', 'np.abs', (['flow'], {}), '(flow)\n', (3168, 3174), True, 'import numpy as np\n'), ((3186, 3199), 'numpy.amax', 'np.amax', (['flow'], {}), '(flow)\n', (3193, 3199), True, 'import numpy as np\n'), ((3210, 3248), 'numpy.pad', 'np.pad', (['flow', '((0, 0), (0, 0), (0, 1))'], {}), '(flow, ((0, 0), (0, 0), (0, 1)))\n', (3216, 3248), True, 'import numpy as np\n'), ((3344, 3387), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (3359, 3387), False, 'import cv2\n'), ((3446, 3495), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (3459, 3495), False, 'import cv2\n'), ((3589, 3602), 'numpy.amin', 'np.amin', (['flow'], {}), '(flow)\n', (3596, 3602), True, 'import numpy as np\n'), ((3604, 3617), 'numpy.mean', 'np.mean', (['flow'], {}), '(flow)\n', (3611, 3617), True, 'import numpy as np\n'), ((3619, 3632), 'numpy.amax', 'np.amax', (['flow'], {}), '(flow)\n', (3626, 3632), True, 'import numpy as np\n'), ((4911, 4935), 'numpy.arange', 'np.arange', (['flow.shape[0]'], {}), '(flow.shape[0])\n', (4920, 4935), True, 'import numpy as np\n'), ((5781, 5795), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (5791, 5795), True, 'import numpy as np\n'), ((5797, 5811), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (5807, 5811), True, 'import numpy as np\n'), ((5971, 5985), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (5981, 5985), True, 'import numpy as np\n'), ((5996, 6010), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (6006, 6010), True, 'import numpy as np\n'), ((6044, 6057), 'numpy.abs', 'np.abs', (['angle'], {}), '(angle)\n', (6050, 6057), True, 'import numpy as np\n'), ((6220, 6233), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6226, 6233), True, 'import numpy as np\n'), ((6674, 6716), 'numpy.subtract', 'np.subtract', (["cal1['position']", 'focus_slerp'], 
{}), "(cal1['position'], focus_slerp)\n", (6685, 6716), True, 'import numpy as np\n'), ((6725, 6767), 'numpy.subtract', 'np.subtract', (["cal2['position']", 'focus_slerp'], {}), "(cal2['position'], focus_slerp)\n", (6736, 6767), True, 'import numpy as np\n'), ((7767, 7803), 'numpy.linalg.norm', 'np.linalg.norm', (['(cam_focus - position)'], {}), '(cam_focus - position)\n', (7781, 7803), True, 'import numpy as np\n'), ((8010, 8041), 'copy.copy', 'copy.copy', (['train_cam_resolution'], {}), '(train_cam_resolution)\n', (8019, 8041), False, 'import os, sys, shutil, copy\n'), ((9377, 9415), 'tensorflow.constant', 'tf.constant', (['density'], {'dtype': 'tf.float32'}), '(density, dtype=tf.float32)\n', (9388, 9415), True, 'import tensorflow as tf\n'), ((313, 330), 'tools.GPUmonitor.getAvailableGPU', 'getAvailableGPU', ([], {}), '()\n', (328, 330), False, 'from tools.GPUmonitor import getAvailableGPU\n'), ((1859, 1873), 'numpy.amin', 'np.amin', (['image'], {}), '(image)\n', (1866, 1873), True, 'import numpy as np\n'), ((1875, 1889), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (1882, 1889), True, 'import numpy as np\n'), ((1891, 1905), 'numpy.amax', 'np.amax', (['image'], {}), '(image)\n', (1898, 1905), True, 'import numpy as np\n'), ((2393, 2433), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGBA2GRAY'], {}), '(image, cv2.COLOR_RGBA2GRAY)\n', (2405, 2433), False, 'import cv2\n'), ((2558, 2574), 'numpy.log2', 'np.log2', (['min_res'], {}), '(min_res)\n', (2565, 2574), True, 'import numpy as np\n'), ((2834, 2872), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (2846, 2872), False, 'import cv2\n'), ((2911, 2951), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGBA2BGRA'], {}), '(image, cv2.COLOR_RGBA2BGRA)\n', (2923, 2951), False, 'import cv2\n'), ((3518, 3554), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (3530, 3554), False, 'import cv2\n'), 
((3865, 3906), 'cv2.cvtColor', 'cv2.cvtColor', (['image1', 'cv2.COLOR_RGBA2GRAY'], {}), '(image1, cv2.COLOR_RGBA2GRAY)\n', (3877, 3906), False, 'import cv2\n'), ((3960, 4001), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_RGBA2GRAY'], {}), '(image2, cv2.COLOR_RGBA2GRAY)\n', (3972, 4001), False, 'import cv2\n'), ((7077, 7090), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (7087, 7090), True, 'import numpy as np\n'), ((7091, 7113), 'numpy.asarray', 'np.asarray', (['[1, 1, -1]'], {}), '([1, 1, -1])\n', (7101, 7113), True, 'import numpy as np\n'), ((7135, 7148), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (7145, 7148), True, 'import numpy as np\n'), ((8601, 8616), 'json.load', 'json.load', (['file'], {}), '(file)\n', (8610, 8616), False, 'import json\n'), ((9205, 9276), 'numpy.load', 'np.load', (['"""data/ScalarFlow/sim_000000/reconstruction/density_000140.npz"""'], {}), "('data/ScalarFlow/sim_000000/reconstruction/density_000140.npz')\n", (9212, 9276), True, 'import numpy as np\n'), ((10394, 10449), 'os.path.join', 'os.path.join', (['out_path', '"""warp-sub_flow_{frame:04d}.png"""'], {}), "(out_path, 'warp-sub_flow_{frame:04d}.png')\n", (10406, 10449), False, 'import os, sys, shutil, copy\n'), ((10605, 10665), 'os.path.join', 'os.path.join', (['out_path', '"""warp-sub_back-flow_{frame:04d}.png"""'], {}), "(out_path, 'warp-sub_back-flow_{frame:04d}.png')\n", (10617, 10665), False, 'import os, sys, shutil, copy\n'), ((10971, 11027), 'os.path.join', 'os.path.join', (['out_path', '"""warp-sub_image_{frame:04d}.png"""'], {}), "(out_path, 'warp-sub_image_{frame:04d}.png')\n", (10983, 11027), False, 'import os, sys, shutil, copy\n'), ((11327, 11385), 'os.path.join', 'os.path.join', (['out_path', '"""warp-2-sub_image_{frame:04d}.png"""'], {}), "(out_path, 'warp-2-sub_image_{frame:04d}.png')\n", (11339, 11385), False, 'import os, sys, shutil, copy\n'), ((11694, 11747), 'os.path.join', 'os.path.join', (['out_path', '"""input_image_{frame:04d}.png"""'], 
{}), "(out_path, 'input_image_{frame:04d}.png')\n", (11706, 11747), False, 'import os, sys, shutil, copy\n'), ((478, 489), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (486, 489), False, 'import os, sys, shutil, copy\n'), ((6176, 6199), 'numpy.sin', 'np.sin', (['((1 - t) * angle)'], {}), '((1 - t) * angle)\n', (6182, 6199), True, 'import numpy as np\n'), ((6203, 6220), 'numpy.sin', 'np.sin', (['(t * angle)'], {}), '(t * angle)\n', (6209, 6220), True, 'import numpy as np\n'), ((10131, 10241), 'os.path.join', 'os.path.join', (['out_path', "('render-sub-' + ('slerp' if slerp_position else 'lerp') +\n '_image_{frame:04d}.png')"], {}), "(out_path, 'render-sub-' + ('slerp' if slerp_position else\n 'lerp') + '_image_{frame:04d}.png')\n", (10143, 10241), False, 'import os, sys, shutil, copy\n'), ((11963, 12015), 'os.path.join', 'os.path.join', (['out_path', '"""warp1_flow_{frame:04d}.png"""'], {}), "(out_path, 'warp1_flow_{frame:04d}.png')\n", (11975, 12015), False, 'import os, sys, shutil, copy\n'), ((12312, 12365), 'os.path.join', 'os.path.join', (['out_path', '"""warp1_image_{frame:04d}.png"""'], {}), "(out_path, 'warp1_image_{frame:04d}.png')\n", (12324, 12365), False, 'import os, sys, shutil, copy\n'), ((12523, 12575), 'os.path.join', 'os.path.join', (['out_path', '"""warp2_flow_{frame:04d}.png"""'], {}), "(out_path, 'warp2_flow_{frame:04d}.png')\n", (12535, 12575), False, 'import os, sys, shutil, copy\n'), ((12897, 12950), 'os.path.join', 'os.path.join', (['out_path', '"""warp2_image_{frame:04d}.png"""'], {}), "(out_path, 'warp2_image_{frame:04d}.png')\n", (12909, 12950), False, 'import os, sys, shutil, copy\n'), ((13169, 13224), 'os.path.join', 'os.path.join', (['out_path', '"""warp1sub_flow_{frame:04d}.png"""'], {}), "(out_path, 'warp1sub_flow_{frame:04d}.png')\n", (13181, 13224), False, 'import os, sys, shutil, copy\n'), ((13582, 13638), 'os.path.join', 'os.path.join', (['out_path', '"""warp1sub_image_{frame:04d}.png"""'], {}), "(out_path, 
'warp1sub_image_{frame:04d}.png')\n", (13594, 13638), False, 'import os, sys, shutil, copy\n'), ((4566, 4631), 'numpy.concatenate', 'np.concatenate', (['(u[..., np.newaxis], v[..., np.newaxis])'], {'axis': '(-1)'}), '((u[..., np.newaxis], v[..., np.newaxis]), axis=-1)\n', (4580, 4631), True, 'import numpy as np\n'), ((9105, 9146), 'numpy.asarray', 'np.asarray', (["[0, 0, calib['marker_width']]"], {}), "([0, 0, calib['marker_width']])\n", (9115, 9146), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10
@author: jaehyuk
"""
import numpy as np
import scipy.stats as ss
import scipy.optimize as sopt
import scipy.integrate as spint
import pyfeng as pf
from . import normal
from . import bsm
'''
MC model class for Beta=1
'''
class ModelBsmMC:
    """Plain Monte-Carlo pricer for a stochastic-volatility model with a
    lognormal (beta = 1) backbone:

        d(log S) = sigma dW - sigma^2/2 dt,   d(sigma) = vov * sigma dZ,
        corr(dW, dZ) = rho.

    Implied vols are recovered by inverting the Black-Scholes price with
    ``pyfeng.Bsm.impvol``.
    """

    beta = 1.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    bsm_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0, time_steps=1_000, n_samples=10_000):
        """
        Args:
            sigma: initial volatility
            vov: volatility of volatility
            rho: spot/vol correlation
            beta: accepted for interface compatibility; this class is beta = 1
            intr: interest rate
            divr: dividend rate
            time_steps: default number of MC time steps
            n_samples: default number of MC sample paths
        """
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.time_steps = time_steps
        self.n_samples = n_samples
        self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)

    def bsm_vol(self, strike, spot, texp=None, sigma=None):
        """Implied BSM volatility of the MC price (via ``self.bsm_model.impvol``)."""
        price = self.price(strike, spot, texp, sigma)
        return self.bsm_model.impvol(price, strike, spot, texp)

    def price(self, strike, spot, texp=None, sigma=None, cp=1, time_steps=1_000, n_samples=10_000):
        """Monte-Carlo call prices for a vector of strikes.

        Fixes vs. the previous version:
          * the ``sigma`` argument is now actually used as the initial
            volatility (it used to be silently ignored in favor of self.sigma);
          * only the running path state is kept instead of full path matrices;
          * the per-strike Python loop is replaced by a vectorized payoff.

        Args:
            strike: array-like of strikes (a scalar is also accepted now)
            spot: spot price
            texp: time to expiry
            sigma: initial volatility override (defaults to ``self.sigma``)
            cp: accepted for interface compatibility; only calls are priced
            time_steps: number of Euler time steps
            n_samples: number of sample paths

        Returns:
            np.ndarray of discounted call prices, one per strike.
        """
        np.random.seed(12345)  # fixed seed for reproducible prices
        disc_fac = np.exp(-texp * self.intr)
        if sigma is None:
            sigma = self.sigma
        self.time_steps = time_steps
        self.n_samples = n_samples

        dt = texp / time_steps
        sqrt_dt = np.sqrt(dt)
        # correlated drivers: z drives the vol, w = rho*z + sqrt(1-rho^2)*x drives the spot
        z = np.random.normal(size=(n_samples, time_steps))
        x = np.random.normal(size=(n_samples, time_steps))
        w = self.rho * z + np.sqrt(1.0 - self.rho ** 2) * x

        log_sk = np.full(n_samples, np.log(spot), dtype=float)
        sigma_tk = np.full(n_samples, sigma, dtype=float)
        for i in range(time_steps):
            # Euler step for log-spot uses the *current* sigma; sigma updated afterwards
            log_sk += sigma_tk * sqrt_dt * w[:, i] - 0.5 * sigma_tk ** 2 * dt
            sigma_tk *= np.exp(self.vov * sqrt_dt * z[:, i] - 0.5 * self.vov ** 2 * dt)

        sk = np.exp(log_sk)
        strike = np.atleast_1d(np.asarray(strike, dtype=float))
        # vectorized call payoff: paths along axis 0, strikes along axis 1
        price = np.maximum(sk[:, None] - strike[None, :], 0.0).mean(axis=0)
        return disc_fac * price
'''
MC model class for Beta=0
'''
class ModelNormalMC:
    """Plain Monte-Carlo pricer for a stochastic-volatility model with a
    normal/Bachelier (beta = 0) backbone:

        dS = sigma dW,   d(sigma) = vov * sigma dZ,   corr(dW, dZ) = rho.

    Implied normal vols are recovered with ``pyfeng.Norm.impvol``.
    """

    beta = 0.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    normal_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0, time_steps=1_000, n_samples=10_000):
        """
        Args:
            sigma: initial (normal) volatility
            vov: volatility of volatility
            rho: spot/vol correlation
            beta: accepted for interface compatibility; this class is beta = 0
            intr: interest rate
            divr: dividend rate
            time_steps: default number of MC time steps
            n_samples: default number of MC sample paths
        """
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.time_steps = time_steps
        self.n_samples = n_samples
        self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)

    def norm_vol(self, strike, spot, texp=None, sigma=None):
        """Implied normal volatility of the MC price (via ``self.normal_model.impvol``)."""
        price = self.price(strike, spot, texp, sigma)
        return self.normal_model.impvol(price, strike, spot, texp)

    def price(self, strike, spot, texp=None, sigma=None, cp=1, time_steps=1_000, n_samples=10_000):
        """Monte-Carlo call prices for a vector of strikes.

        Fixes vs. the previous version:
          * the ``sigma`` argument is now actually used as the initial
            volatility (it used to be silently ignored in favor of self.sigma);
          * only the running path state is kept instead of full path matrices;
          * the per-strike Python loop is replaced by a vectorized payoff.

        Args:
            strike: array-like of strikes (a scalar is also accepted now)
            spot: spot price
            texp: time to expiry
            sigma: initial volatility override (defaults to ``self.sigma``)
            cp: accepted for interface compatibility; only calls are priced
            time_steps: number of Euler time steps
            n_samples: number of sample paths

        Returns:
            np.ndarray of discounted call prices, one per strike.
        """
        np.random.seed(12345)  # fixed seed for reproducible prices
        disc_fac = np.exp(-texp * self.intr)
        if sigma is None:
            sigma = self.sigma
        self.time_steps = time_steps
        self.n_samples = n_samples

        dt = texp / time_steps
        sqrt_dt = np.sqrt(dt)
        # correlated drivers: z drives the vol, w = rho*z + sqrt(1-rho^2)*x drives the spot
        z = np.random.normal(size=(n_samples, time_steps))
        x = np.random.normal(size=(n_samples, time_steps))
        w = self.rho * z + np.sqrt(1.0 - self.rho ** 2) * x

        sk = np.full(n_samples, float(spot))
        sigma_tk = np.full(n_samples, sigma, dtype=float)
        for i in range(time_steps):
            # arithmetic (normal) Euler step; sigma updated afterwards
            sk += sigma_tk * sqrt_dt * w[:, i]
            sigma_tk *= np.exp(self.vov * sqrt_dt * z[:, i] - 0.5 * self.vov ** 2 * dt)

        strike = np.atleast_1d(np.asarray(strike, dtype=float))
        # vectorized call payoff: paths along axis 0, strikes along axis 1
        price = np.maximum(sk[:, None] - strike[None, :], 0.0).mean(axis=0)
        return disc_fac * price
'''
Conditional MC model class for Beta=1
'''
class ModelBsmCondMC:
    """Conditional Monte-Carlo pricer for the beta = 1 model.

    Only the volatility path is simulated; conditional on the terminal vol and
    the integrated variance, the option price is the closed-form BSM price with
    an adjusted spot and an effective volatility, averaged over vol paths.
    """

    beta = 1.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    bsm_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0, time_steps=1_000, n_samples=10_000):
        """See ModelBsmMC.__init__ for parameter meanings."""
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.time_steps = time_steps
        self.n_samples = n_samples
        self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)

    def bsm_vol(self, strike, spot, texp=None):
        """Implied BSM volatility of the conditional-MC price.

        Fix: the previous version passed an undefined name ``sigma`` to
        ``self.price`` (a NameError at runtime); ``price`` here takes no sigma
        override, so the call now matches its signature.
        """
        price = self.price(strike, spot, texp)
        return self.bsm_model.impvol(price, strike, spot, texp)

    def price(self, strike, spot, texp=None, cp=1, time_steps=1_000, n_samples=10_000):
        """Conditional Monte-Carlo call prices.

        Simulates vol paths only, integrates the variance with Simpson's rule,
        and averages the conditional BSM prices over paths.

        Args:
            strike: array-like of strikes
            spot: spot price
            texp: time to expiry
            cp: accepted for interface compatibility; only calls are priced
            time_steps: number of Euler time steps for the vol path
            n_samples: number of vol sample paths

        Returns:
            np.ndarray of discounted call prices, one per strike.
        """
        np.random.seed(12345)  # fixed seed for reproducible prices
        disc_fac = np.exp(-texp * self.intr)
        self.time_steps = time_steps
        self.n_samples = n_samples

        dt = texp / time_steps
        sqrt_dt = np.sqrt(dt)
        z = np.random.normal(size=(n_samples, time_steps))
        # full vol path is needed for the Simpson integration below
        sigma_tk = self.sigma * np.ones([n_samples, time_steps + 1])
        for i in range(time_steps):
            sigma_tk[:, i + 1] = sigma_tk[:, i] * np.exp(self.vov * sqrt_dt * z[:, i] - 0.5 * self.vov ** 2 * dt)

        # normalized integrated variance I = (1/sigma_0^2) * int sigma_t^2 dt
        # NOTE(review): scipy renamed `simps` to `simpson`; kept for compatibility
        # with the scipy version this project pins.
        I = spint.simps(sigma_tk * sigma_tk, dx=texp / time_steps) / self.sigma ** 2
        # conditional spot and effective volatility (standard conditional-MC formulas)
        spot_cond = spot * np.exp(self.rho * (sigma_tk[:, -1] - self.sigma) / self.vov
                                  - (self.rho * self.sigma) ** 2 * texp * I / 2)
        vol = self.sigma * np.sqrt((1 - self.rho ** 2) * I)

        # float accumulator (np.zeros_like(strike) could be an int array)
        price = np.zeros(len(strike))
        for j, k in enumerate(strike):
            price[j] = np.mean(bsm.price(k, spot_cond, texp, vol))
        return disc_fac * price
'''
Conditional MC model class for Beta=0
'''
class ModelNormalCondMC:
    """Conditional Monte-Carlo pricer for the beta = 0 (normal) model.

    Only the volatility path is simulated; conditional on the terminal vol and
    the integrated variance, the option price is the closed-form normal
    (Bachelier) price with an adjusted spot and effective vol, averaged over
    vol paths.
    """

    beta = 0.0  # fixed (not used)
    vov, rho = 0.0, 0.0
    sigma, intr, divr = None, None, None
    normal_model = None

    def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0, time_steps=1_000, n_samples=10_000):
        """See ModelNormalMC.__init__ for parameter meanings."""
        self.sigma = sigma
        self.vov = vov
        self.rho = rho
        self.intr = intr
        self.divr = divr
        self.time_steps = time_steps
        self.n_samples = n_samples
        self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)

    def norm_vol(self, strike, spot, texp=None):
        """Implied normal volatility of the conditional-MC price.

        Fix: the previous version passed an undefined name ``sigma`` to
        ``self.price`` (a NameError at runtime); ``price`` here takes no sigma
        override, so the call now matches its signature.
        """
        price = self.price(strike, spot, texp)
        return self.normal_model.impvol(price, strike, spot, texp)

    def price(self, strike, spot, texp=None, cp=1, time_steps=1_000, n_samples=10_000):
        """Conditional Monte-Carlo call prices.

        Simulates vol paths only, integrates the variance with Simpson's rule,
        and averages the conditional normal-model prices over paths.

        Args:
            strike: array-like of strikes
            spot: spot price
            texp: time to expiry
            cp: accepted for interface compatibility; only calls are priced
            time_steps: number of Euler time steps for the vol path
            n_samples: number of vol sample paths

        Returns:
            np.ndarray of discounted call prices, one per strike.
        """
        np.random.seed(12345)  # fixed seed for reproducible prices
        disc_fac = np.exp(-texp * self.intr)
        self.time_steps = time_steps
        self.n_samples = n_samples

        dt = texp / time_steps
        sqrt_dt = np.sqrt(dt)
        z = np.random.normal(size=(n_samples, time_steps))
        # full vol path is needed for the Simpson integration below
        sigma_tk = self.sigma * np.ones([n_samples, time_steps + 1])
        for i in range(time_steps):
            sigma_tk[:, i + 1] = sigma_tk[:, i] * np.exp(self.vov * sqrt_dt * z[:, i] - 0.5 * self.vov ** 2 * dt)

        # normalized integrated variance I = (1/sigma_0^2) * int sigma_t^2 dt
        # NOTE(review): scipy renamed `simps` to `simpson`; kept for compatibility
        # with the scipy version this project pins.
        I = spint.simps(sigma_tk * sigma_tk, dx=texp / time_steps) / self.sigma ** 2
        # conditional spot and effective volatility (normal-model analogues)
        spot_cond = spot + self.rho * (sigma_tk[:, -1] - self.sigma) / self.vov
        vol = self.sigma * np.sqrt((1 - self.rho ** 2) * I)

        # float accumulator (np.zeros_like(strike) could be an int array)
        price = np.zeros(len(strike))
        for j, k in enumerate(strike):
            price[j] = np.mean(normal.price(k, spot_cond, texp, vol))
        return disc_fac * price
| [
"numpy.zeros_like",
"numpy.random.seed",
"pyfeng.Bsm",
"numpy.log",
"numpy.ones_like",
"numpy.maximum",
"numpy.zeros",
"numpy.ones",
"pyfeng.Norm",
"numpy.exp",
"numpy.random.normal",
"scipy.integrate.simps",
"numpy.sqrt"
] | [((818, 853), 'pyfeng.Bsm', 'pf.Bsm', (['sigma'], {'intr': 'intr', 'divr': 'divr'}), '(sigma, intr=intr, divr=divr)\n', (824, 853), True, 'import pyfeng as pf\n'), ((1498, 1519), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (1512, 1519), True, 'import numpy as np\n'), ((1538, 1563), 'numpy.exp', 'np.exp', (['(-texp * self.divr)'], {}), '(-texp * self.divr)\n', (1544, 1563), True, 'import numpy as np\n'), ((1583, 1608), 'numpy.exp', 'np.exp', (['(-texp * self.intr)'], {}), '(-texp * self.intr)\n', (1589, 1608), True, 'import numpy as np\n'), ((1942, 1998), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), '(size=(self.n_samples, self.time_steps))\n', (1958, 1998), True, 'import numpy as np\n'), ((2011, 2067), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), '(size=(self.n_samples, self.time_steps))\n', (2027, 2067), True, 'import numpy as np\n'), ((2143, 2190), 'numpy.zeros', 'np.zeros', (['[self.n_samples, self.time_steps + 1]'], {}), '([self.n_samples, self.time_steps + 1])\n', (2151, 2190), True, 'import numpy as np\n'), ((2828, 2849), 'numpy.zeros_like', 'np.zeros_like', (['strike'], {}), '(strike)\n', (2841, 2849), True, 'import numpy as np\n'), ((3518, 3554), 'pyfeng.Norm', 'pf.Norm', (['sigma'], {'intr': 'intr', 'divr': 'divr'}), '(sigma, intr=intr, divr=divr)\n', (3525, 3554), True, 'import pyfeng as pf\n'), ((4216, 4237), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (4230, 4237), True, 'import numpy as np\n'), ((4256, 4281), 'numpy.exp', 'np.exp', (['(-texp * self.divr)'], {}), '(-texp * self.divr)\n', (4262, 4281), True, 'import numpy as np\n'), ((4301, 4326), 'numpy.exp', 'np.exp', (['(-texp * self.intr)'], {}), '(-texp * self.intr)\n', (4307, 4326), True, 'import numpy as np\n'), ((4660, 4716), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), 
'(size=(self.n_samples, self.time_steps))\n', (4676, 4716), True, 'import numpy as np\n'), ((4729, 4785), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), '(size=(self.n_samples, self.time_steps))\n', (4745, 4785), True, 'import numpy as np\n'), ((4861, 4908), 'numpy.zeros', 'np.zeros', (['[self.n_samples, self.time_steps + 1]'], {}), '([self.n_samples, self.time_steps + 1])\n', (4869, 4908), True, 'import numpy as np\n'), ((5401, 5422), 'numpy.zeros_like', 'np.zeros_like', (['strike'], {}), '(strike)\n', (5414, 5422), True, 'import numpy as np\n'), ((6167, 6202), 'pyfeng.Bsm', 'pf.Bsm', (['sigma'], {'intr': 'intr', 'divr': 'divr'}), '(sigma, intr=intr, divr=divr)\n', (6173, 6202), True, 'import pyfeng as pf\n'), ((6832, 6853), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (6846, 6853), True, 'import numpy as np\n'), ((6872, 6897), 'numpy.exp', 'np.exp', (['(-texp * self.divr)'], {}), '(-texp * self.divr)\n', (6878, 6897), True, 'import numpy as np\n'), ((6917, 6942), 'numpy.exp', 'np.exp', (['(-texp * self.intr)'], {}), '(-texp * self.intr)\n', (6923, 6942), True, 'import numpy as np\n'), ((7206, 7262), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), '(size=(self.n_samples, self.time_steps))\n', (7222, 7262), True, 'import numpy as np\n'), ((7931, 7952), 'numpy.zeros_like', 'np.zeros_like', (['strike'], {}), '(strike)\n', (7944, 7952), True, 'import numpy as np\n'), ((8654, 8690), 'pyfeng.Norm', 'pf.Norm', (['sigma'], {'intr': 'intr', 'divr': 'divr'}), '(sigma, intr=intr, divr=divr)\n', (8661, 8690), True, 'import pyfeng as pf\n'), ((9295, 9316), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (9309, 9316), True, 'import numpy as np\n'), ((9335, 9360), 'numpy.exp', 'np.exp', (['(-texp * self.divr)'], {}), '(-texp * self.divr)\n', (9341, 9360), True, 'import numpy as np\n'), ((9380, 9405), 'numpy.exp', 'np.exp', (['(-texp * 
self.intr)'], {}), '(-texp * self.intr)\n', (9386, 9405), True, 'import numpy as np\n'), ((9664, 9720), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.time_steps)'}), '(size=(self.n_samples, self.time_steps))\n', (9680, 9720), True, 'import numpy as np\n'), ((10339, 10360), 'numpy.zeros_like', 'np.zeros_like', (['strike'], {}), '(strike)\n', (10352, 10360), True, 'import numpy as np\n'), ((2275, 2287), 'numpy.log', 'np.log', (['spot'], {}), '(spot)\n', (2281, 2287), True, 'import numpy as np\n'), ((2290, 2313), 'numpy.ones_like', 'np.ones_like', (['path_size'], {}), '(path_size)\n', (2302, 2313), True, 'import numpy as np\n'), ((2351, 2374), 'numpy.ones_like', 'np.ones_like', (['path_size'], {}), '(path_size)\n', (2363, 2374), True, 'import numpy as np\n'), ((2428, 2451), 'numpy.ones_like', 'np.ones_like', (['path_size'], {}), '(path_size)\n', (2440, 2451), True, 'import numpy as np\n'), ((2788, 2812), 'numpy.exp', 'np.exp', (['log_sk[:, i + 1]'], {}), '(log_sk[:, i + 1])\n', (2794, 2812), True, 'import numpy as np\n'), ((5005, 5028), 'numpy.ones_like', 'np.ones_like', (['path_size'], {}), '(path_size)\n', (5017, 5028), True, 'import numpy as np\n'), ((5082, 5105), 'numpy.ones_like', 'np.ones_like', (['path_size'], {}), '(path_size)\n', (5094, 5105), True, 'import numpy as np\n'), ((7363, 7409), 'numpy.ones', 'np.ones', (['[self.n_samples, self.time_steps + 1]'], {}), '([self.n_samples, self.time_steps + 1])\n', (7370, 7409), True, 'import numpy as np\n'), ((7604, 7663), 'scipy.integrate.simps', 'spint.simps', (['(sigma_tk * sigma_tk)'], {'dx': '(texp / self.time_steps)'}), '(sigma_tk * sigma_tk, dx=texp / self.time_steps)\n', (7615, 7663), True, 'import scipy.integrate as spint\n'), ((7753, 7863), 'numpy.exp', 'np.exp', (['(self.rho * (sigma_tk[:, -1] - self.sigma) / self.vov - (self.rho * self.\n sigma) ** 2 * texp * I / 2)'], {}), '(self.rho * (sigma_tk[:, -1] - self.sigma) / self.vov - (self.rho *\n self.sigma) ** 2 * texp * I / 
2)\n', (7759, 7863), True, 'import numpy as np\n'), ((7883, 7915), 'numpy.sqrt', 'np.sqrt', (['((1 - self.rho ** 2) * I)'], {}), '((1 - self.rho ** 2) * I)\n', (7890, 7915), True, 'import numpy as np\n'), ((9821, 9867), 'numpy.ones', 'np.ones', (['[self.n_samples, self.time_steps + 1]'], {}), '([self.n_samples, self.time_steps + 1])\n', (9828, 9867), True, 'import numpy as np\n'), ((10062, 10121), 'scipy.integrate.simps', 'spint.simps', (['(sigma_tk * sigma_tk)'], {'dx': '(texp / self.time_steps)'}), '(sigma_tk * sigma_tk, dx=texp / self.time_steps)\n', (10073, 10121), True, 'import scipy.integrate as spint\n'), ((10291, 10323), 'numpy.sqrt', 'np.sqrt', (['((1 - self.rho ** 2) * I)'], {}), '((1 - self.rho ** 2) * I)\n', (10298, 10323), True, 'import numpy as np\n'), ((2095, 2121), 'numpy.sqrt', 'np.sqrt', (['(1 - self.rho ** 2)'], {}), '(1 - self.rho ** 2)\n', (2102, 2121), True, 'import numpy as np\n'), ((2927, 2963), 'numpy.maximum', 'np.maximum', (['(sk[:, -1] - strike[j])', '(0)'], {}), '(sk[:, -1] - strike[j], 0)\n', (2937, 2963), True, 'import numpy as np\n'), ((4813, 4839), 'numpy.sqrt', 'np.sqrt', (['(1 - self.rho ** 2)'], {}), '(1 - self.rho ** 2)\n', (4820, 4839), True, 'import numpy as np\n'), ((5500, 5536), 'numpy.maximum', 'np.maximum', (['(sk[:, -1] - strike[j])', '(0)'], {}), '(sk[:, -1] - strike[j], 0)\n', (5510, 5536), True, 'import numpy as np\n'), ((5214, 5231), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (5221, 5231), True, 'import numpy as np\n'), ((2568, 2585), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (2575, 2585), True, 'import numpy as np\n'), ((2701, 2718), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (2708, 2718), True, 'import numpy as np\n'), ((5308, 5325), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (5315, 5325), True, 'import numpy as np\n'), ((7527, 7544), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (7534, 7544), True, 'import numpy as np\n'), 
((9985, 10002), 'numpy.sqrt', 'np.sqrt', (['delta_tk'], {}), '(delta_tk)\n', (9992, 10002), True, 'import numpy as np\n')] |
from glob import glob
import xarray as xr
import numpy as np
def load(sg_num, var, dataPath):
    """Load every Seaglider netCDF dive file for glider ``sg_num`` and
    concatenate the variable(s) ``var`` along the ``ctd_time`` dimension.

    Fixes vs. the previous version:
      * removed a stray no-op ``filenames`` expression statement;
      * an empty match now raises FileNotFoundError instead of dying with a
        NameError on the undefined accumulator;
      * one ``xr.concat`` over all dives instead of O(n^2) pairwise concats.

    Args:
        sg_num: glider number used in the filename pattern ``p<sg_num>*.nc``
        var: variable name (or list of names) to extract from each file
        dataPath: directory containing the per-dive .nc files

    Returns:
        xarray object with dive number, CTD coordinates and a per-dive GPS fix
        attached as coordinates, concatenated over all matching dives.

    Raises:
        FileNotFoundError: if no files match the pattern.
    """
    filenames = sorted(glob(dataPath + 'p' + str(sg_num) + '*.nc'))
    if not filenames:
        raise FileNotFoundError('no files matching %sp%s*.nc' % (dataPath, sg_num))

    pieces = []
    for f in filenames:
        ds = xr.open_dataset(f)
        ds_sg_data = ds[var]
        # presumably the second GPS fix (index 1) is the dive's reference
        # position/time — TODO confirm against the file layout
        coords = ['log_gps_lon', 'log_gps_lat', 'log_gps_time']
        lon = ds[coords]['log_gps_lon'][1]
        lat = ds[coords]['log_gps_lat'][1]
        time = ds[coords]['log_gps_time'][1]
        # NOTE(review): dive number parsed from a fixed character position in
        # the path (f[63:66]) — fragile; verify against the actual path layout.
        dive = np.tile(np.array(f[63:66]).astype(float), ds_sg_data['ctd_time'].shape)
        ds_sg_data = (
            ds_sg_data
            .assign_coords(dive=('sg_data_point', dive.astype(float)))
            .assign_coords(ctd_time=('sg_data_point', ds_sg_data.ctd_time.data))
            .assign_coords(ctd_depth=('sg_data_point', ds_sg_data.ctd_depth.data))
            .assign_coords(ctd_pressure=('sg_data_point', ds_sg_data.ctd_pressure.data))
            .swap_dims({"sg_data_point": "ctd_time"})
            .assign_coords(lon_gps=lon)
            .assign_coords(lat_gps=lat)
            .assign_coords(time_gps=time)
        )
        pieces.append(ds_sg_data)

    # single concat is O(n); a lone dive is returned as-is (matches old behavior)
    if len(pieces) == 1:
        return pieces[0]
    return xr.concat(pieces, dim='ctd_time')
"xarray.open_dataset",
"numpy.array",
"xarray.concat"
] | [((234, 252), 'xarray.open_dataset', 'xr.open_dataset', (['f'], {}), '(f)\n', (249, 252), True, 'import xarray as xr\n'), ((1444, 1500), 'xarray.concat', 'xr.concat', (['[ds_sg_data_main, ds_sg_data]'], {'dim': '"""ctd_time"""'}), "([ds_sg_data_main, ds_sg_data], dim='ctd_time')\n", (1453, 1500), True, 'import xarray as xr\n'), ((526, 544), 'numpy.array', 'np.array', (['f[63:66]'], {}), '(f[63:66])\n', (534, 544), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
from pprint import pprint
from sklearn import preprocessing
import data_helpers
import torch
import torch.optim as optim
import torch.utils.data as data_utils
import torch.autograd as autograd
import torch.nn as nn
import numpy as np
import argparse, sys, time, json
import dataset_walker
from slu_model import SluConvNet
# Fix the RNG seeds at import time so data shuffling and model
# initialization are reproducible across runs.
np.random.seed(0)
torch.manual_seed(0)
def main(argv):
    """Train and evaluate a CNN baseline for the DSTC5 Speech Act Prediction
    (SAP) task.

    Reads train/test dataset names, the data root and the speaker role from
    the command line, builds vocabulary/inputs with ``data_helpers``, trains
    ``SluConvNet`` with BCE-with-logits loss, and reports test accuracy after
    every epoch.

    Note: the target predicted here is the *number* of speech-act labels per
    utterance (see the "build labels" section), one-hot encoded with a
    LabelBinarizer.
    """
    parser = argparse.ArgumentParser(description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset', dest='trainset', action='store', metavar='TRAINSET', required=True, help='')
    parser.add_argument('--testset', dest='testset', action='store', metavar='TESTSET', required=True, help='')
    parser.add_argument('--dataroot', dest='dataroot', action='store', required=True, metavar='PATH', help='')
    parser.add_argument('--roletype', dest='roletype', action='store', choices=['guide', 'tourist'], required=True, help='speaker')
    args = parser.parse_args()

    # ---- load training utterances (original transcripts) for the requested speaker ----
    train_utters = []
    trainset = dataset_walker.dataset_walker(args.trainset, dataroot=args.dataroot, labels=True, translations=True)
    sys.stderr.write('Loading training instances ... ')
    for call in trainset:
        for (log_utter, translations, label_utter) in call:
            if log_utter['speaker'].lower() != args.roletype:
                continue
            transcript = data_helpers.tokenize_and_lower(log_utter['transcript'])
            # flatten each speech act into "ACT_attribute" label strings
            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += ['%s_%s' % (sa['act'], attr) for attr in sa['attributes']]
            sa_label_list = sorted(set(sa_label_list))
            train_utters += [(transcript, log_utter['speaker'], sa_label_list)]
    sys.stderr.write('Done\n')

    # ---- load test utterances; the first translation hypothesis is used as input ----
    test_utters = []
    testset = dataset_walker.dataset_walker(args.testset, dataroot=args.dataroot, labels=True, translations=True)
    sys.stderr.write('Loading testing instances ... ')
    for call in testset:
        for (log_utter, translations, label_utter) in call:
            if log_utter['speaker'].lower() != args.roletype:
                continue
            try:
                translation = data_helpers.tokenize_and_lower(translations['translated'][0]['hyp'])
            except:  # NOTE(review): bare except silently maps any failure to ''
                translation = ''
            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += ['%s_%s' % (sa['act'], attr) for attr in sa['attributes']]
            sa_label_list = sorted(set(sa_label_list))
            test_utters += [(translation, log_utter['speaker'], sa_label_list)]

    pprint(train_utters[:2])
    pprint(test_utters[:2])

    # ---- load hyper-parameters ----
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)
    num_epochs = int(params['num_epochs'])
    validation_split = float(params['validation_split'])
    batch_size = int(params['batch_size'])
    multilabel = params['multilabel'] == "true"  # NOTE(review): parsed but not used below

    # ---- build vocabulary from the padded training sentences ----
    sents = [utter[0].split(' ') for utter in train_utters]
    max_sent_len = int(params['max_sent_len'])
    pad_sents = data_helpers.pad_sentences(sents, max_sent_len)
    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_sents)
    print("vocabulary size: %d" % len(vocabulary))
    # params['max_sent_len'] = max_sent_len

    # ---- map tokens to vocabulary indices ----
    train_inputs = data_helpers.build_input_data(pad_sents, vocabulary)
    test_sents = [utter[0].split(' ') for utter in test_utters]
    test_pad_sents = data_helpers.pad_sentences(test_sents, max_sent_len)
    test_inputs = data_helpers.build_input_data(test_pad_sents, vocabulary)

    # ---- build labels: the number of speech-act labels per utterance ----
    train_labels = [len(utter[2]) for utter in train_utters]
    test_labels = [len(utter[2]) for utter in test_utters]
    major_percentage = len([p for p in test_labels if p == 1]) * 100.0 / len(test_labels)
    print("majority percentage: %.2f%%" % major_percentage)
    label_binarizer = preprocessing.LabelBinarizer()
    label_binarizer.fit(train_labels + test_labels)
    train_labels = label_binarizer.transform(train_labels)
    test_labels = label_binarizer.transform(test_labels)

    # ---- shuffle; the validation split is computed but currently disabled ----
    indices = np.arange(train_inputs.shape[0])
    np.random.shuffle(indices)
    train_inputs = train_inputs[indices]
    train_labels = train_labels[indices]
    num_validation = int(validation_split * train_inputs.shape[0])
    # x_train = train_inputs[:-num_validation]
    # y_train = train_labels[:-num_validation]
    # x_val = train_inputs[-num_validation:]
    # y_val = train_labels[-num_validation:]
    x_train = train_inputs
    y_train = train_labels
    x_test = test_inputs
    y_test = test_labels

    # ---- wrap tensors in pytorch DataLoaders ----
    x_train = torch.from_numpy(x_train).long()
    y_train = torch.from_numpy(y_train).float()  # float targets for BCEWithLogitsLoss
    dataset_tensor = data_utils.TensorDataset(x_train, y_train)
    train_loader = data_utils.DataLoader(dataset_tensor, batch_size=batch_size, shuffle=True, num_workers=4,
                                         pin_memory=False)
    x_test = torch.from_numpy(x_test).long()
    y_test = torch.from_numpy(y_test).long()
    dataset_tensor = data_utils.TensorDataset(x_test, y_test)
    test_loader = data_utils.DataLoader(dataset_tensor, batch_size=batch_size, shuffle=False, num_workers=4,
                                        pin_memory=False)

    # ---- load pre-trained word embeddings ----
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    # ---- build the model (moved to GPU when available) ----
    model = SluConvNet(params, embedding_matrix, len(vocabulary), y_train.shape[1])
    if torch.cuda.is_available():
        model = model.cuda()

    learning_rate = float(params['learning_rate'])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    loss_fn = nn.BCEWithLogitsLoss()

    for epoch in range(num_epochs):
        model.train()  # set the model to training mode (apply dropout etc)
        for i, (inputs, labels) in enumerate(train_loader):
            inputs, labels = autograd.Variable(inputs), autograd.Variable(labels)
            if torch.cuda.is_available():
                inputs, labels = inputs.cuda(), labels.cuda()
            preds = model(inputs)
            if torch.cuda.is_available():
                preds = preds.cuda()
            loss = loss_fn(preds, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                print("current loss: %.4f" % loss)

        # evaluate on the full test set after every epoch
        model.eval()  # set the model to evaluation mode
        true_acts, pred_acts, accuracy = evaluate(model, label_binarizer, test_loader, y_test.numpy())
        print(', '.join(['%s' % p for p in pred_acts]))
        predicted_major_percentage = len([p for p in pred_acts if p == 1]) * 100.0 / len(pred_acts)
        print("predicted majority percentage: %.2f%%" % predicted_major_percentage)  # 99.49%
        print("Accuracy: %.4f\n" % accuracy)

    # end of training: final evaluation
    true_acts, pred_acts, accuracy = evaluate(model, label_binarizer, test_loader, y_test.numpy())
    print("Accuracy: %.4f\n" % accuracy)
# end of main
def evaluate(model, label_binarizer, test_loader, y_test):
preds = None
for i, (x_batch, _) in enumerate(test_loader):
inputs = autograd.Variable(x_batch)
if torch.cuda.is_available():
inputs = inputs.cuda()
preds_batch = model(inputs)
preds_batch = preds_batch.cpu().data.numpy()
if preds is None:
preds = preds_batch
else:
preds = np.concatenate((preds, preds_batch), axis=0) # merge along batch axis
pred_labels = np.zeros(y_test.shape)
for i, argmax in enumerate(preds.argmax(axis=1)):
pred_labels[i][argmax] = 1
pred_acts = label_binarizer.inverse_transform(pred_labels)
true_acts = label_binarizer.inverse_transform(y_test)
accuracy = sum(pred_acts == true_acts) * 1.0 / len(pred_acts)
return true_acts, pred_acts, accuracy
def predict_onelabel(preds):
pred_labels = np.zeros(preds.shape)
preds = np.argmax(preds, axis=1)
for i, label_index in enumerate(preds):
pred_labels[i][label_index] = 1
return pred_labels
def predict_multilabel(preds):
threshold = 0.2
pred_labels = np.zeros(preds.shape)
for i, pred in enumerate(preds):
vec = np.array([1 if p > threshold else 0 for p in pred])
pred_labels[i] = vec
return pred_labels
if __name__ == "__main__":
main(sys.argv)
| [
"sklearn.preprocessing.LabelBinarizer",
"numpy.random.seed",
"argparse.ArgumentParser",
"data_helpers.build_vocab",
"numpy.argmax",
"data_helpers.pad_sentences",
"numpy.arange",
"pprint.pprint",
"torch.utils.data.TensorDataset",
"data_helpers.tokenize_and_lower",
"torch.utils.data.DataLoader",
... | [((402, 419), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (416, 419), True, 'import numpy as np\n'), ((421, 441), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (438, 441), False, 'import torch\n'), ((475, 545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CNN baseline for DSTC5 SAP Task"""'}), "(description='CNN baseline for DSTC5 SAP Task')\n", (498, 545), False, 'import argparse, sys, time, json\n'), ((1098, 1203), 'dataset_walker.dataset_walker', 'dataset_walker.dataset_walker', (['args.trainset'], {'dataroot': 'args.dataroot', 'labels': '(True)', 'translations': '(True)'}), '(args.trainset, dataroot=args.dataroot, labels\n =True, translations=True)\n', (1127, 1203), False, 'import dataset_walker\n'), ((1204, 1255), 'sys.stderr.write', 'sys.stderr.write', (['"""Loading training instances ... """'], {}), "('Loading training instances ... ')\n", (1220, 1255), False, 'import argparse, sys, time, json\n'), ((1872, 1898), 'sys.stderr.write', 'sys.stderr.write', (['"""Done\n"""'], {}), "('Done\\n')\n", (1888, 1898), False, 'import argparse, sys, time, json\n'), ((1938, 2042), 'dataset_walker.dataset_walker', 'dataset_walker.dataset_walker', (['args.testset'], {'dataroot': 'args.dataroot', 'labels': '(True)', 'translations': '(True)'}), '(args.testset, dataroot=args.dataroot, labels=\n True, translations=True)\n', (1967, 2042), False, 'import dataset_walker\n'), ((2043, 2093), 'sys.stderr.write', 'sys.stderr.write', (['"""Loading testing instances ... """'], {}), "('Loading testing instances ... 
')\n", (2059, 2093), False, 'import argparse, sys, time, json\n'), ((2802, 2826), 'pprint.pprint', 'pprint', (['train_utters[:2]'], {}), '(train_utters[:2])\n', (2808, 2826), False, 'from pprint import pprint\n'), ((2832, 2855), 'pprint.pprint', 'pprint', (['test_utters[:2]'], {}), '(test_utters[:2])\n', (2838, 2855), False, 'from pprint import pprint\n'), ((2895, 2941), 'data_helpers.load_params', 'data_helpers.load_params', (['"""parameters/cnn.txt"""'], {}), "('parameters/cnn.txt')\n", (2919, 2941), False, 'import data_helpers\n'), ((2947, 2961), 'pprint.pprint', 'pprint', (['params'], {}), '(params)\n', (2953, 2961), False, 'from pprint import pprint\n'), ((3307, 3354), 'data_helpers.pad_sentences', 'data_helpers.pad_sentences', (['sents', 'max_sent_len'], {}), '(sents, max_sent_len)\n', (3333, 3354), False, 'import data_helpers\n'), ((3389, 3424), 'data_helpers.build_vocab', 'data_helpers.build_vocab', (['pad_sents'], {}), '(pad_sents)\n', (3413, 3424), False, 'import data_helpers\n'), ((3564, 3616), 'data_helpers.build_input_data', 'data_helpers.build_input_data', (['pad_sents', 'vocabulary'], {}), '(pad_sents, vocabulary)\n', (3593, 3616), False, 'import data_helpers\n'), ((3706, 3758), 'data_helpers.pad_sentences', 'data_helpers.pad_sentences', (['test_sents', 'max_sent_len'], {}), '(test_sents, max_sent_len)\n', (3732, 3758), False, 'import data_helpers\n'), ((3778, 3835), 'data_helpers.build_input_data', 'data_helpers.build_input_data', (['test_pad_sents', 'vocabulary'], {}), '(test_pad_sents, vocabulary)\n', (3807, 3835), False, 'import data_helpers\n'), ((4155, 4185), 'sklearn.preprocessing.LabelBinarizer', 'preprocessing.LabelBinarizer', ([], {}), '()\n', (4183, 4185), False, 'from sklearn import preprocessing\n'), ((4404, 4436), 'numpy.arange', 'np.arange', (['train_inputs.shape[0]'], {}), '(train_inputs.shape[0])\n', (4413, 4436), True, 'import numpy as np\n'), ((4442, 4468), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), 
'(indices)\n', (4459, 4468), True, 'import numpy as np\n'), ((5081, 5123), 'torch.utils.data.TensorDataset', 'data_utils.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (5105, 5123), True, 'import torch.utils.data as data_utils\n'), ((5144, 5255), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['dataset_tensor'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(False)'}), '(dataset_tensor, batch_size=batch_size, shuffle=True,\n num_workers=4, pin_memory=False)\n', (5165, 5255), True, 'import torch.utils.data as data_utils\n'), ((5410, 5450), 'torch.utils.data.TensorDataset', 'data_utils.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (5434, 5450), True, 'import torch.utils.data as data_utils\n'), ((5470, 5582), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['dataset_tensor'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'pin_memory': '(False)'}), '(dataset_tensor, batch_size=batch_size, shuffle=False,\n num_workers=4, pin_memory=False)\n', (5491, 5582), True, 'import torch.utils.data as data_utils\n'), ((5739, 5842), 'data_helpers.load_embedding', 'data_helpers.load_embedding', (['vocabulary'], {'embedding_dim': 'embedding_dim', 'embedding': "params['embedding']"}), "(vocabulary, embedding_dim=embedding_dim,\n embedding=params['embedding'])\n", (5766, 5842), False, 'import data_helpers\n'), ((5954, 5979), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5977, 5979), False, 'import torch\n'), ((6146, 6168), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6166, 6168), True, 'import torch.nn as nn\n'), ((8070, 8092), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (8078, 8092), True, 'import numpy as np\n'), ((8470, 8491), 'numpy.zeros', 'np.zeros', (['preds.shape'], {}), '(preds.shape)\n', (8478, 8491), True, 'import numpy as np\n'), ((8505, 8529), 'numpy.argmax', 
'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (8514, 8529), True, 'import numpy as np\n'), ((8716, 8737), 'numpy.zeros', 'np.zeros', (['preds.shape'], {}), '(preds.shape)\n', (8724, 8737), True, 'import numpy as np\n'), ((7685, 7711), 'torch.autograd.Variable', 'autograd.Variable', (['x_batch'], {}), '(x_batch)\n', (7702, 7711), True, 'import torch.autograd as autograd\n'), ((7724, 7749), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7747, 7749), False, 'import torch\n'), ((8791, 8844), 'numpy.array', 'np.array', (['[(1 if p > threshold else 0) for p in pred]'], {}), '([(1 if p > threshold else 0) for p in pred])\n', (8799, 8844), True, 'import numpy as np\n'), ((1459, 1515), 'data_helpers.tokenize_and_lower', 'data_helpers.tokenize_and_lower', (["log_utter['transcript']"], {}), "(log_utter['transcript'])\n", (1490, 1515), False, 'import data_helpers\n'), ((4977, 5002), 'torch.from_numpy', 'torch.from_numpy', (['x_train'], {}), '(x_train)\n', (4993, 5002), False, 'import torch\n'), ((5025, 5050), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (5041, 5050), False, 'import torch\n'), ((5310, 5334), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (5326, 5334), False, 'import torch\n'), ((5356, 5380), 'torch.from_numpy', 'torch.from_numpy', (['y_test'], {}), '(y_test)\n', (5372, 5380), False, 'import torch\n'), ((6446, 6471), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6469, 6471), False, 'import torch\n'), ((6589, 6614), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6612, 6614), False, 'import torch\n'), ((7976, 8020), 'numpy.concatenate', 'np.concatenate', (['(preds, preds_batch)'], {'axis': '(0)'}), '((preds, preds_batch), axis=0)\n', (7990, 8020), True, 'import numpy as np\n'), ((2319, 2388), 'data_helpers.tokenize_and_lower', 'data_helpers.tokenize_and_lower', (["translations['translated'][0]['hyp']"], {}), 
"(translations['translated'][0]['hyp'])\n", (2350, 2388), False, 'import data_helpers\n'), ((6377, 6402), 'torch.autograd.Variable', 'autograd.Variable', (['inputs'], {}), '(inputs)\n', (6394, 6402), True, 'import torch.autograd as autograd\n'), ((6404, 6429), 'torch.autograd.Variable', 'autograd.Variable', (['labels'], {}), '(labels)\n', (6421, 6429), True, 'import torch.autograd as autograd\n')] |
import sys
import datetime
import platform
import random
import taichi
def get_os_name():
name = platform.platform()
# in python 3.8, platform.platform() uses mac_ver() on macOS
# it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
if name.lower().startswith('darwin') or name.lower().startswith('macos'):
return 'osx'
elif name.lower().startswith('windows'):
return 'win'
elif name.lower().startswith('linux'):
return 'linux'
assert False, "Unknown platform name %s" % name
def get_uuid():
print(
'Warning: get_uuid is deprecated. Please use get_unique_task_id instead.'
)
return get_unique_task_id()
def get_unique_task_id():
return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
'%05d' % random.randint(0, 10000))
import copy
import numpy as np
import ctypes
def config_from_dict(args):
from taichi.core import tc_core
d = copy.copy(args)
for k in d:
if isinstance(d[k], tc_core.Vector2f):
d[k] = '({}, {})'.format(d[k].x, d[k].y)
if isinstance(d[k], tc_core.Vector3f):
d[k] = '({}, {}, {})'.format(d[k].x, d[k].y, d[k].z)
d[k] = str(d[k])
return tc_core.config_from_dict(d)
def make_polygon(points, scale):
import taichi as tc
polygon = tc.core.Vector2fList()
for p in points:
if type(p) == list or type(p) == tuple:
polygon.append(scale * vec(p[0], p[1]))
else:
polygon.append(scale * p)
return polygon
def veci(*args):
from taichi.core import tc_core
if isinstance(args[0], tc_core.Vector2i):
return args[0]
if isinstance(args[0], tc_core.Vector3i):
return args[0]
if isinstance(args[0], tuple):
args = tuple(*args)
if len(args) == 2:
return tc_core.Vector2i(int(args[0]), int(args[1]))
elif len(args) == 3:
return tc_core.Vector3i(int(args[0]), int(args[1]), int(args[2]))
elif len(args) == 4:
return tc_core.Vector4i(int(args[0]), int(args[1]), int(args[2]),
int(args[3]))
else:
assert False, type(args[0])
def vec(*args):
from taichi.core import tc_core
if isinstance(args[0], tc_core.Vector2f):
return args[0]
if isinstance(args[0], tc_core.Vector3f):
return args[0]
if isinstance(args[0], tc_core.Vector4f):
return args[0]
if isinstance(args[0], tc_core.Vector2d):
return args[0]
if isinstance(args[0], tc_core.Vector3d):
return args[0]
if isinstance(args[0], tc_core.Vector4d):
return args[0]
if isinstance(args[0], tuple):
args = tuple(*args)
if tc_core.get_default_float_size() == 4:
if len(args) == 2:
return tc_core.Vector2f(float(args[0]), float(args[1]))
elif len(args) == 3:
return tc_core.Vector3f(float(args[0]), float(args[1]),
float(args[2]))
elif len(args) == 4:
return tc_core.Vector4f(float(args[0]), float(args[1]),
float(args[2]), float(args[3]))
else:
assert False, type(args[0])
else:
if len(args) == 2:
return tc_core.Vector2d(float(args[0]), float(args[1]))
elif len(args) == 3:
return tc_core.Vector3d(float(args[0]), float(args[1]),
float(args[2]))
elif len(args) == 4:
return tc_core.Vector4d(float(args[0]), float(args[1]),
float(args[2]), float(args[3]))
else:
assert False, type(args[0])
def default_const_or_evaluate(f, default, u, v):
if f == None:
return default
if type(f) in [float, int, tuple]:
return f
return f(u, v)
def const_or_evaluate(f, u, v):
import taichi as tc
if type(f) in [float, int, tuple, tc.core.Vector2, tc.core.Vector3]:
return f
return f(u, v)
# color_255: actual color
# arr: the transparance of the image, if transform is not 'levelset'
# transform: (x0, x1) as rescaling or simply 'levelset'
def array2d_to_image(arr,
width,
height,
color_255=None,
transform='levelset',
alpha_scale=1.0):
from taichi import tc_core
if color_255 is None:
assert isinstance(arr, tc_core.Array2DVector3) or isinstance(
arr, tc_core.Array2DVector4)
import pyglet
rasterized = arr.rasterize(width, height)
raw_data = np.empty((width, height, arr.get_channels()), dtype=np.float32)
rasterized.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
if transform == 'levelset':
raw_data = (raw_data <= 0).astype(np.float32)
else:
x0, x1 = transform
raw_data = (np.clip(raw_data, x0, x1) - x0) / (x1 - x0)
raw_data = raw_data.swapaxes(0, 1).copy()
if isinstance(arr, tc_core.Array2DVector3):
dat = np.stack(
[raw_data,
np.ones(shape=(width, height, 1), dtype=np.float32)],
axis=2).flatten().reshape((height * width, 4))
dat = dat * 255.0
elif isinstance(arr, tc_core.Array2DVector4):
dat = raw_data.flatten().reshape((height * width, 4))
dat = dat * 255.0
else:
raw_data = raw_data.flatten()
dat = np.outer(np.ones_like(raw_data), color_255)
dat[:, 3] = (color_255[3] * raw_data)
dat[:, 3] *= alpha_scale
dat = np.clip(dat, 0.0, 255.0)
dat = dat.astype(np.uint8)
assert dat.shape == (height * width, 4)
image_data = pyglet.image.ImageData(width, height, 'RGBA', dat.tostring())
return image_data
def image_buffer_to_image(arr):
import pyglet
raw_data = np.empty((arr.get_width() * arr.get_height() * 3, ),
dtype='float32')
arr.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
dat = (raw_data * 255.0).astype('uint8')
dat.reshape((len(raw_data) / 3, 3))
data_string = dat.tostring()
image_data = pyglet.image.ImageData(arr.get_width(), arr.get_height(),
'RGB', data_string)
return image_data
def image_buffer_to_ndarray(arr, bgr=False):
channels = arr.get_channels()
raw_data = np.empty((arr.get_width() * arr.get_height() * channels, ),
dtype='float32')
arr.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
dat = raw_data.astype('float32')
ret = dat.reshape((arr.get_width(), arr.get_height(), channels))
if bgr:
ret = ret[:, :, ::-1]
return ret
def arange(x, y, d):
while x < y:
yield x
x += d
# TODO: remove this...
def P(**kwargs):
return config_from_dict(kwargs)
def imread(fn, bgr=False):
img = taichi.core.Array2DVector3(taichi.veci(0, 0),
taichi.vec(0.0, 0.0, 0.0))
img.read(fn)
return image_buffer_to_ndarray(img, bgr)[::-1]
def read_image(fn, linearize=False):
img = taichi.core.Array2DVector3(taichi.veci(0, 0),
taichi.vec(0.0, 0.0, 0.0))
img.read(fn, linearize)
return img
def show_image(window_name, img):
from taichi.gui.image_viewer import show_image
show_image(window_name, img)
def save_image(fn, img):
img.write(fn)
def ndarray_to_array2d(array):
if array.dtype == np.uint8:
array = (array * (1 / 255.0)).astype(np.float32)
assert array.dtype == np.float32
array = array.copy()
input_ptr = array.ctypes.data_as(ctypes.c_void_p).value
if len(array.shape) == 2 or array.shape[2] == 1:
arr = taichi.core.Array2Dreal(Vectori(0, 0))
elif array.shape[2] == 3:
arr = taichi.core.Array2DVector3(Vectori(0, 0), taichi.Vector(0, 0, 0))
elif array.shape[2] == 4:
arr = taichi.core.Array2DVector4(Vectori(0, 0),
taichi.Vector(0, 0, 0, 0))
else:
assert False, 'ndarray has to be n*m, n*m*3, or n*m*4'
arr.from_ndarray(input_ptr, array.shape[0], array.shape[1])
return arr
def array2d_to_ndarray(arr):
if isinstance(arr, taichi.core.Array2DVector3):
ndarray = np.empty((arr.get_width(), arr.get_height(), 3),
dtype='float32')
elif isinstance(arr, taichi.core.Array2DVector4):
ndarray = np.empty((arr.get_width(), arr.get_height(), 4),
dtype='float32')
elif isinstance(arr, taichi.core.Array2Dreal):
ndarray = np.empty((arr.get_width(), arr.get_height()),
dtype='float32')
else:
assert False, 'Array2d must have type real, Vector3, or Vector4'
output_ptr = ndarray.ctypes.data_as(ctypes.c_void_p).value
arr.to_ndarray(output_ptr)
return ndarray
def opencv_img_to_taichi_img(img):
return (img.swapaxes(0, 1)[:, ::-1, ::-1] * (1 / 255.0)).astype(np.float32)
def sleep(seconds=-1):
if seconds == -1:
while True:
time.sleep(1) # Wait for Ctrl-C
else:
time.sleep(seconds)
class Tee():
def __init__(self, name):
self.file = open(name, 'w')
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __del__(self):
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
self.file.flush()
self.stdout.flush()
def write_to_file(self, data):
self.file.write(data)
import inspect
def get_file_name(asc=0):
return inspect.stack()[1 + asc][1]
def get_function_name(asc=0):
return inspect.stack()[1 + asc][3]
def get_line_number(asc=0):
return inspect.stack()[1 + asc][2]
def get_logging(name):
def logger(msg, *args, **kwargs):
# Python inspection takes time (~0.1ms) so avoid it as much as possible
if taichi.tc_core.logging_effective(name):
msg_formatted = msg.format(*args, **kwargs)
func = getattr(taichi.tc_core, name)
frame = inspect.currentframe().f_back.f_back
file_name, lineno, func_name, _, _ = inspect.getframeinfo(frame)
msg = f'[{file_name}:{func_name}@{lineno}] {msg_formatted}'
func(msg)
return logger
DEBUG = 'debug'
TRACE = 'trace'
INFO = 'info'
WARN = 'warn'
ERROR = 'error'
CRITICAL = 'critical'
debug = get_logging(DEBUG)
trace = get_logging(TRACE)
info = get_logging(INFO)
warn = get_logging(WARN)
error = get_logging(ERROR)
critical = get_logging(CRITICAL)
def redirect_print_to_log():
class Logger:
def write(self, msg):
taichi.core.info('[{}:{}@{}] {}'.format(get_file_name(1),
get_function_name(1),
get_line_number(1), msg))
def flush(self):
taichi.core.flush_log()
sys.stdout = Logger()
def duplicate_stdout_to_file(fn):
taichi.tc_core.duplicate_stdout_to_file(fn)
def set_logging_level(level):
taichi.tc_core.set_logging_level(level)
def set_gdb_trigger(on=True):
taichi.tc_core.set_core_trigger_gdb_when_crash(on)
| [
"taichi.tc_core.set_core_trigger_gdb_when_crash",
"taichi.core.flush_log",
"numpy.ones",
"numpy.clip",
"taichi.Vector",
"taichi.tc_core.set_logging_level",
"random.randint",
"taichi.vec",
"inspect.getframeinfo",
"taichi.tc_core.logging_effective",
"taichi.tc_core.duplicate_stdout_to_file",
"ta... | [((103, 122), 'platform.platform', 'platform.platform', ([], {}), '()\n', (120, 122), False, 'import platform\n'), ((947, 962), 'copy.copy', 'copy.copy', (['args'], {}), '(args)\n', (956, 962), False, 'import copy\n'), ((1227, 1254), 'taichi.tc_core.config_from_dict', 'tc_core.config_from_dict', (['d'], {}), '(d)\n', (1251, 1254), False, 'from taichi import tc_core\n'), ((1328, 1350), 'taichi.core.Vector2fList', 'tc.core.Vector2fList', ([], {}), '()\n', (1348, 1350), True, 'import taichi as tc\n'), ((5568, 5592), 'numpy.clip', 'np.clip', (['dat', '(0.0)', '(255.0)'], {}), '(dat, 0.0, 255.0)\n', (5575, 5592), True, 'import numpy as np\n'), ((7358, 7386), 'taichi.gui.image_viewer.show_image', 'show_image', (['window_name', 'img'], {}), '(window_name, img)\n', (7368, 7386), False, 'from taichi.gui.image_viewer import show_image\n'), ((11103, 11146), 'taichi.tc_core.duplicate_stdout_to_file', 'taichi.tc_core.duplicate_stdout_to_file', (['fn'], {}), '(fn)\n', (11142, 11146), False, 'import taichi\n'), ((11183, 11222), 'taichi.tc_core.set_logging_level', 'taichi.tc_core.set_logging_level', (['level'], {}), '(level)\n', (11215, 11222), False, 'import taichi\n'), ((11259, 11309), 'taichi.tc_core.set_core_trigger_gdb_when_crash', 'taichi.tc_core.set_core_trigger_gdb_when_crash', (['on'], {}), '(on)\n', (11305, 11309), False, 'import taichi\n'), ((2710, 2742), 'taichi.tc_core.get_default_float_size', 'tc_core.get_default_float_size', ([], {}), '()\n', (2740, 2742), False, 'from taichi import tc_core\n'), ((6914, 6931), 'taichi.veci', 'taichi.veci', (['(0)', '(0)'], {}), '(0, 0)\n', (6925, 6931), False, 'import taichi\n'), ((6970, 6995), 'taichi.vec', 'taichi.vec', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (6980, 6995), False, 'import taichi\n'), ((7141, 7158), 'taichi.veci', 'taichi.veci', (['(0)', '(0)'], {}), '(0, 0)\n', (7152, 7158), False, 'import taichi\n'), ((7197, 7222), 'taichi.vec', 'taichi.vec', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 
0.0)\n', (7207, 7222), False, 'import taichi\n'), ((10016, 10054), 'taichi.tc_core.logging_effective', 'taichi.tc_core.logging_effective', (['name'], {}), '(name)\n', (10048, 10054), False, 'import taichi\n'), ((800, 824), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (814, 824), False, 'import random\n'), ((9694, 9709), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (9707, 9709), False, 'import inspect\n'), ((9765, 9780), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (9778, 9780), False, 'import inspect\n'), ((9834, 9849), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (9847, 9849), False, 'import inspect\n'), ((10267, 10294), 'inspect.getframeinfo', 'inspect.getframeinfo', (['frame'], {}), '(frame)\n', (10287, 10294), False, 'import inspect\n'), ((11012, 11035), 'taichi.core.flush_log', 'taichi.core.flush_log', ([], {}), '()\n', (11033, 11035), False, 'import taichi\n'), ((718, 741), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (739, 741), False, 'import datetime\n'), ((4902, 4927), 'numpy.clip', 'np.clip', (['raw_data', 'x0', 'x1'], {}), '(raw_data, x0, x1)\n', (4909, 4927), True, 'import numpy as np\n'), ((5448, 5470), 'numpy.ones_like', 'np.ones_like', (['raw_data'], {}), '(raw_data)\n', (5460, 5470), True, 'import numpy as np\n'), ((7868, 7890), 'taichi.Vector', 'taichi.Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (7881, 7890), False, 'import taichi\n'), ((8019, 8044), 'taichi.Vector', 'taichi.Vector', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (8032, 8044), False, 'import taichi\n'), ((10181, 10203), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (10201, 10203), False, 'import inspect\n'), ((5100, 5151), 'numpy.ones', 'np.ones', ([], {'shape': '(width, height, 1)', 'dtype': 'np.float32'}), '(shape=(width, height, 1), dtype=np.float32)\n', (5107, 5151), True, 'import numpy as np\n')] |
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
import os
import sys
import glob
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from params.dataset_params import get_dataset_params
par = get_dataset_params('hinterstoisser')
# data_ids = range(1, par.obj_count + 1)
data_ids = range(1, par['scene_count'] + 1)
# depth_mpath = par.train_depth_mpath
depth_mpath = par['test_depth_mpath']
scale = 0.1
for data_id in data_ids:
print('Processing id: ' + str(data_id))
depth_paths = sorted(glob.glob(os.path.join(
os.path.dirname(depth_mpath.format(data_id, 0)), '*')))
for depth_path in depth_paths:
d = inout.load_depth(depth_path)
d *= scale
d = np.round(d).astype(np.uint16)
inout.save_depth(depth_path, d)
| [
"os.path.abspath",
"pysixd.inout.save_depth",
"pysixd.inout.load_depth",
"params.dataset_params.get_dataset_params",
"numpy.round"
] | [((313, 349), 'params.dataset_params.get_dataset_params', 'get_dataset_params', (['"""hinterstoisser"""'], {}), "('hinterstoisser')\n", (331, 349), False, 'from params.dataset_params import get_dataset_params\n'), ((756, 784), 'pysixd.inout.load_depth', 'inout.load_depth', (['depth_path'], {}), '(depth_path)\n', (772, 784), False, 'from pysixd import inout\n'), ((854, 885), 'pysixd.inout.save_depth', 'inout.save_depth', (['depth_path', 'd'], {}), '(depth_path, d)\n', (870, 885), False, 'from pysixd import inout\n'), ((199, 224), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (214, 224), False, 'import os\n'), ((816, 827), 'numpy.round', 'np.round', (['d'], {}), '(d)\n', (824, 827), True, 'import numpy as np\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
import res_rc
import ui as gui
import global_ as g
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pickle
import sys
import random
import numpy as np
import argparse
import Reader
import Spectrum
import Algorithm
class Button:
def __init__(self,button,entry):
self.button = button
self.entry = entry
g.values[self.entry] = self.button.value()
self.button.valueChanged.connect(self.valuechange)
def valuechange(self):
g.values[self.entry] = self.button.value()
if(self.entry == "lb" or self.entry == "hb"):
for i in range(len(g.canvas_list)):
try:
g.canvas_list[i].change_x()
except:
pass
elif(self.entry == "w" or self.entry == "T"):
try:
g.Spectrum.Boltzmann_weight_IR()
g.canvas_list[2].plot_IR_theo()
g.Spectrum.Boltzmann_weight_VCD()
g.canvas_list[3].plot_VCD_theo()
except:
pass
class Click_Button:
def __init__(self,button,entry,args = None):
self.button = button
self.entry = entry
self.button.clicked.connect(self.click)
self.args = args
def click(self):
if(self.entry == "normalize_1"):
try:
tmp = (g.exp_ir[:,0] <= g.values["hb"]) & (g.exp_ir[:,0] >= g.values["lb"])
g.exp_ir[:,1] = g.exp_ir[:,1]/np.max(g.exp_ir[tmp,1])
g.canvas_list[0].plot_IR()
except:
pass
try:
tmp = (g.exp_vcd[:,0] <= g.values["hb"]) & (g.exp_vcd[:,0] >= g.values["lb"])
g.exp_vcd[:,1] = g.exp_vcd[:,1]/np.max(np.abs(g.exp_vcd[tmp,1]))
g.canvas_list[1].plot_VCD()
except:
pass
elif(self.entry == "normalize_2"):
try:
tmp = (g.theo_ir[:,0] <= g.values["hb"]) & (g.theo_ir[:,0] >= g.values["lb"])
g.theo_ir[:,1] = g.theo_ir[:,1]/np.max(g.theo_ir[tmp,1])
g.canvas_list[2].plot_IR()
except:
pass
try:
tmp = (g.theo_vcd[:,0] <= g.values["hb"]) & (g.theo_vcd[:,0] >= g.values["lb"])
g.theo_vcd[:,1] = g.theo_vcd[:,1]/np.max(np.abs(g.theo_vcd[tmp,1]))
g.canvas_list[3].plot_VCD()
except:
pass
elif(self.entry == "automatic"):
try:
tmp = (g.exp_ir[:,0] <= g.values["hb"]) & (g.exp_ir[:,0] >= g.values["lb"])
tmp_ir = np.asarray(g.exp_ir[tmp])
g.peak_list_x = []
g.peak_list_y = []
g.peak_list_VCD_y = []
for i in range(1,len(tmp_ir)-1):
if(tmp_ir[i-1,1]<=tmp_ir[i,1]>=tmp_ir[i+1,1]):
g.peak_list_x.append(tmp_ir[i,0])
g.peak_list_y.append(tmp_ir[i,1])
print(g.peak_list_x)
g.canvas_list[0].plot_peaks()
g.exp_peaks = np.zeros((len(g.peak_list_x),2))
g.exp_peaks[:,0] = np.asarray(g.peak_list_x)
g.exp_peaks[:,1] = np.asarray(g.peak_list_y)
for peak in g.peak_list_x:
g.peak_list_VCD_y.append(g.exp_vcd[abs(g.exp_vcd[:,0]-peak)<10e-1,1][0])
g.canvas_list[1].plot_peaks_VCD()
except:
pass
elif(self.entry == "align"):
try:
del Algo
print("del")
except:
pass
Algo = Algorithm.Algorithm()
if(g.set_VCD==False):
g.returnvalue, g.old_freq, g.freq_new, g.inten_new = Algo.Needleman_IR()
else:
g.returnvalue, g.old_freq, g.freq_new, g.inten_new,g.inten_VCD_new = Algo.Needleman_IR()
g.canvas_list[4].plot_IR_assigned()
g.Spectrum.IR_shifted()
if(g.set_VCD==True):
g.Spectrum.VCD_shifted()
p_ir,p_vcd = g.Spectrum.integrate()
self.args.setText("Score: " + str(g.returnvalue)[0:6]+"\np_ir: " + str(p_ir)[0:4]+"\np_vcd: " + str(p_vcd)[0:4]+"\n")
else:
p_ir = g.Spectrum.integrate()
self.args.setText("Score: " + str(g.returnvalue)[0:6]+"\np_ir: " + str(p_ir)[0:4]+"\n")
g.canvas_list[5].plot_IR_shifted()
class Load_Button:
def __init__(self,button,entry):
self.button = button
self.entry = entry
self.button.clicked.connect(self.click)
def click(self):
if(self.entry == "experimental IR"):
try:
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Select "+self.entry +" Spectrum","","")
g.exp_ir = np.loadtxt(fileName,usecols=(0,1))
g.exp_ir = g.exp_ir[g.exp_ir[:,0].argsort()]
g.canvas_list[0].plot_IR()
g.set_IR = True
except:
pass
elif(self.entry == "experimental VCD"):
try:
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Select "+self.entry +" Spectrum","","")
g.exp_vcd = np.loadtxt(fileName,usecols=(0,1,))
g.exp_vcd = g.exp_vcd[g.exp_vcd[:,0].argsort()]
g.canvas_list[1].plot_VCD()
g.set_VCD = True
except:
pass
elif(self.entry == "energies"):
fileName_energy, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Select "+self.entry +" Energies","","")
g.E = pickle.load(open(fileName_energy,"rb"))
elif(self.entry == "theoretical IR"):
fileName_IR, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Select "+self.entry +" Spectrum","","")
g.theo_ir = pickle.load(open(fileName_IR,"rb"))
g.Spectrum.Boltzmann_weight_IR()
g.canvas_list[2].plot_IR_theo()
elif(self.entry == "theoretical VCD"):
try:
fileName_VCD, _ = QtWidgets.QFileDialog.getOpenFileName(None,"Select "+self.entry +" Spectrum","","")
g.theo_vcd = pickle.load(open(fileName_VCD,"rb"))
g.Spectrum.Boltzmann_weight_VCD()
g.canvas_list[3].plot_VCD_theo()
except:
pass
class Canvas(FigureCanvas):
def __init__(self, single = None, parent = None, Button = None, dpi = 100):
height = parent.height()/100.
width = parent.width()/100.
fig = Figure(figsize=(width, height), dpi=dpi)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
self.ax = self.figure.add_subplot(111)
#Button
def change_x(self):
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
def plot_IR(self):
self.delete()
self.ax.plot(g.exp_ir[:,0],g.exp_ir[:,1],color="black")
self.ax.set_ylim(0,1.05)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
def plot_IR_shifted(self):
self.delete()
self.ax.plot(g.exp_ir[:,0],g.exp_ir[:,1],color="black")
self.ax.plot(g.IR_shifted[:,0],g.IR_shifted[:,1],color="red")
if(g.set_VCD==True):
self.ax.plot(g.exp_vcd[:,0],g.exp_vcd[:,1],"--",color="black")
self.ax.plot(g.VCD_shifted[:,0],g.VCD_shifted[:,1],"--",color="red")
self.ax.set_ylim(-1.05,1.05)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
def plot_peaks(self):
self.delete()
self.ax.plot(g.exp_ir[:,0],g.exp_ir[:,1],color="black")
self.ax.set_ylim(0,1.05)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.ax.plot(g.peak_list_x,g.peak_list_y,"o",color="blue")
self.draw()
def plot_peaks_VCD(self):
self.delete()
self.ax.plot(g.exp_vcd[:,0],g.exp_vcd[:,1],"--",color="black")
self.ax.plot(g.peak_list_x,g.peak_list_VCD_y,"o",color="blue")
self.ax.set_ylim(-1.05,1.05)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
def delete(self):
try:
while(len(self.ax.lines)>0):
self.ax.lines[-1].remove()
except:
pass
def plot_VCD(self):
self.delete()
self.ax.plot(g.exp_vcd[:,0],g.exp_vcd[:,1],"--",color="black")
self.ax.set_ylim(-1,1)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
def plot_IR_theo(self):
self.delete()
self.ax.plot(g.spectrum_boltzmann[:,0],g.spectrum_boltzmann[:,1],color="red") ##x_axis 0..2000
self.ax.set_ylim(0,1)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
tmp_ir = g.spectrum_boltzmann[g.values["lb"]:g.values["hb"]]
g.theo_peaks_x = []
g.theo_peaks_y = []
for i in range(1,len(tmp_ir)-1):
if(tmp_ir[i-1,1]<=tmp_ir[i,1]>=tmp_ir[i+1,1]):
g.theo_peaks_x.append(tmp_ir[i,0])
g.theo_peaks_y.append(tmp_ir[i,1])
self.ax.plot(g.theo_peaks_x,g.theo_peaks_y,"o",color="blue")
g.theo_peaks = np.zeros((len(g.theo_peaks_x),2))
g.theo_peaks[:,0] = np.asarray(g.theo_peaks_x)
g.theo_peaks[:,1] = np.asarray(g.theo_peaks_y)
self.draw()
def plot_IR_assigned(self):
self.delete()
#g.returnvalue, g.old_freq, g.freq, g.inten
self.ax.plot(g.spectrum_boltzmann[:,0],g.spectrum_boltzmann[:,1],color="red") ##x_axis 0..2000
self.ax.plot(g.exp_ir[:,0],g.exp_ir[:,1],color="black")
if(g.set_VCD==True):
self.ax.plot(g.exp_vcd[:,0],g.exp_vcd[:,1],"--",color="black")
self.ax.plot(g.spectrum_boltzmann_vcd[:,0],g.spectrum_boltzmann_vcd[:,1],"--",color="red")
for i in range(len(g.old_freq)):
self.ax.plot([g.old_freq[i],g.freq_new[i]],[g.inten_new[i],g.inten_new[i]],color="blue")
self.ax.set_ylim(-1,1)
self.ax.set_xlim(g.values["lb"],g.values["hb"])
tmp_ir = g.spectrum_boltzmann[g.values["lb"]:g.values["hb"]]
self.draw()
def plot_VCD_theo(self):
self.delete()
self.ax.plot(g.spectrum_boltzmann_vcd[:,0],g.spectrum_boltzmann_vcd[:,1],"--",color="red") ##x_axis 0..2000
tmp_vcd = g.spectrum_boltzmann_vcd[g.values["lb"]:g.values["hb"]]
self.ax.set_ylim(-1,1)
g.peak_list_VCD_y_theo = []
for peak in g.theo_peaks_x:
g.peak_list_VCD_y_theo.append(tmp_vcd[np.abs(tmp_vcd[:,0]-peak)<10e-3,1][0])
self.ax.plot(g.theo_peaks_x,g.peak_list_VCD_y_theo,"o",color="blue")
self.ax.set_xlim(g.values["lb"],g.values["hb"])
self.draw()
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = gui.Ui_MainWindow()
    ui.setupUi(MainWindow)
    g.Spectrum = Spectrum.Spectrum()
    # One drawing canvas per embedded graph widget, in display order.
    g.canvas_list = [
        Canvas(parent=widget)
        for widget in (ui.exp_ir_graph, ui.exp_vcd_graph, ui.theo_ir_graph,
                       ui.theo_vcd_graph, ui.assignment_graph,
                       ui.shifted_graph)
    ]
    # Numeric-entry widgets mapped to their parameter keys.
    g.list_buttons = [
        Button(widget, key)
        for widget, key in ((ui.w, "w"), (ui.lb, "lb"), (ui.hb, "hb"),
                            (ui.sigma_1, "s0"), (ui.sigma_2, "s1"),
                            (ui.mu, "mu"), (ui.cutoff, "c"),
                            (ui.temperature, "T"))
    ]
    # File-loading buttons with their dialog labels.
    g.list_buttons += [
        Load_Button(widget, label)
        for widget, label in ((ui.Load_EXP, "experimental IR"),
                              (ui.Load_EXP_VCD, "experimental VCD"),
                              (ui.load_theo_exp, "theoretical IR"),
                              (ui.load_theo_vcd, "theoretical VCD"),
                              (ui.load_theo_exp_2, "energies"))
    ]
    # Action buttons.
    g.list_buttons.append(Click_Button(ui.normalize_1, "normalize_1"))
    g.list_buttons.append(Click_Button(ui.normalize_2, "normalize_2"))
    g.list_buttons.append(Click_Button(ui.automatic, "automatic"))
    g.list_buttons.append(Click_Button(ui.align, "align", args=ui.results))
    MainWindow.show()
    sys.exit(app.exec_())
| [
"numpy.abs",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QApplication",
"global_.theo_peaks_y.append",
"global_.Spectrum.Boltzmann_weight_IR",
"matplotlib.figure.Figure",
"numpy.max",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"numpy.loadtxt",
"PyQt5.QtWid... | [((11104, 11136), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (11126, 11136), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11154, 11177), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (11175, 11177), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11187, 11206), 'ui.Ui_MainWindow', 'gui.Ui_MainWindow', ([], {}), '()\n', (11204, 11206), True, 'import ui as gui\n'), ((11251, 11270), 'Spectrum.Spectrum', 'Spectrum.Spectrum', ([], {}), '()\n', (11268, 11270), False, 'import Spectrum\n'), ((6895, 6935), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (6901, 6935), False, 'from matplotlib.figure import Figure\n'), ((6944, 6976), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', (['self', 'fig'], {}), '(self, fig)\n', (6965, 6976), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((9565, 9591), 'numpy.asarray', 'np.asarray', (['g.theo_peaks_x'], {}), '(g.theo_peaks_x)\n', (9575, 9591), True, 'import numpy as np\n'), ((9620, 9646), 'numpy.asarray', 'np.asarray', (['g.theo_peaks_y'], {}), '(g.theo_peaks_y)\n', (9630, 9646), True, 'import numpy as np\n'), ((5040, 5133), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', "('Select ' + self.entry + ' Spectrum')", '""""""', '""""""'], {}), "(None, 'Select ' + self.entry +\n ' Spectrum', '', '')\n", (5077, 5133), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5151, 5187), 'numpy.loadtxt', 'np.loadtxt', (['fileName'], {'usecols': '(0, 1)'}), '(fileName, usecols=(0, 1))\n', (5161, 5187), True, 'import numpy as np\n'), ((9325, 9360), 'global_.theo_peaks_x.append', 'g.theo_peaks_x.append', (['tmp_ir[i, 0]'], {}), '(tmp_ir[i, 0])\n', (9346, 9360), True, 'import global_ as g\n'), ((9376, 9411), 
'global_.theo_peaks_y.append', 'g.theo_peaks_y.append', (['tmp_ir[i, 1]'], {}), '(tmp_ir[i, 1])\n', (9397, 9411), True, 'import global_ as g\n'), ((1095, 1127), 'global_.Spectrum.Boltzmann_weight_IR', 'g.Spectrum.Boltzmann_weight_IR', ([], {}), '()\n', (1125, 1127), True, 'import global_ as g\n'), ((1192, 1225), 'global_.Spectrum.Boltzmann_weight_VCD', 'g.Spectrum.Boltzmann_weight_VCD', ([], {}), '()\n', (1223, 1225), True, 'import global_ as g\n'), ((1731, 1755), 'numpy.max', 'np.max', (['g.exp_ir[tmp, 1]'], {}), '(g.exp_ir[tmp, 1])\n', (1737, 1755), True, 'import numpy as np\n'), ((5458, 5551), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', "('Select ' + self.entry + ' Spectrum')", '""""""', '""""""'], {}), "(None, 'Select ' + self.entry +\n ' Spectrum', '', '')\n", (5495, 5551), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5570, 5606), 'numpy.loadtxt', 'np.loadtxt', (['fileName'], {'usecols': '(0, 1)'}), '(fileName, usecols=(0, 1))\n', (5580, 5606), True, 'import numpy as np\n'), ((5861, 5954), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', "('Select ' + self.entry + ' Energies')", '""""""', '""""""'], {}), "(None, 'Select ' + self.entry +\n ' Energies', '', '')\n", (5898, 5954), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2005, 2030), 'numpy.abs', 'np.abs', (['g.exp_vcd[tmp, 1]'], {}), '(g.exp_vcd[tmp, 1])\n', (2011, 2030), True, 'import numpy as np\n'), ((2330, 2355), 'numpy.max', 'np.max', (['g.theo_ir[tmp, 1]'], {}), '(g.theo_ir[tmp, 1])\n', (2336, 2355), True, 'import numpy as np\n'), ((2908, 2933), 'numpy.asarray', 'np.asarray', (['g.exp_ir[tmp]'], {}), '(g.exp_ir[tmp])\n', (2918, 2933), True, 'import numpy as np\n'), ((3456, 3481), 'numpy.asarray', 'np.asarray', (['g.peak_list_x'], {}), '(g.peak_list_x)\n', (3466, 3481), True, 'import numpy as np\n'), ((3517, 3542), 'numpy.asarray', 'np.asarray', (['g.peak_list_y'], {}), 
'(g.peak_list_y)\n', (3527, 3542), True, 'import numpy as np\n'), ((3939, 3960), 'Algorithm.Algorithm', 'Algorithm.Algorithm', ([], {}), '()\n', (3958, 3960), False, 'import Algorithm\n'), ((4267, 4290), 'global_.Spectrum.IR_shifted', 'g.Spectrum.IR_shifted', ([], {}), '()\n', (4288, 4290), True, 'import global_ as g\n'), ((6078, 6171), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', "('Select ' + self.entry + ' Spectrum')", '""""""', '""""""'], {}), "(None, 'Select ' + self.entry +\n ' Spectrum', '', '')\n", (6115, 6171), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6234, 6266), 'global_.Spectrum.Boltzmann_weight_IR', 'g.Spectrum.Boltzmann_weight_IR', ([], {}), '()\n', (6264, 6266), True, 'import global_ as g\n'), ((2609, 2635), 'numpy.abs', 'np.abs', (['g.theo_vcd[tmp, 1]'], {}), '(g.theo_vcd[tmp, 1])\n', (2615, 2635), True, 'import numpy as np\n'), ((4340, 4364), 'global_.Spectrum.VCD_shifted', 'g.Spectrum.VCD_shifted', ([], {}), '()\n', (4362, 4364), True, 'import global_ as g\n'), ((4394, 4416), 'global_.Spectrum.integrate', 'g.Spectrum.integrate', ([], {}), '()\n', (4414, 4416), True, 'import global_ as g\n'), ((4593, 4615), 'global_.Spectrum.integrate', 'g.Spectrum.integrate', ([], {}), '()\n', (4613, 4615), True, 'import global_ as g\n'), ((3183, 3217), 'global_.peak_list_x.append', 'g.peak_list_x.append', (['tmp_ir[i, 0]'], {}), '(tmp_ir[i, 0])\n', (3203, 3217), True, 'import global_ as g\n'), ((3241, 3275), 'global_.peak_list_y.append', 'g.peak_list_y.append', (['tmp_ir[i, 1]'], {}), '(tmp_ir[i, 1])\n', (3261, 3275), True, 'import global_ as g\n'), ((6409, 6502), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', "('Select ' + self.entry + ' Spectrum')", '""""""', '""""""'], {}), "(None, 'Select ' + self.entry +\n ' Spectrum', '', '')\n", (6446, 6502), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6575, 6608), 
'global_.Spectrum.Boltzmann_weight_VCD', 'g.Spectrum.Boltzmann_weight_VCD', ([], {}), '()\n', (6606, 6608), True, 'import global_ as g\n'), ((10860, 10888), 'numpy.abs', 'np.abs', (['(tmp_vcd[:, 0] - peak)'], {}), '(tmp_vcd[:, 0] - peak)\n', (10866, 10888), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import altair as alt
def _outliers(data):
bottom, middle, top = np.percentile(data, [25, 50, 75])
iqr = top - bottom
top_whisker = min(top + 1.5*iqr, data.max())
bottom_whisker = max(bottom - 1.5*iqr, data.min())
outliers = data[(data > top_whisker) | (data < bottom_whisker)]
return outliers
def _box_and_whisker(data):
middle = data.median()
bottom = data.quantile(0.25)
top = data.quantile(0.75)
iqr = top - bottom
top_whisker = min(top + 1.5*iqr, data.max())
bottom_whisker = max(bottom - 1.5*iqr, data.min())
return pd.Series({'middle': middle,
'bottom': bottom,
'top': top,
'top_whisker': top_whisker,
'bottom_whisker': bottom_whisker})
def _jitter(x, jitter_width=0.2):
"""Make x-coordinates for a jitter plot."""
return (pd.Categorical(x).codes
+ np.random.uniform(low=-jitter_width,
high=jitter_width,
size=len(x)))
def ecdf_vals(data, formal=False, x_min=None, x_max=None):
    """Get x, y values of an ECDF for plotting.

    Parameters
    ----------
    data : ndarray
        One dimensional Numpy array with data.
    formal : bool, default False
        If True, generate x and y values for formal ECDF (staircase). If
        False, generate x and y values for ECDF as dots.
    x_min : float, 'infer', or None
        Minimum value of x to plot. If 'infer', use a 5% buffer. Ignored
        if `formal` is False.
    x_max : float, 'infer', or None
        Maximum value of x to plot. If 'infer', use a 5% buffer. Ignored
        if `formal` is False.

    Returns
    -------
    x : ndarray
        x-values for plot
    y : ndarray
        y-values for plot
    """
    # Dots representation: sorted data vs. cumulative fractions 1/n .. 1.
    x = np.sort(data)
    y = np.arange(1, len(data)+1) / len(data)
    if formal:
        # Staircase: two points per datum plus two for the leading riser,
        # interleaved so even indices start a tread and odd indices end it.
        # Set up output arrays
        x_formal = np.empty(2*(len(x) + 1))
        y_formal = np.empty(2*(len(x) + 1))
        # y-values for steps: each cumulative level appears twice in a row.
        y_formal[:2] = 0
        y_formal[2::2] = y
        y_formal[3::2] = y
        # x-values for steps: each tread runs from one datum to the next;
        # the first two entries form the vertical riser at x[0].
        x_formal[0] = x[0]
        x_formal[1] = x[0]
        x_formal[2::2] = x
        x_formal[3:-1:2] = x[1:]
        x_formal[-1] = x[-1]
        # Optionally extend a horizontal line at y=0 down to x_min.
        if x_min is not None:
            if x_min == 'infer':
                x_min = x.min() - (x.max() - x.min())*0.05
            elif x_min > x.min():
                raise RuntimeError('x_min > x.min().')
            x_formal = np.concatenate(((x_min,), x_formal))
            y_formal = np.concatenate(((0,), y_formal))
        # Optionally extend a horizontal line at y=y.max() up to x_max.
        if x_max is not None:
            if x_max == 'infer':
                x_max = x.max() + (x.max() - x.min())*0.05
            elif x_max < x.max():
                raise RuntimeError('x_max < x.max().')
            x_formal = np.concatenate((x_formal, (x_max,)))
            y_formal = np.concatenate((y_formal, (y.max(),)))
        return x_formal, y_formal
    else:
        return x, y
def ecdf_y(data):
    """Give y-values of an ECDF for an unsorted column in a data frame.

    Parameters
    ----------
    data : Pandas Series
        Series (or column of a DataFrame) from which to generate ECDF
        values.

    Returns
    -------
    output : Pandas Series
        Corresponding y-values for an ECDF when plotted with dots.

    Notes
    -----
    .. This only works for plotting an ECDF with points, not for formal
       (staircase) ECDFs.
    """
    # Rank each value (ties broken by order of appearance), normalised to (0, 1].
    n = len(data)
    return data.rank(method='first') / n
def ecdf_dataframe(data=None, x=None, color=None, formal=False):
    """Generate a DataFrame that can be used for plotting ECDFs.

    Parameters
    ----------
    data : Pandas DataFrame
        A tidy data frame.
    x : valid column name of Pandas DataFrame
        Column of data frame containing values to use in ECDF plot.
    color : valid column name of Pandas DataFrame or list of column
        names
        Column(s) of DataFrame to use for grouping the data. A unique
        set of ECDF values is made for each. If None, no groupby
        operations are performed and a single ECDF is generated.
    formal : bool, default False
        If True, generate x and y values for formal ECDF (staircase). If
        False, generate x and y values for ECDF as dots.

    Returns
    -------
    output : Pandas DataFrame
        Pandas DataFrame with two or three columns.
            x : Column named for inputted `x`, data values.
            'ECDF': Values for y-values for plotting the ECDF
            color : Keys for groups. Omitted if `color` is None.
    """
    if data is None:
        raise RuntimeError('`data` must be specified.')
    if x is None:
        raise RuntimeError('`x` must be specified.')
    # A 5% padding around the data range is only needed for staircase ECDFs.
    if formal:
        lo = data[x].min()
        hi = data[x].max()
        pad = (hi - lo) * 0.05
        x_min = lo - pad
        x_max = hi + pad
    else:
        x_min = None
        x_max = None
    # No grouping: one ECDF over the whole column.
    if color is None:
        x_ecdf, y_ecdf = ecdf_vals(data[x].values,
                                   formal=formal,
                                   x_min=x_min,
                                   x_max=x_max)
        return pd.DataFrame({x: x_ecdf, 'ECDF': y_ecdf})
    # Grouped: one ECDF per group, stacked into a single tidy frame.
    frames = []
    for key, group in data.groupby(color):
        if type(key) == tuple:
            cat = ', '.join([str(c) for c in key])
        else:
            cat = key
        x_ecdf, y_ecdf = ecdf_vals(group[x],
                                   formal=formal,
                                   x_min=x_min,
                                   x_max=x_max)
        frames.append(pd.DataFrame(data={color: [cat]*len(x_ecdf),
                                           x: x_ecdf,
                                           'ECDF': y_ecdf}))
    return pd.concat(frames, ignore_index=True)
def altair_jitter(data=None, encode_x=None, encode_y=None,
                  encode_tooltip=alt.Tooltip(),
                  height=alt.utils.schemapi.Undefined,
                  width=alt.utils.schemapi.Undefined, jitter_width=0.2):
    """Generate a jitter plot with Altair.

    Exactly one of the two encodings must be nominal or ordinal; that
    axis is replaced by a hidden quantitative axis carrying jittered
    category codes.

    Parameters
    ----------
    data : Pandas DataFrame
        A tidy data frame.
    encode_x : str or altair.X instance
        Vega-Lite specification of x-values.
    encode_y : str or altair.Y instance
        Vega-Lite specification of y-values.
    encode_tooltip : list or altair.Tooltip instance
        Specification for tooltips.
    height : float or Undefined (default)
        Height of the chart, in pixels.
    width : float or Undefined (default)
        Width of the chart, in pixels.
    jitter_width : float
        Maximum jitter distance; must be between 0 and 0.5 to avoid
        clashes.

    Returns
    -------
    output : Chart
        Altair Chart instance.
    """
    if data is None:
        raise RuntimeError('`data` must be specified.')
    if encode_x is None:
        raise RuntimeError('`encode_x` must be specified.')
    if encode_y is None:
        raise RuntimeError('`encode_y` must be specified.')
    if not (0 <= jitter_width <= 0.5):
        raise RuntimeError('Must have `jitter_width` between 0 and 0.5.')
    # Promote shorthand strings to Altair encoding instances.
    if isinstance(encode_x, alt.X):
        x = encode_x
    else:
        x = alt.X(encode_x)
    if isinstance(encode_y, alt.Y):
        y = encode_y
    else:
        y = alt.Y(encode_y)
    # Strip a trailing ':T'-style type code to recover the bare column names.
    if len(x.shorthand) > 1 and x.shorthand[-2] == ':':
        x_name = x.shorthand[:-2]
    else:
        x_name = x.shorthand
    if len(y.shorthand) > 1 and y.shorthand[-2] == ':':
        y_name = y.shorthand[:-2]
    else:
        y_name = y.shorthand
    # Determine the data type code ('N', 'O', 'Q', 'T') of each encoding.
    var_types = [None, None]
    for i, (axis, var) in enumerate(zip(('x', 'y'), (x, y))):
        if not isinstance(var.type, alt.utils.schemapi.UndefinedType):
            var_types[i] = var.type[0].upper()
        elif len(var.shorthand) > 1 and var.shorthand[-2] == ':':
            var_types[i] = var.shorthand[-1]
        else:
            # BUGFIX: previously interpolated the encoding object itself
            # into the message instead of the axis name.
            raise RuntimeError(
                f'Data type of `encode_{axis}` must be specified.')
    # Make sure data types are given and ok
    if var_types[0] not in 'NO' and var_types[1] not in 'NO':
        raise RuntimeError('Either `x` or `y` must be nominal or ordinal.')
    # BUGFIX: was `var_types == ['N, N']` (a single malformed string), so
    # this check could never fire.
    if var_types == ['N', 'N']:
        raise RuntimeError('Cannot have both `x` and `y` be nominal.')
    # Decide if it's a horizontal plot or not
    if var_types[0] in 'NO':
        horizontal = False
        cats = x_name
        if isinstance(y.title, alt.utils.schemapi.UndefinedType):
            y.title = y_name
    else:
        horizontal = True
        cats = y_name
        if isinstance(x.title, alt.utils.schemapi.UndefinedType):
            x.title = x_name
    # Copy DataFrame so we don't overwrite anything
    df = data.copy()
    # One tick position per category code on the hidden axis.
    n_cats = len(df[cats].unique())
    nominal_axis_vals = list(range(n_cats))
    # Jittered category codes used as the hidden quantitative coordinate.
    df['__jitter'] = _jitter(df[cats], jitter_width)
    if horizontal:
        chart = alt.Chart(
                data=df,
                width=width,
                height=height
            ).mark_point(
            ).encode(
                y=alt.Y(
                    '__jitter:Q',
                    axis=alt.Axis(
                        title=None,
                        values=nominal_axis_vals,
                        labels=False,
                        grid=False,
                        ticks=False)),
                x=x,
                color=alt.Color(f'{cats}:N', title=y.title),
                tooltip=encode_tooltip)
    else:
        chart = alt.Chart(
                data=df,
                width=width,
                height=height
            ).mark_point(
            ).encode(
                x=alt.X(
                    '__jitter:Q',
                    axis=alt.Axis(
                        title=None,
                        values=nominal_axis_vals,
                        labels=False,
                        grid=False,
                        ticks=False)),
                y=y,
                color=alt.Color(f'{cats}:N', title=x.title),
                tooltip=encode_tooltip)
    return chart
def altair_box(data=None, encode_x=None, encode_y=None,
               encode_color=alt.Color(), height=None, width=None):
    """Generate a box plot with Altair.

    Parameters
    ----------
    data : Pandas DataFrame
        A tidy data frame.
    encode_x : str or altair.X instance
        Specification of x-values.
    encode_y : str or altair.Y instance
        Specification of y-values.
    encode_color : str or Color instance or None or alt.Color() (default)
        Specification of coloring of box plot. With the default (an empty
        ``alt.Color()``), boxes are colored with Altair defaults. If None,
        the boxes are colored according to the categorical variable.
    height : float or None (default)
        Height of the chart, in pixels. If None, inferred.
    width : float or None (default)
        Width of the chart, in pixels. If None, inferred.

    Returns
    -------
    output : Chart
        Altair Chart instance (whiskers + boxes + medians + outliers).
    """
    # Promote shorthand strings to Altair encoding instances.
    if isinstance(encode_x, alt.X):
        x = encode_x
    else:
        x = alt.X(encode_x)
    if isinstance(encode_y, alt.Y):
        y = encode_y
    else:
        y = alt.Y(encode_y)
    # Strip a trailing ':T'-style type code to recover the bare column names.
    if len(x.shorthand) > 1 and x.shorthand[-2] == ':':
        x_name = x.shorthand[:-2]
    else:
        x_name = x.shorthand
    if len(y.shorthand) > 1 and y.shorthand[-2] == ':':
        y_name = y.shorthand[:-2]
    else:
        y_name = y.shorthand
    # Get axis titles
    if isinstance(x.title, alt.utils.schemapi.UndefinedType):
        x_title = x_name
    else:
        x_title = x.title
    if isinstance(y.title, alt.utils.schemapi.UndefinedType):
        y_title = y_name
    else:
        y_title = y.title
    # Determine the data type code ('N', 'O', 'Q', 'T') of each encoding.
    var_types = [None, None]
    for i, (axis, var) in enumerate(zip(('x', 'y'), (x, y))):
        if not isinstance(var.type, alt.utils.schemapi.UndefinedType):
            var_types[i] = var.type[0].upper()
        elif len(var.shorthand) > 1 and var.shorthand[-2] == ':':
            var_types[i] = var.shorthand[-1]
        else:
            # BUGFIX: report the axis name, not the encoding object's repr.
            raise RuntimeError(
                f'Data type of `encode_{axis}` must be specified.')
    # Make sure data types are given and ok
    if var_types[0] not in 'NO' and var_types[1] not in 'NO':
        raise RuntimeError('Either `x` or `y` must be nominal or ordinal.')
    # BUGFIX: was `['N, N']` (one malformed string); the check never fired.
    if var_types == ['N', 'N']:
        raise RuntimeError('Cannot have both `x` and `y` be nominal.')
    # Decide if it's a horizontal plot or not
    if var_types[0] in 'NO':
        horizontal = False
        cats = x_name
        val = y_name
        if encode_color is None:
            encode_color = alt.Color(f'{cats}:N', title=x.title)
    else:
        horizontal = True
        cats = y_name
        val = x_name
        if encode_color is None:
            encode_color = alt.Color(f'{cats}:N', title=y.title)
    # Set up groupby object
    grouped = data.groupby(cats)
    n_boxes = len(grouped)
    # Set default heights and widths, also of bars
    if width is None:
        if horizontal:
            width = 400
        else:
            width = 200
    if height is None:
        if horizontal:
            height = 200
        else:
            height = 300
    if horizontal:
        size = height*0.9 / n_boxes
    else:
        size = width*0.9 / n_boxes
    # Data frame for boxes and whiskers: one row per category with columns
    # middle/bottom/top/top_whisker/bottom_whisker.
    df_box = (grouped[val].apply(_box_and_whisker)
                          .reset_index()
                          .rename(columns={'level_1': 'box_val'})
                          .pivot(index=cats, columns='box_val'))
    df_box.columns = df_box.columns.get_level_values(1)
    df_box = df_box.reset_index()
    # Data frame for outliers (points beyond the whiskers).
    df_outlier = grouped[val].apply(_outliers).reset_index(level=0)
    if horizontal:
        chart_box = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_bar(
                size=size
            ).encode(
                y=alt.Y(f'{cats}:N', title=y_title),
                x=alt.X('bottom:Q', title=x_title),
                x2=alt.X2('top:Q', title=x_title),
                color=encode_color)
        chart_median = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_tick(
                size=size,
                color='white'
            ).encode(
                y=alt.Y(f'{cats}:N', title=y_title),
                x=alt.X('middle:Q', title=x_title))
        chart_whisker = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_rule(
            ).encode(
                y=alt.Y(f'{cats}:N', title=y_title),
                x=alt.X('bottom_whisker:Q', title=x_title),
                x2=alt.X2('top_whisker:Q', title=x_title))
        chart_outliers = alt.Chart(
                data=df_outlier,
                width=width,
                height=height
            ).mark_point(
            ).encode(
                y=alt.Y(f'{cats}:N', title=y_title),
                x=alt.X(f'{val}:Q', title=x_title),
                color=encode_color)
    else:
        chart_box = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_bar(
                size=size
            ).encode(
                x=alt.X(f'{cats}:N', title=x_title),
                y=alt.Y('bottom:Q', title=y_title),
                y2=alt.Y2('top:Q', title=y_title),
                color=encode_color)
        chart_median = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_tick(
                size=size,
                color='white'
            ).encode(
                x=alt.X(f'{cats}:N', title=x_title),
                y=alt.Y('middle:Q', title=y_title))
        chart_whisker = alt.Chart(
                data=df_box,
                width=width,
                height=height
            ).mark_rule(
            ).encode(
                x=alt.X(f'{cats}:N', title=x_title),
                y=alt.Y('bottom_whisker:Q', title=y_title),
                y2=alt.Y2('top_whisker:Q', title=y_title))
        chart_outliers = alt.Chart(
                data=df_outlier,
                width=width,
                height=height
            ).mark_point(
            ).encode(
                x=alt.X(f'{cats}:N', title=x_title),
                y=alt.Y(f'{val}:Q', title=y_title),
                color=encode_color)
    return chart_whisker + chart_box + chart_median + chart_outliers
"pandas.DataFrame",
"altair.Y",
"altair.X2",
"altair.Chart",
"numpy.percentile",
"numpy.sort",
"altair.X",
"altair.Axis",
"pandas.Series",
"pandas.Categorical",
"altair.Tooltip",
"altair.Y2",
"pandas.concat",
"numpy.concatenate",
"altair.Color"
] | [((108, 141), 'numpy.percentile', 'np.percentile', (['data', '[25, 50, 75]'], {}), '(data, [25, 50, 75])\n', (121, 141), True, 'import numpy as np\n'), ((618, 743), 'pandas.Series', 'pd.Series', (["{'middle': middle, 'bottom': bottom, 'top': top, 'top_whisker': top_whisker,\n 'bottom_whisker': bottom_whisker}"], {}), "({'middle': middle, 'bottom': bottom, 'top': top, 'top_whisker':\n top_whisker, 'bottom_whisker': bottom_whisker})\n", (627, 743), True, 'import pandas as pd\n'), ((1874, 1887), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1881, 1887), True, 'import numpy as np\n'), ((6241, 6254), 'altair.Tooltip', 'alt.Tooltip', ([], {}), '()\n', (6252, 6254), True, 'import altair as alt\n'), ((10708, 10719), 'altair.Color', 'alt.Color', ([], {}), '()\n', (10717, 10719), True, 'import altair as alt\n'), ((5415, 5456), 'pandas.DataFrame', 'pd.DataFrame', (["{x: x_ecdf, 'ECDF': y_ecdf}"], {}), "({x: x_ecdf, 'ECDF': y_ecdf})\n", (5427, 5456), True, 'import pandas as pd\n'), ((6108, 6145), 'pandas.concat', 'pd.concat', (['df_list'], {'ignore_index': '(True)'}), '(df_list, ignore_index=True)\n', (6117, 6145), True, 'import pandas as pd\n'), ((7596, 7611), 'altair.X', 'alt.X', (['encode_x'], {}), '(encode_x)\n', (7601, 7611), True, 'import altair as alt\n'), ((7692, 7707), 'altair.Y', 'alt.Y', (['encode_y'], {}), '(encode_y)\n', (7697, 7707), True, 'import altair as alt\n'), ((11677, 11692), 'altair.X', 'alt.X', (['encode_x'], {}), '(encode_x)\n', (11682, 11692), True, 'import altair as alt\n'), ((11773, 11788), 'altair.Y', 'alt.Y', (['encode_y'], {}), '(encode_y)\n', (11778, 11788), True, 'import altair as alt\n'), ((927, 944), 'pandas.Categorical', 'pd.Categorical', (['x'], {}), '(x)\n', (941, 944), True, 'import pandas as pd\n'), ((2622, 2658), 'numpy.concatenate', 'np.concatenate', (['((x_min,), x_formal)'], {}), '(((x_min,), x_formal))\n', (2636, 2658), True, 'import numpy as np\n'), ((2682, 2714), 'numpy.concatenate', 'np.concatenate', (['((0,), 
y_formal)'], {}), '(((0,), y_formal))\n', (2696, 2714), True, 'import numpy as np\n'), ((2983, 3019), 'numpy.concatenate', 'np.concatenate', (['(x_formal, (x_max,))'], {}), '((x_formal, (x_max,)))\n', (2997, 3019), True, 'import numpy as np\n'), ((13266, 13303), 'altair.Color', 'alt.Color', (['f"""{cats}:N"""'], {'title': 'x.title'}), "(f'{cats}:N', title=x.title)\n", (13275, 13303), True, 'import altair as alt\n'), ((13443, 13480), 'altair.Color', 'alt.Color', (['f"""{cats}:N"""'], {'title': 'y.title'}), "(f'{cats}:N', title=y.title)\n", (13452, 13480), True, 'import altair as alt\n'), ((9928, 9965), 'altair.Color', 'alt.Color', (['f"""{cats}:N"""'], {'title': 'y.title'}), "(f'{cats}:N', title=y.title)\n", (9937, 9965), True, 'import altair as alt\n'), ((10525, 10562), 'altair.Color', 'alt.Color', (['f"""{cats}:N"""'], {'title': 'x.title'}), "(f'{cats}:N', title=x.title)\n", (10534, 10562), True, 'import altair as alt\n'), ((14648, 14681), 'altair.Y', 'alt.Y', (['f"""{cats}:N"""'], {'title': 'y_title'}), "(f'{cats}:N', title=y_title)\n", (14653, 14681), True, 'import altair as alt\n'), ((14701, 14733), 'altair.X', 'alt.X', (['"""bottom:Q"""'], {'title': 'x_title'}), "('bottom:Q', title=x_title)\n", (14706, 14733), True, 'import altair as alt\n'), ((14754, 14784), 'altair.X2', 'alt.X2', (['"""top:Q"""'], {'title': 'x_title'}), "('top:Q', title=x_title)\n", (14760, 14784), True, 'import altair as alt\n'), ((15081, 15114), 'altair.Y', 'alt.Y', (['f"""{cats}:N"""'], {'title': 'y_title'}), "(f'{cats}:N', title=y_title)\n", (15086, 15114), True, 'import altair as alt\n'), ((15135, 15167), 'altair.X', 'alt.X', (['"""middle:Q"""'], {'title': 'x_title'}), "('middle:Q', title=x_title)\n", (15140, 15167), True, 'import altair as alt\n'), ((15372, 15405), 'altair.Y', 'alt.Y', (['f"""{cats}:N"""'], {'title': 'y_title'}), "(f'{cats}:N', title=y_title)\n", (15377, 15405), True, 'import altair as alt\n'), ((15425, 15465), 'altair.X', 'alt.X', (['"""bottom_whisker:Q"""'], 
{'title': 'x_title'}), "('bottom_whisker:Q', title=x_title)\n", (15430, 15465), True, 'import altair as alt\n'), ((15486, 15524), 'altair.X2', 'alt.X2', (['"""top_whisker:Q"""'], {'title': 'x_title'}), "('top_whisker:Q', title=x_title)\n", (15492, 15524), True, 'import altair as alt\n'), ((15735, 15768), 'altair.Y', 'alt.Y', (['f"""{cats}:N"""'], {'title': 'y_title'}), "(f'{cats}:N', title=y_title)\n", (15740, 15768), True, 'import altair as alt\n'), ((15788, 15820), 'altair.X', 'alt.X', (['f"""{val}:Q"""'], {'title': 'x_title'}), "(f'{val}:Q', title=x_title)\n", (15793, 15820), True, 'import altair as alt\n'), ((16084, 16117), 'altair.X', 'alt.X', (['f"""{cats}:N"""'], {'title': 'x_title'}), "(f'{cats}:N', title=x_title)\n", (16089, 16117), True, 'import altair as alt\n'), ((16137, 16169), 'altair.Y', 'alt.Y', (['"""bottom:Q"""'], {'title': 'y_title'}), "('bottom:Q', title=y_title)\n", (16142, 16169), True, 'import altair as alt\n'), ((16190, 16220), 'altair.Y2', 'alt.Y2', (['"""top:Q"""'], {'title': 'y_title'}), "('top:Q', title=y_title)\n", (16196, 16220), True, 'import altair as alt\n'), ((16510, 16543), 'altair.X', 'alt.X', (['f"""{cats}:N"""'], {'title': 'x_title'}), "(f'{cats}:N', title=x_title)\n", (16515, 16543), True, 'import altair as alt\n'), ((16563, 16595), 'altair.Y', 'alt.Y', (['"""middle:Q"""'], {'title': 'y_title'}), "('middle:Q', title=y_title)\n", (16568, 16595), True, 'import altair as alt\n'), ((16793, 16826), 'altair.X', 'alt.X', (['f"""{cats}:N"""'], {'title': 'x_title'}), "(f'{cats}:N', title=x_title)\n", (16798, 16826), True, 'import altair as alt\n'), ((16846, 16886), 'altair.Y', 'alt.Y', (['"""bottom_whisker:Q"""'], {'title': 'y_title'}), "('bottom_whisker:Q', title=y_title)\n", (16851, 16886), True, 'import altair as alt\n'), ((16907, 16945), 'altair.Y2', 'alt.Y2', (['"""top_whisker:Q"""'], {'title': 'y_title'}), "('top_whisker:Q', title=y_title)\n", (16913, 16945), True, 'import altair as alt\n'), ((17149, 17182), 'altair.X', 'alt.X', 
(['f"""{cats}:N"""'], {'title': 'x_title'}), "(f'{cats}:N', title=x_title)\n", (17154, 17182), True, 'import altair as alt\n'), ((17202, 17234), 'altair.Y', 'alt.Y', (['f"""{val}:Q"""'], {'title': 'y_title'}), "(f'{val}:Q', title=y_title)\n", (17207, 17234), True, 'import altair as alt\n'), ((9435, 9481), 'altair.Chart', 'alt.Chart', ([], {'data': 'df', 'width': 'width', 'height': 'height'}), '(data=df, width=width, height=height)\n', (9444, 9481), True, 'import altair as alt\n'), ((9676, 9765), 'altair.Axis', 'alt.Axis', ([], {'title': 'None', 'values': 'nominal_axis_vals', 'labels': '(False)', 'grid': '(False)', 'ticks': '(False)'}), '(title=None, values=nominal_axis_vals, labels=False, grid=False,\n ticks=False)\n', (9684, 9765), True, 'import altair as alt\n'), ((10033, 10079), 'altair.Chart', 'alt.Chart', ([], {'data': 'df', 'width': 'width', 'height': 'height'}), '(data=df, width=width, height=height)\n', (10042, 10079), True, 'import altair as alt\n'), ((10273, 10362), 'altair.Axis', 'alt.Axis', ([], {'title': 'None', 'values': 'nominal_axis_vals', 'labels': '(False)', 'grid': '(False)', 'ticks': '(False)'}), '(title=None, values=nominal_axis_vals, labels=False, grid=False,\n ticks=False)\n', (10281, 10362), True, 'import altair as alt\n'), ((14445, 14495), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (14454, 14495), True, 'import altair as alt\n'), ((14846, 14896), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (14855, 14896), True, 'import altair as alt\n'), ((15194, 15244), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (15203, 15244), True, 'import altair as alt\n'), ((15552, 15606), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_outlier', 'width': 'width', 'height': 'height'}), 
'(data=df_outlier, width=width, height=height)\n', (15561, 15606), True, 'import altair as alt\n'), ((15888, 15938), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (15897, 15938), True, 'import altair as alt\n'), ((16282, 16332), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (16291, 16332), True, 'import altair as alt\n'), ((16622, 16672), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_box', 'width': 'width', 'height': 'height'}), '(data=df_box, width=width, height=height)\n', (16631, 16672), True, 'import altair as alt\n'), ((16973, 17027), 'altair.Chart', 'alt.Chart', ([], {'data': 'df_outlier', 'width': 'width', 'height': 'height'}), '(data=df_outlier, width=width, height=height)\n', (16982, 17027), True, 'import altair as alt\n')] |
from abc import ABCMeta, abstractmethod
from abcpy.graphtools import GraphTools
import numpy as np
from sklearn.covariance import ledoit_wolf
from glmnet import LogitNet
class Approx_likelihood(metaclass = ABCMeta):
    """This abstract base class defines the approximate likelihood
    function.
    """

    @abstractmethod
    def __init__(self, statistics_calc):
        """
        The constructor of a sub-class must accept a non-optional statistics
        calculator, which is stored to self.statistics_calc.

        Parameters
        ----------
        statistics_calc : abcpy.statistics.Statistics
            Statistics extractor object that conforms to the Statistics class.
        """
        # BUGFIX: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a constant, not an exception type.
        raise NotImplementedError

    @abstractmethod
    def likelihood(self, y_obs, y_sim):
        """To be overwritten by any sub-class: should compute the approximate
        likelihood value given the observed data set y_obs and the data set
        y_sim simulated from the model set at the parameter value.

        Parameters
        ----------
        y_obs: Python list
            Observed data set.
        y_sim: Python list
            Simulated data set from model at the parameter value.

        Returns
        -------
        float
            Computed approximate likelihood.
        """
        # BUGFIX: added the missing `self` parameter (sub-classes such as
        # SynLiklihood already define the method with `self`).
        raise NotImplementedError
class SynLiklihood(Approx_likelihood):
    """Approximate likelihood computed with the synthetic-likelihood approach
    of Wood [1].  The precision matrix of the simulated summary statistics is
    estimated robustly with Ledoit-Wolf shrinkage [2].

    [1] Wood, S. N. Statistical inference for noisy nonlinear ecological
    dynamic systems. Nature, 466(7310):1102-1104, Aug. 2010.

    [2] Ledoit, O. and Wolf, M. A Well-Conditioned Estimator for
    Large-Dimensional Covariance Matrices, Journal of Multivariate Analysis,
    Volume 88, Issue 2, pages 365-411, February 2004.
    """
    def __init__(self, statistics_calc):
        # Cached observed-data statistics plus the data set they were
        # computed from; filled lazily on the first likelihood() call.
        self.stat_obs = None
        self.data_set=None
        self.statistics_calc = statistics_calc

    def likelihood(self, y_obs, y_sim):
        """Return the synthetic likelihood of y_obs given simulations y_sim.

        Both arguments must be Python lists; a TypeError is raised otherwise.
        """
        if not isinstance(y_obs, list):
            raise TypeError('Observed data is not of allowed types')
        if not isinstance(y_sim, list):
            raise TypeError('simulated data is not of allowed types')
        # Extract summary statistics from the observed data; reuse the cached
        # value as long as the same observed data set is passed in again.
        if(self.stat_obs is None or y_obs!=self.data_set):
            self.stat_obs = self.statistics_calc.statistics(y_obs)
            self.data_set=y_obs
        # Extract summary statistics from the simulated data
        stat_sim = self.statistics_calc.statistics(y_sim)
        # Mean of the simulated summary statistics (column-wise)
        mean_sim = np.mean(stat_sim,0)
        # Robust (well-conditioned) covariance via Ledoit-Wolf shrinkage,
        # inverted to obtain the precision matrix and its determinant.
        lw_cov_, _ = ledoit_wolf(stat_sim)
        robust_precision_sim = np.linalg.inv(lw_cov_)
        robust_precision_sim_det = np.linalg.det(robust_precision_sim)
        # Gaussian density of the observed statistics under the simulated
        # mean/precision; the normalisation factor is raised to the number
        # of observed rows (product over independent observations).
        result = pow(np.sqrt((1/(2*np.pi))*robust_precision_sim_det),self.stat_obs.shape[0])\
            *np.exp(np.sum(-0.5*np.sum(np.array(self.stat_obs-mean_sim)* \
            np.array(np.matrix(robust_precision_sim)*np.matrix(self.stat_obs-mean_sim).T).T, axis = 1)))
        return result
class PenLogReg(Approx_likelihood, GraphTools):
    """Approximate likelihood (up to a constant) computed with penalized
    logistic regression as described in Dutta et al. [1], using the glmnet
    lasso solver of Friedman et al. [2].

    [1] Dutta, R. et al. Likelihood-free inference by penalised logistic
    regression. arXiv:1611.10242, Nov. 2016.

    [2] Friedman, J., Hastie, T., and Tibshirani, R. (2010). Regularization
    paths for generalized linear models via coordinate descent. Journal of
    Statistical Software, 33(1), 1-22.

    Parameters
    ----------
    statistics_calc : abcpy.statistics.Statistics
        Statistics extractor object that conforms to the Statistics class.
    model : abcpy.models.Model
        Model object that conforms to the Model class.
    n_simulate : int
        Number of data points in the simulated data set.
    n_folds : int, optional
        Number of folds for cross-validation. The default value is 10.
    max_iter : int, optional
        Maximum passes over the data. The default is 100000.
    seed : int, optional
        Seed used for determining the cv folds (the glmnet solver itself is
        not deterministic). The default value is None.
    """

    def __init__(self, statistics_calc, model, n_simulate, n_folds=10, max_iter=100000, seed=None):
        self.model = model
        self.statistics_calc = statistics_calc
        self.n_folds = n_folds
        self.n_simulate = n_simulate
        self.seed = seed
        self.max_iter = max_iter
        # Simulate the reference data once and keep its summary statistics.
        self.ref_data_stat = self._simulate_ref_data()[0]
        # Cache of observed-data statistics, filled lazily in likelihood().
        self.stat_obs = None
        self.data_set = None

    def likelihood(self, y_obs, y_sim):
        """Return the penalised-logistic-regression likelihood approximation.

        Parameters
        ----------
        y_obs : Python list
            Observed data set.
        y_sim : Python list
            Simulated data set from model at the parameter value.
        """
        if not isinstance(y_obs, list):
            raise TypeError('Observed data is not of allowed types')
        if not isinstance(y_sim, list):
            raise TypeError('simulated data is not of allowed types')
        # Extract summary statistics from the observed data; reuse the cached
        # value when the same observed data set is passed in again.
        if self.stat_obs is None or self.data_set != y_obs:
            self.stat_obs = self.statistics_calc.statistics(y_obs)
            self.data_set = y_obs
        # Extract summary statistics from the simulated data.
        stat_sim = self.statistics_calc.statistics(y_sim)
        # Label simulated statistics 0 and reference statistics 1, then fit a
        # lasso-penalized (alpha=1) logistic regression with cross-validation.
        y = np.append(np.zeros(self.n_simulate), np.ones(self.n_simulate))
        X = np.array(np.concatenate((stat_sim, self.ref_data_stat), axis=0))
        m = LogitNet(alpha=1, n_splits=self.n_folds, max_iter=self.max_iter, random_state=self.seed)
        m = m.fit(X, y)
        # exp(-(intercept + coef . stat_obs)) summed over the observed rows.
        result = np.exp(-np.sum((m.intercept_ + np.sum(np.multiply(m.coef_, self.stat_obs), axis=1)), axis=0))
        return result

    def _simulate_ref_data(self, rng=None):
        """Simulate the reference data set used at initialization.

        Bug fix: the previous signature used ``rng=np.random.RandomState()``
        as a default value, which is evaluated once at class-definition time,
        so all calls (and all instances) silently shared one generator state.
        A fresh generator is now created per call when none is supplied;
        callers that pass ``rng`` explicitly are unaffected.
        """
        if rng is None:
            rng = np.random.RandomState()
        ref_data_stat = [[None] * self.n_simulate for i in range(len(self.model))]
        self.sample_from_prior(rng=rng)
        for model_index, model in enumerate(self.model):
            ind = 0
            # Fill the per-model list until its last slot is populated.
            while ref_data_stat[model_index][-1] is None:
                data = model.forward_simulate(model.get_input_values(), 1, rng=rng)
                data_stat = self.statistics_calc.statistics(data[0].tolist())
                ref_data_stat[model_index][ind] = data_stat
                ind += 1
            ref_data_stat[model_index] = np.squeeze(np.asarray(ref_data_stat[model_index]))
        return ref_data_stat
| [
"numpy.matrix",
"numpy.multiply",
"sklearn.covariance.ledoit_wolf",
"glmnet.LogitNet",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.random.RandomState",
"numpy.mean",
"numpy.linalg.inv",
"numpy.array",
"numpy.linalg.det",
"numpy.concatenate",
"numpy.sqrt"
] | [((2996, 3016), 'numpy.mean', 'np.mean', (['stat_sim', '(0)'], {}), '(stat_sim, 0)\n', (3003, 3016), True, 'import numpy as np\n'), ((3097, 3118), 'sklearn.covariance.ledoit_wolf', 'ledoit_wolf', (['stat_sim'], {}), '(stat_sim)\n', (3108, 3118), False, 'from sklearn.covariance import ledoit_wolf\n'), ((3150, 3172), 'numpy.linalg.inv', 'np.linalg.inv', (['lw_cov_'], {}), '(lw_cov_)\n', (3163, 3172), True, 'import numpy as np\n'), ((3273, 3308), 'numpy.linalg.det', 'np.linalg.det', (['robust_precision_sim'], {}), '(robust_precision_sim)\n', (3286, 3308), True, 'import numpy as np\n'), ((6777, 6869), 'glmnet.LogitNet', 'LogitNet', ([], {'alpha': '(1)', 'n_splits': 'self.n_folds', 'max_iter': 'self.max_iter', 'random_state': 'self.seed'}), '(alpha=1, n_splits=self.n_folds, max_iter=self.max_iter,\n random_state=self.seed)\n', (6785, 6869), False, 'from glmnet import LogitNet\n'), ((7073, 7096), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (7094, 7096), True, 'import numpy as np\n'), ((6638, 6663), 'numpy.zeros', 'np.zeros', (['self.n_simulate'], {}), '(self.n_simulate)\n', (6646, 6663), True, 'import numpy as np\n'), ((6664, 6688), 'numpy.ones', 'np.ones', (['self.n_simulate'], {}), '(self.n_simulate)\n', (6671, 6688), True, 'import numpy as np\n'), ((6711, 6765), 'numpy.concatenate', 'np.concatenate', (['(stat_sim, self.ref_data_stat)'], {'axis': '(0)'}), '((stat_sim, self.ref_data_stat), axis=0)\n', (6725, 6765), True, 'import numpy as np\n'), ((3367, 3418), 'numpy.sqrt', 'np.sqrt', (['(1 / (2 * np.pi) * robust_precision_sim_det)'], {}), '(1 / (2 * np.pi) * robust_precision_sim_det)\n', (3374, 3418), True, 'import numpy as np\n'), ((7776, 7814), 'numpy.asarray', 'np.asarray', (['ref_data_stat[model_index]'], {}), '(ref_data_stat[model_index])\n', (7786, 7814), True, 'import numpy as np\n'), ((6950, 6985), 'numpy.multiply', 'np.multiply', (['m.coef_', 'self.stat_obs'], {}), '(m.coef_, self.stat_obs)\n', (6961, 6985), True, 'import numpy 
as np\n'), ((3475, 3509), 'numpy.array', 'np.array', (['(self.stat_obs - mean_sim)'], {}), '(self.stat_obs - mean_sim)\n', (3483, 3509), True, 'import numpy as np\n'), ((3528, 3559), 'numpy.matrix', 'np.matrix', (['robust_precision_sim'], {}), '(robust_precision_sim)\n', (3537, 3559), True, 'import numpy as np\n'), ((3560, 3595), 'numpy.matrix', 'np.matrix', (['(self.stat_obs - mean_sim)'], {}), '(self.stat_obs - mean_sim)\n', (3569, 3595), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
def polyvalHelperFunction(x, p):
    """Evaluate the polynomial with coefficients ``p`` at ``x``.

    np.polyval takes the coefficients first; this wrapper flips the argument
    order so the value comes first, which is convenient when mapping over a
    data series.
    """
    return np.polyval(p, x)
def piecewisePolyvalHelperFunction(x, p):
    """Evaluate a two-piece polynomial at ``x``: coefficients ``p[1]`` for
    x < 0.5 and ``p[0]`` for x >= 0.5.

    Bug fix: the original passed the eagerly evaluated ``np.polyval(..., x)``
    arrays to ``np.piecewise`` instead of callables, which fails for array
    inputs (np.piecewise assigns each funclist entry only to the elements
    selected by its condition).  Passing callables preserves the scalar
    behaviour and makes array inputs work.
    """
    return np.piecewise(
        x,
        [x < 0.5, x >= 0.5],
        [lambda v: np.polyval(p[1], v), lambda v: np.polyval(p[0], v)],
    )
def coolpropMixtureHelperFunction(composition):
    """Build a CoolProp HEOS mixture string (N2/O2/H2O/CO2 mole fractions)
    from a mapping such as ``{"N2": 0.78, "O2": 0.21, "H2O": ..., "CO2": ...}``.

    Bug fix: the CO2 fraction previously interpolated ``str(composition)``
    (the whole mapping) instead of ``composition["CO2"]``, producing a
    malformed mixture string.
    """
    return (
        "HEOS::"
        + "N2[" + str(composition["N2"]) + "]&"
        + "O2[" + str(composition["O2"]) + "]&"
        + "H2O[" + str(composition["H2O"]) + "]&"
        + "CO2[" + str(composition["CO2"]) + "]"
    )
def d2df(system, unit, flow, property):
    """Return the column header name ``"system:unit:flow:property"``."""
    return ":".join((system, unit, flow, property))
"numpy.polyval"
] | [((303, 319), 'numpy.polyval', 'np.polyval', (['p', 'x'], {}), '(p, x)\n', (313, 319), True, 'import numpy as np\n'), ((649, 668), 'numpy.polyval', 'np.polyval', (['p[1]', 'x'], {}), '(p[1], x)\n', (659, 668), True, 'import numpy as np\n'), ((670, 689), 'numpy.polyval', 'np.polyval', (['p[0]', 'x'], {}), '(p[0], x)\n', (680, 689), True, 'import numpy as np\n')] |
"""
probe.py
"""
import numpy as np
from scipy.io import FortranFile
class Probe():
    """Reader for binary probe dump files.

    Holds the file path, the list of recorded variable names, the on-disk
    precision, and the per-axis sampling frequency; read() reconstructs the
    per-variable time series of 3-D fields.
    """

    def __init__(self, **kwargs):
        # Default on-disk precision; overridden by dtype="single" below.
        self.dtype = np.float64
        for arg, val in kwargs.items():
            if arg == "file":
                # Path to the binary probe file.
                self.file = val
            elif arg == "variables":
                # Names of the recorded fields, in on-disk order.
                self.variables = val
            elif arg == "dtype":
                if val == "single":
                    self.dtype = np.float32
                else:
                    self.dtype = np.float64
            elif arg == "freq":
                # Per-axis sampling frequency (x, y, z); a value of 0 means
                # the axis was not sampled (a single plane is stored).
                # NOTE(review): self.file / self.variables / self.freq are
                # only set when the keyword is supplied -- read() raises
                # AttributeError otherwise.
                self.freq = val

    def read(self, nx, ny, nz):
        """Read the probe file and return a dict mapping each variable name
        to a list (one entry per time step) of arrays shaped (nx, ny, nz)
        after reduction by the sampling frequencies in self.freq.
        """
        # First pass: raw element count, used to deduce the time-step count.
        with open(self.file, "rb") as bindat:
            fldat = np.fromfile(bindat, self.dtype)
        nfields = len(self.variables)
        # Number of sampled points along each axis (0 frequency -> 1 plane).
        if self.freq[0]:
            nx = nx // self.freq[0] + 1
        else:
            nx = 1
        if self.freq[1]:
            ny = ny // self.freq[1] + 1
        else:
            ny = 1
        if self.freq[2]:
            nz = nz // self.freq[2] + 1
        else:
            nz = 1
        ntime = len(fldat) // (nfields * nx * ny * nz + 1)  # Note that Fortran adds a byte for new-line
        # Second pass: re-read as Fortran records, one record per time step.
        # NOTE(review): read_reals() hard-codes float64 even when self.dtype
        # is float32 -- confirm probe files are always written in double
        # precision, otherwise ntime above is computed with the wrong width.
        bindat = FortranFile(self.file, "r")
        fldat = []
        for t in range(ntime):
            fldat.append(bindat.read_reals(dtype=np.float64).reshape(nfields, nx, ny, nz))
        bindat.close()
        # Re-group by variable: fldict[name][t] is the field at time step t.
        fldict = {}
        for field in range(nfields):
            fldict[self.variables[field]] = []
        for t in range(ntime):
            for f in range(nfields):
                fldict[self.variables[f]].append(fldat[t][f])
        return fldict
| [
"numpy.fromfile",
"scipy.io.FortranFile"
] | [((1171, 1198), 'scipy.io.FortranFile', 'FortranFile', (['self.file', '"""r"""'], {}), "(self.file, 'r')\n", (1182, 1198), False, 'from scipy.io import FortranFile\n'), ((683, 714), 'numpy.fromfile', 'np.fromfile', (['bindat', 'self.dtype'], {}), '(bindat, self.dtype)\n', (694, 714), True, 'import numpy as np\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from mlxtend.evaluate import scoring
import numpy as np
def test_metric_argument():
    """scoring must raise AttributeError for an unknown metric name."""
    raised = False
    try:
        scoring(y_target=[1], y_predicted=[1], metric='test')
    except AttributeError:
        raised = True
    assert raised
def test_y_arguments():
    """scoring must raise AttributeError when the vectors differ in length."""
    raised = False
    try:
        scoring(y_target=[1, 2], y_predicted=[1])
    except AttributeError:
        raised = True
    assert raised
def test_accuracy():
    """Accuracy metric: 6 of the 8 predictions match their targets."""
    targets = [1, 1, 1, 0, 0, 2, 0, 3]
    predictions = [1, 0, 1, 0, 0, 2, 1, 3]
    assert scoring(y_target=targets, y_predicted=predictions,
                   metric='accuracy') == 0.75
def test_error():
    """Error metric: 2 of the 8 predictions differ from their targets."""
    targets = [1, 1, 1, 0, 0, 2, 0, 3]
    predictions = [1, 0, 1, 0, 0, 2, 1, 3]
    assert scoring(y_target=targets, y_predicted=predictions,
                   metric='error') == 0.25
def test_binary():
    """The f1 metric must reject non-binary labels with AttributeError."""
    raised = False
    try:
        scoring(y_target=[1, 1, 1, 0, 0, 2, 0, 3],
                y_predicted=[1, 0, 1, 0, 0, 2, 1, 3],
                metric='f1')
    except AttributeError:
        raised = True
    assert raised
def test_precision():
    """Precision metric: 3 true positives out of 4 positive predictions."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions, metric='precision')
    assert round(value, 3) == 0.75, value
def test_recall():
    """Recall metric: 3 true positives out of 5 actual positives."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions, metric='recall')
    assert round(value, 3) == 0.6, value
def test_truepositiverate():
    """True positive rate metric."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions,
                    metric='true_positive_rate')
    assert round(value, 3) == 0.6, value
def test_falsepositiverate():
    """False positive rate metric."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions,
                    metric='false_positive_rate')
    assert round(value, 3) == 0.333, value
def test_specificity():
    """Specificity metric."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions, metric='specificity')
    assert round(value, 3) == 0.667, value
def test_sensitivity():
    """Sensitivity metric (same value as recall on this data)."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions, metric='sensitivity')
    assert round(value, 3) == 0.6, value
def test_f1():
    """F1 metric (harmonic mean of precision and recall)."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions, metric='f1')
    assert round(value, 3) == 0.667, value
def test_matthews_corr_coef():
    """Matthews correlation coefficient metric."""
    targets = [1, 1, 1, 0, 0, 1, 0, 1]
    predictions = [1, 0, 1, 0, 0, 0, 1, 1]
    value = scoring(y_target=targets, y_predicted=predictions,
                    metric='matthews_corr_coef')
    assert round(value, 3) == 0.258, value
def test_avg_perclass_accuracy():
    """Average per-class accuracy over three classes."""
    targets = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    predictions = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    value = scoring(y_target=targets, y_predicted=predictions,
                    metric='average per-class accuracy')
    assert round(value, 3) == 0.667, value
def test_avg_perclass_error():
    """Average per-class error over three classes."""
    targets = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    predictions = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    value = scoring(y_target=targets, y_predicted=predictions,
                    metric='average per-class error')
    assert round(value, 3) == 0.333, value
| [
"mlxtend.evaluate.scoring",
"numpy.array"
] | [((773, 836), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""accuracy"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='accuracy')\n", (780, 836), False, 'from mlxtend.evaluate import scoring\n'), ((990, 1050), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""error"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='error')\n", (997, 1050), False, 'from mlxtend.evaluate import scoring\n'), ((1492, 1556), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""precision"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='precision')\n", (1499, 1556), False, 'from mlxtend.evaluate import scoring\n'), ((1702, 1763), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""recall"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='recall')\n", (1709, 1763), False, 'from mlxtend.evaluate import scoring\n'), ((1918, 1991), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""true_positive_rate"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='true_positive_rate')\n", (1925, 1991), False, 'from mlxtend.evaluate import scoring\n'), ((2183, 2257), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""false_positive_rate"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='false_positive_rate')\n", (2190, 2257), False, 'from mlxtend.evaluate import scoring\n'), ((2445, 2511), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""specificity"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='specificity')\n", (2452, 2511), False, 'from mlxtend.evaluate import scoring\n'), ((2663, 2729), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': 
'"""sensitivity"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='sensitivity')\n", (2670, 2729), False, 'from mlxtend.evaluate import scoring\n'), ((2870, 2927), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""f1"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='f1')\n", (2877, 2927), False, 'from mlxtend.evaluate import scoring\n'), ((3086, 3159), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""matthews_corr_coef"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='matthews_corr_coef')\n", (3093, 3159), False, 'from mlxtend.evaluate import scoring\n'), ((3284, 3324), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])\n', (3292, 3324), True, 'import numpy as np\n'), ((3338, 3378), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 1, 1, 2, 2, 2, 2]'], {}), '([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])\n', (3346, 3378), True, 'import numpy as np\n'), ((3389, 3475), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""average per-class accuracy"""'}), "(y_target=y_targ, y_predicted=y_pred, metric=\n 'average per-class accuracy')\n", (3396, 3475), False, 'from mlxtend.evaluate import scoring\n'), ((3592, 3632), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])\n', (3600, 3632), True, 'import numpy as np\n'), ((3646, 3686), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 1, 1, 2, 2, 2, 2]'], {}), '([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])\n', (3654, 3686), True, 'import numpy as np\n'), ((3697, 3775), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""average per-class error"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='average per-class error')\n", (3704, 3775), False, 'from mlxtend.evaluate import scoring\n'), ((296, 349), 'mlxtend.evaluate.scoring', 
'scoring', ([], {'y_target': '[1]', 'y_predicted': '[1]', 'metric': '"""test"""'}), "(y_target=[1], y_predicted=[1], metric='test')\n", (303, 349), False, 'from mlxtend.evaluate import scoring\n'), ((527, 568), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': '[1, 2]', 'y_predicted': '[1]'}), '(y_target=[1, 2], y_predicted=[1])\n', (534, 568), False, 'from mlxtend.evaluate import scoring\n'), ((1256, 1313), 'mlxtend.evaluate.scoring', 'scoring', ([], {'y_target': 'y_targ', 'y_predicted': 'y_pred', 'metric': '"""f1"""'}), "(y_target=y_targ, y_predicted=y_pred, metric='f1')\n", (1263, 1313), False, 'from mlxtend.evaluate import scoring\n')] |
#!/usr/bin/env python3
import numpy as np
def regularization(theta, lambda_):
    """
    Computes the L2 regularization term lambda_ * sum(theta_i ** 2).

    Args:
        theta: has to be numpy.ndarray, a vector of dimensions n*1.
        lambda_: has to be a float.
    Returns:
        The regularization term of theta.
        None if theta is empty or not a valid array.
    Raises:
        This function should not raise any Exception.
    """
    try:
        # Bug fix: the original tested ``np.size == 0`` which compares the
        # *function object* np.size with 0 (always False), so the empty-input
        # guard never fired and an empty theta returned 0.0 instead of None.
        if theta.size == 0:
            return None
        return lambda_ * np.sum(theta ** 2)
    except Exception:
        # Invalid inputs (e.g. non-array theta) yield None, never an error.
        return None
if __name__ == "__main__":
    # Demo: evaluate the regularization term of a sample vector for a few
    # different lambda values (lambda = 0 prints 0).
    X = np.array([0, 15, -9, 7, 12, 3, -21])
    print(regularization(X, 0.3))
    print(regularization(X, 0.01))
    print(regularization(X, 0))
"numpy.array",
"numpy.sum"
] | [((650, 686), 'numpy.array', 'np.array', (['[0, 15, -9, 7, 12, 3, -21]'], {}), '([0, 15, -9, 7, 12, 3, -21])\n', (658, 686), True, 'import numpy as np\n'), ((548, 566), 'numpy.sum', 'np.sum', (['(theta ** 2)'], {}), '(theta ** 2)\n', (554, 566), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import jax
from jax import random
from jaxdl.rl.networks.actor_nets import NormalDistPolicy
from jaxdl.rl.networks.conv_actor_nets import NormalConvDistPolicy
from jaxdl.rl.networks.critic_nets import DoubleCriticNetwork
from jaxdl.rl.networks.conv_critic_nets import DoubleConvCriticNetwork
from jaxdl.rl.networks.temperature_nets import Temperature
class TestNetworks(unittest.TestCase):
    """Smoke tests: each network initializes from a fixed PRNG key and
    produces outputs of the expected shape or value."""

    def test_actor_net(self):
        """Dense policy: sampled action has the configured dimension."""
        obs = np.array([[5., 5., 5.]], dtype=np.float32)
        rng = random.PRNGKey(0)
        policy = NormalDistPolicy([24, 24], 2)
        params = policy.init(rng, random.uniform(rng, (1, 3)))
        dist = policy.apply(params, obs)
        rng, key = jax.random.split(rng)
        action = dist.sample(seed=key)
        self.assertEqual(action.shape[1], 2)

    def test_conv_actor_net(self):
        """Convolutional policy: sampled action has the configured dimension."""
        rng = random.PRNGKey(0)
        rng, key = jax.random.split(rng)
        N = 256
        obs = jax.random.uniform(rng, shape=(1, N, N, 3))
        policy = NormalConvDistPolicy([24, 24], 2)
        params = policy.init(rng, random.uniform(rng, (1, N, N, 3)))
        dist = policy.apply(params, obs)
        rng, key = jax.random.split(rng)
        action = dist.sample(seed=key)
        self.assertEqual(action.shape[1], 2)

    def test_critic_net(self):
        """Dense double critic: apply() returns a pair of Q estimates."""
        obs = np.array([[5., 5., 5.]], dtype=np.float32)
        acts = np.array([[5., 5., 5.]], dtype=np.float32)
        rng = random.PRNGKey(0)
        critic = DoubleCriticNetwork([24, 24])
        params = critic.init(rng, random.uniform(rng, (1, 3)), random.uniform(rng, (1, 3)))
        q_values = critic.apply(params, obs, acts)
        self.assertEqual(len(q_values), 2)

    def test_conv_critic_net(self):
        """Convolutional double critic: apply() returns a pair of Q estimates."""
        rng = random.PRNGKey(0)
        rng, key = jax.random.split(rng)
        N = 256
        obs = jax.random.uniform(key, shape=(1, N, N, 3))
        acts = jax.random.uniform(key, shape=(1, 1))
        critic = DoubleConvCriticNetwork([24, 24])
        params = critic.init(rng, random.uniform(rng, (1, N, N, 3)), random.uniform(rng, (1, 1)))
        q_values = critic.apply(params, obs, acts)
        self.assertEqual(len(q_values), 2)

    def test_temperature_net(self):
        """Temperature network: initial temperature value is 1."""
        rng = random.PRNGKey(0)
        temperature = Temperature()
        params = temperature.init(rng)
        value = temperature.apply(params)
        self.assertEqual(value, 1)
if __name__ == '__main__':
    # Script entry point: discover and run the unit tests above.
    unittest.main()
"unittest.main",
"jax.random.uniform",
"jaxdl.rl.networks.conv_actor_nets.NormalConvDistPolicy",
"jaxdl.rl.networks.temperature_nets.Temperature",
"jaxdl.rl.networks.critic_nets.DoubleCriticNetwork",
"jaxdl.rl.networks.actor_nets.NormalDistPolicy",
"jaxdl.rl.networks.conv_critic_nets.DoubleConvCriticNet... | [((2482, 2497), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2495, 2497), False, 'import unittest\n'), ((473, 518), 'numpy.array', 'np.array', (['[[5.0, 5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[5.0, 5.0, 5.0]], dtype=np.float32)\n', (481, 518), True, 'import numpy as np\n'), ((526, 543), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (540, 543), False, 'from jax import random\n'), ((560, 589), 'jaxdl.rl.networks.actor_nets.NormalDistPolicy', 'NormalDistPolicy', (['[24, 24]', '(2)'], {}), '([24, 24], 2)\n', (576, 589), False, 'from jaxdl.rl.networks.actor_nets import NormalDistPolicy\n'), ((735, 756), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (751, 756), False, 'import jax\n'), ((876, 893), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (890, 893), False, 'from jax import random\n'), ((909, 930), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (925, 930), False, 'import jax\n'), ((963, 1006), 'jax.random.uniform', 'jax.random.uniform', (['rng'], {'shape': '(1, N, N, 3)'}), '(rng, shape=(1, N, N, 3))\n', (981, 1006), False, 'import jax\n'), ((1023, 1056), 'jaxdl.rl.networks.conv_actor_nets.NormalConvDistPolicy', 'NormalConvDistPolicy', (['[24, 24]', '(2)'], {}), '([24, 24], 2)\n', (1043, 1056), False, 'from jaxdl.rl.networks.conv_actor_nets import NormalConvDistPolicy\n'), ((1207, 1228), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (1223, 1228), False, 'import jax\n'), ((1353, 1398), 'numpy.array', 'np.array', (['[[5.0, 5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[5.0, 5.0, 5.0]], dtype=np.float32)\n', (1361, 1398), True, 'import numpy as np\n'), ((1410, 1455), 'numpy.array', 'np.array', (['[[5.0, 5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[5.0, 5.0, 5.0]], dtype=np.float32)\n', (1418, 1455), True, 'import numpy as np\n'), ((1463, 1480), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', 
(1477, 1480), False, 'from jax import random\n'), ((1498, 1527), 'jaxdl.rl.networks.critic_nets.DoubleCriticNetwork', 'DoubleCriticNetwork', (['[24, 24]'], {}), '([24, 24])\n', (1517, 1527), False, 'from jaxdl.rl.networks.critic_nets import DoubleCriticNetwork\n'), ((1777, 1794), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1791, 1794), False, 'from jax import random\n'), ((1810, 1831), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (1826, 1831), False, 'import jax\n'), ((1864, 1907), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '(1, N, N, 3)'}), '(key, shape=(1, N, N, 3))\n', (1882, 1907), False, 'import jax\n'), ((1922, 1959), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '(1, 1)'}), '(key, shape=(1, 1))\n', (1940, 1959), False, 'import jax\n'), ((1977, 2010), 'jaxdl.rl.networks.conv_critic_nets.DoubleConvCriticNetwork', 'DoubleConvCriticNetwork', (['[24, 24]'], {}), '([24, 24])\n', (2000, 2010), False, 'from jaxdl.rl.networks.conv_critic_nets import DoubleConvCriticNetwork\n'), ((2266, 2283), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (2280, 2283), False, 'from jax import random\n'), ((2306, 2319), 'jaxdl.rl.networks.temperature_nets.Temperature', 'Temperature', ([], {}), '()\n', (2317, 2319), False, 'from jaxdl.rl.networks.temperature_nets import Temperature\n'), ((637, 664), 'jax.random.uniform', 'random.uniform', (['rng', '(1, 3)'], {}), '(rng, (1, 3))\n', (651, 664), False, 'from jax import random\n'), ((1103, 1136), 'jax.random.uniform', 'random.uniform', (['rng', '(1, N, N, 3)'], {}), '(rng, (1, N, N, 3))\n', (1117, 1136), False, 'from jax import random\n'), ((1576, 1603), 'jax.random.uniform', 'random.uniform', (['rng', '(1, 3)'], {}), '(rng, (1, 3))\n', (1590, 1603), False, 'from jax import random\n'), ((1605, 1632), 'jax.random.uniform', 'random.uniform', (['rng', '(1, 3)'], {}), '(rng, (1, 3))\n', (1619, 1632), False, 'from jax import random\n'), ((2059, 
2092), 'jax.random.uniform', 'random.uniform', (['rng', '(1, N, N, 3)'], {}), '(rng, (1, N, N, 3))\n', (2073, 2092), False, 'from jax import random\n'), ((2094, 2121), 'jax.random.uniform', 'random.uniform', (['rng', '(1, 1)'], {}), '(rng, (1, 1))\n', (2108, 2121), False, 'from jax import random\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export checkpoint file into models"""
import os
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor, context, load_checkpoint, export
from src.finetune_eval_model import BertCLSModel, BertSquadModel, BertNERModel
from src.bert_for_finetune import BertNER
from src.utils import convert_labels_to_index
from src.model_utils.config import config as args, bert_net_cfg
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_id
def modelarts_pre_process():
    '''ModelArts pre-process: resolve the device id and rewrite the config
    paths (checkpoint, label file, export target) to absolute locations.'''
    args.device_id = get_device_id()
    script_dir = os.path.dirname(os.path.abspath(__file__))
    args.export_ckpt_file = os.path.join(script_dir, args.export_ckpt_file)
    args.label_file_path = os.path.join(args.data_path, args.label_file_path)
    args.export_file_name = os.path.join(script_dir, args.export_file_name)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    '''Build the fine-tuned BERT network for the configured downstream task,
    load the checkpoint, and export it in the requested file format.'''
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
    if args.device_target == "Ascend":
        context.set_context(device_id=args.device_id)

    # Read one label per line and build the label -> index mapping.
    with open(args.label_file_path) as f:
        label_list = [line.strip() for line in f]
    tag_to_index = convert_labels_to_index(label_list)

    use_crf = args.use_crf.lower() == "true"
    if use_crf:
        # CRF decoding needs explicit start/stop tags appended to the map.
        max_val = max(tag_to_index.values())
        tag_to_index["<START>"] = max_val + 1
        tag_to_index["<STOP>"] = max_val + 2
    number_labels = len(tag_to_index)

    # Instantiate the network matching the downstream task.
    if args.description == "run_ner":
        if use_crf:
            net = BertNER(bert_net_cfg, args.export_batch_size, False, num_labels=number_labels,
                          use_crf=True, tag_to_index=tag_to_index)
        else:
            net = BertNERModel(bert_net_cfg, False, number_labels, use_crf=use_crf)
    elif args.description == "run_classifier":
        net = BertCLSModel(bert_net_cfg, False, num_labels=number_labels)
    elif args.description == "run_squad":
        net = BertSquadModel(bert_net_cfg, False)
    else:
        raise ValueError("unsupported downstream task")
    load_checkpoint(args.export_ckpt_file, net=net)
    net.set_train(False)

    # Dummy int32 inputs of shape (batch, seq_len) used to trace the graph.
    shape = [args.export_batch_size, bert_net_cfg.seq_length]
    input_ids = Tensor(np.zeros(shape), mstype.int32)
    input_mask = Tensor(np.zeros(shape), mstype.int32)
    token_type_id = Tensor(np.zeros(shape), mstype.int32)
    label_ids = Tensor(np.zeros(shape), mstype.int32)
    # CRF-based NER additionally consumes the label ids.
    if args.description == "run_ner" and use_crf:
        input_data = [input_ids, input_mask, token_type_id, label_ids]
    else:
        input_data = [input_ids, input_mask, token_type_id]
    export(net, *input_data, file_name=args.export_file_name, file_format=args.file_format)
if __name__ == "__main__":
    # Script entry point: export the checkpoint to the configured format.
    run_export()
| [
"mindspore.context.set_context",
"os.path.abspath",
"src.finetune_eval_model.BertCLSModel",
"mindspore.export",
"src.utils.convert_labels_to_index",
"mindspore.load_checkpoint",
"numpy.zeros",
"src.model_utils.config.config.use_crf.lower",
"src.finetune_eval_model.BertSquadModel",
"src.bert_for_fi... | [((1588, 1637), 'src.model_utils.moxing_adapter.moxing_wrapper', 'moxing_wrapper', ([], {'pre_process': 'modelarts_pre_process'}), '(pre_process=modelarts_pre_process)\n', (1602, 1637), False, 'from src.model_utils.moxing_adapter import moxing_wrapper\n'), ((1282, 1297), 'src.model_utils.device_adapter.get_device_id', 'get_device_id', ([], {}), '()\n', (1295, 1297), False, 'from src.model_utils.device_adapter import get_device_id\n'), ((1385, 1431), 'os.path.join', 'os.path.join', (['_file_dir', 'args.export_ckpt_file'], {}), '(_file_dir, args.export_ckpt_file)\n', (1397, 1431), False, 'import os\n'), ((1459, 1509), 'os.path.join', 'os.path.join', (['args.data_path', 'args.label_file_path'], {}), '(args.data_path, args.label_file_path)\n', (1471, 1509), False, 'import os\n'), ((1538, 1584), 'os.path.join', 'os.path.join', (['_file_dir', 'args.export_file_name'], {}), '(_file_dir, args.export_file_name)\n', (1550, 1584), False, 'import os\n'), ((1686, 1764), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args.device_target'}), '(mode=context.GRAPH_MODE, device_target=args.device_target)\n', (1705, 1764), False, 'from mindspore import Tensor, context, load_checkpoint, export\n'), ((2010, 2045), 'src.utils.convert_labels_to_index', 'convert_labels_to_index', (['label_list'], {}), '(label_list)\n', (2033, 2045), False, 'from src.utils import convert_labels_to_index\n'), ((2968, 3015), 'mindspore.load_checkpoint', 'load_checkpoint', (['args.export_ckpt_file'], {'net': 'net'}), '(args.export_ckpt_file, net=net)\n', (2983, 3015), False, 'from mindspore import Tensor, context, load_checkpoint, export\n'), ((3658, 3750), 'mindspore.export', 'export', (['net', '*input_data'], {'file_name': 'args.export_file_name', 'file_format': 'args.file_format'}), '(net, *input_data, file_name=args.export_file_name, file_format=args.\n file_format)\n', (3664, 3750), False, 'from mindspore import Tensor, 
context, load_checkpoint, export\n'), ((1330, 1355), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1345, 1355), False, 'import os\n'), ((1812, 1857), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'args.device_id'}), '(device_id=args.device_id)\n', (1831, 1857), False, 'from mindspore import Tensor, context, load_checkpoint, export\n'), ((2054, 2074), 'src.model_utils.config.config.use_crf.lower', 'args.use_crf.lower', ([], {}), '()\n', (2072, 2074), True, 'from src.model_utils.config import config as args, bert_net_cfg\n'), ((3065, 3124), 'numpy.zeros', 'np.zeros', (['[args.export_batch_size, bert_net_cfg.seq_length]'], {}), '([args.export_batch_size, bert_net_cfg.seq_length])\n', (3073, 3124), True, 'import numpy as np\n'), ((3164, 3223), 'numpy.zeros', 'np.zeros', (['[args.export_batch_size, bert_net_cfg.seq_length]'], {}), '([args.export_batch_size, bert_net_cfg.seq_length])\n', (3172, 3223), True, 'import numpy as np\n'), ((3266, 3325), 'numpy.zeros', 'np.zeros', (['[args.export_batch_size, bert_net_cfg.seq_length]'], {}), '([args.export_batch_size, bert_net_cfg.seq_length])\n', (3274, 3325), True, 'import numpy as np\n'), ((3364, 3423), 'numpy.zeros', 'np.zeros', (['[args.export_batch_size, bert_net_cfg.seq_length]'], {}), '([args.export_batch_size, bert_net_cfg.seq_length])\n', (3372, 3423), True, 'import numpy as np\n'), ((2365, 2385), 'src.model_utils.config.config.use_crf.lower', 'args.use_crf.lower', ([], {}), '()\n', (2383, 2385), True, 'from src.model_utils.config import config as args, bert_net_cfg\n'), ((2415, 2539), 'src.bert_for_finetune.BertNER', 'BertNER', (['bert_net_cfg', 'args.export_batch_size', '(False)'], {'num_labels': 'number_labels', 'use_crf': '(True)', 'tag_to_index': 'tag_to_index'}), '(bert_net_cfg, args.export_batch_size, False, num_labels=\n number_labels, use_crf=True, tag_to_index=tag_to_index)\n', (2422, 2539), False, 'from src.bert_for_finetune import BertNER\n'), ((2745, 
2804), 'src.finetune_eval_model.BertCLSModel', 'BertCLSModel', (['bert_net_cfg', '(False)'], {'num_labels': 'number_labels'}), '(bert_net_cfg, False, num_labels=number_labels)\n', (2757, 2804), False, 'from src.finetune_eval_model import BertCLSModel, BertSquadModel, BertNERModel\n'), ((3481, 3501), 'src.model_utils.config.config.use_crf.lower', 'args.use_crf.lower', ([], {}), '()\n', (3499, 3501), True, 'from src.model_utils.config import config as args, bert_net_cfg\n'), ((2861, 2896), 'src.finetune_eval_model.BertSquadModel', 'BertSquadModel', (['bert_net_cfg', '(False)'], {}), '(bert_net_cfg, False)\n', (2875, 2896), False, 'from src.finetune_eval_model import BertCLSModel, BertSquadModel, BertNERModel\n'), ((2651, 2671), 'src.model_utils.config.config.use_crf.lower', 'args.use_crf.lower', ([], {}), '()\n', (2669, 2671), True, 'from src.model_utils.config import config as args, bert_net_cfg\n')] |
"""
Tax-Calculator federal income and payroll tax Calculator class.
"""
# CODING-STYLE CHECKS:
# pycodestyle calculator.py
# pylint --disable=locally-disabled calculator.py
#
# pylint: disable=too-many-lines,no-value-for-parameter
import copy
import numpy as np
import pandas as pd
import paramtools
from taxcalc.calcfunctions import (TaxInc, SchXYZTax, GainsTax, AGIsurtax,
NetInvIncTax, AMT, EI_PayrollTax, Adj,
DependentCare, ALD_InvInc_ec_base, CapGains,
SSBenefits, UBI, AGI, ItemDedCap, ItemDed,
StdDed, AdditionalMedicareTax, F2441, EITC,
RefundablePayrollTaxCredit,
ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new,
BennetRomneyChildTaxCredit, NewYoungChildTaxCredit,
RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR,
AmOppCreditParts, EducationTaxCredit,
CharityCredit,
NonrefundableCredits, C1040, IITAX,
BenefitSurtax, BenefitLimitation,
FairShareTax, LumpSumTax, BenefitPrograms,
ExpandIncome, AfterTaxIncome)
from taxcalc.policy import Policy
from taxcalc.records import Records
from taxcalc.consumption import Consumption
from taxcalc.growdiff import GrowDiff
from taxcalc.growfactors import GrowFactors
from taxcalc.utils import (DIST_VARIABLES, create_distribution_table,
DIFF_VARIABLES, create_difference_table,
create_diagnostic_table,
ce_aftertax_expanded_income,
mtr_graph_data, atr_graph_data, xtr_graph_plot,
pch_graph_data, pch_graph_plot)
# import pdb
class Calculator():
"""
Constructor for the Calculator class.
Parameters
----------
policy: Policy class object
this argument must be specified and object is copied for internal use
records: Records class object
this argument must be specified and object is copied for internal use
verbose: boolean
specifies whether or not to write to stdout data-loaded and
data-extrapolated progress reports; default value is false.
sync_years: boolean
specifies whether or not to synchronize policy year and records year;
default value is true.
consumption: Consumption class object
specifies consumption response assumptions used to calculate
"effective" marginal tax rates; default is None, which implies
no consumption responses assumed in marginal tax rate calculations;
when argument is an object it is copied for internal use;
also specifies consumption value of in-kind benefis with no in-kind
consumption values specified implying consumption value is equal to
government cost of providing the in-kind benefits
Raises
------
ValueError:
if parameters are not the appropriate type.
Returns
-------
class instance: Calculator
Notes
-----
The most efficient way to specify current-law and reform Calculator
objects is as follows:
pol = Policy()
rec = Records()
calc1 = Calculator(policy=pol, records=rec) # current-law
pol.implement_reform(...)
calc2 = Calculator(policy=pol, records=rec) # reform
All calculations are done on the internal copies of the Policy and
Records objects passed to each of the two Calculator constructors.
"""
# pylint: disable=too-many-public-methods
def __init__(self, policy=None, records=None, verbose=False,
sync_years=True, consumption=None):
# pylint: disable=too-many-arguments,too-many-branches
if isinstance(policy, Policy):
self.__policy = copy.deepcopy(policy)
else:
raise ValueError('must specify policy as a Policy object')
if isinstance(records, Records):
self.__records = copy.deepcopy(records)
else:
raise ValueError('must specify records as a Records object')
if self.__policy.current_year < self.__records.data_year:
self.__policy.set_year(self.__records.data_year)
if consumption is None:
self.__consumption = Consumption()
elif isinstance(consumption, Consumption):
self.__consumption = copy.deepcopy(consumption)
else:
raise ValueError('consumption must be None or Consumption object')
if self.__consumption.current_year < self.__policy.current_year:
self.__consumption.set_year(self.__policy.current_year)
if verbose:
if self.__records.IGNORED_VARS:
print('Your data include the following unused ' +
'variables that will be ignored:')
for var in self.__records.IGNORED_VARS:
print(' ' +
var)
current_year_is_data_year = (
self.__records.current_year == self.__records.data_year)
if sync_years and current_year_is_data_year:
if verbose:
print('You loaded data for ' +
str(self.__records.data_year) + '.')
while self.__records.current_year < self.__policy.current_year:
self.__records.increment_year()
if verbose:
print('Tax-Calculator startup automatically ' +
'extrapolated your data to ' +
str(self.__records.current_year) + '.')
else:
if verbose:
print('Tax-Calculator startup did not ' +
'extrapolate your data.')
assert self.__policy.current_year == self.__records.current_year
assert self.__policy.current_year == self.__consumption.current_year
self.__stored_records = None
def increment_year(self):
"""
Advance all embedded objects to next year.
"""
next_year = self.__policy.current_year + 1
self.__records.increment_year()
self.__policy.set_year(next_year)
self.__consumption.set_year(next_year)
def advance_to_year(self, year):
"""
The advance_to_year function gives an optional way of implementing
increment year functionality by immediately specifying the year
as input. New year must be at least the current year.
"""
iteration = year - self.current_year
if iteration < 0:
raise ValueError('New current year must be ' +
'greater than or equal to current year!')
for _ in range(iteration):
self.increment_year()
assert self.current_year == year
    def calc_all(self, zero_out_calc_vars=False):
        """
        Call all tax-calculation functions for the current_year.

        Parameters
        ----------
        zero_out_calc_vars: boolean
            passed through to the annual tax-calculation logic;
            presumably zeroes out previously calculated variables before
            recomputing — confirm against _calc_one_year implementation.

        Returns
        -------
        Nothing; all results are stored in the embedded Records object.
        """
        # conducts static analysis of Calculator object for current_year
        # NOTE: call order matters — later functions consume variables
        # produced by earlier ones.
        UBI(self.__policy, self.__records)
        BenefitPrograms(self)
        # core income and payroll tax computation for the current year
        self._calc_one_year(zero_out_calc_vars)
        BenefitSurtax(self)
        BenefitLimitation(self)
        FairShareTax(self.__policy, self.__records)
        LumpSumTax(self.__policy, self.__records)
        ExpandIncome(self.__policy, self.__records)
        AfterTaxIncome(self.__policy, self.__records)
def weighted_total(self, variable_name):
"""
Return all-filing-unit weighted total of named Records variable.
"""
return (self.array(variable_name) * self.array('s006')).sum()
def total_weight(self):
"""
Return all-filing-unit total of sampling weights.
NOTE: var_weighted_mean = calc.weighted_total(var)/calc.total_weight()
"""
return self.array('s006').sum()
def dataframe(self, variable_list, all_vars=False):
"""
Return Pandas DataFrame containing the listed variables from the
embedded Records object. If all_vars is True, then the variable_list
is ignored and all variables used as input to and calculated by the
Calculator.calc_all() method (which does not include marginal tax
rates) are included in the returned Pandas DataFrame.
"""
if all_vars:
varlist = list(self.__records.USABLE_READ_VARS |
self.__records.CALCULATED_VARS)
else:
assert isinstance(variable_list, list)
varlist = variable_list
arys = [self.array(varname) for varname in varlist]
dframe = pd.DataFrame(data=np.column_stack(arys), columns=varlist)
del arys
del varlist
return dframe
def array(self, variable_name, variable_value=None):
"""
If variable_value is None, return numpy ndarray containing the
named variable in embedded Records object.
If variable_value is not None, set named variable in embedded Records
object to specified variable_value and return None (which can be
ignored).
"""
if variable_value is None:
return getattr(self.__records, variable_name)
assert isinstance(variable_value, np.ndarray)
setattr(self.__records, variable_name, variable_value)
return None
def n65(self):
"""
Return numpy ndarray containing the number of
individuals age 65+ in each filing unit.
"""
vdf = self.dataframe(['age_head', 'age_spouse', 'elderly_dependents'])
return ((vdf['age_head'] >= 65).astype(int) +
(vdf['age_spouse'] >= 65).astype(int) +
vdf['elderly_dependents'])
def incarray(self, variable_name, variable_add):
"""
Add variable_add to named variable in embedded Records object.
"""
assert isinstance(variable_add, np.ndarray)
setattr(self.__records, variable_name,
self.array(variable_name) + variable_add)
def zeroarray(self, variable_name):
"""
Set named variable in embedded Records object to zeros.
"""
setattr(self.__records, variable_name, np.zeros(self.array_len))
def store_records(self):
"""
Make internal copy of embedded Records object that can then be
restored after interim calculations that make temporary changes
to the embedded Records object.
"""
assert self.__stored_records is None
self.__stored_records = copy.deepcopy(self.__records)
def restore_records(self):
"""
Set the embedded Records object to the stored Records object
that was saved in the last call to the store_records() method.
"""
assert isinstance(self.__stored_records, Records)
self.__records = copy.deepcopy(self.__stored_records)
del self.__stored_records
self.__stored_records = None
@property
def array_len(self):
"""
Length of arrays in embedded Records object.
"""
return self.__records.array_length
def policy_param(self, param_name, param_value=None):
"""
If param_value is None, return named parameter in
embedded Policy object.
If param_value is not None, set named parameter in
embedded Policy object to specified param_value and
return None (which can be ignored).
"""
if param_value is None:
val = getattr(self.__policy, param_name)
if param_name.startswith("_"):
return val
else:
return val[0] # drop down a dimension.
setattr(self.__policy, param_name, param_value)
return None
def consump_param(self, param_name):
"""
Return value of named parameter in embedded Consumption object.
"""
return getattr(self.__consumption, param_name)
def consump_benval_params(self):
"""
Return list of benefit-consumption-value parameter values
in embedded Consumption object.
"""
return self.__consumption.benval_params()
@property
def reform_warnings(self):
"""
Calculator class embedded Policy object's parameter_warnings.
"""
return self.__policy.parameter_warnings
@property
def current_year(self):
"""
Calculator class current calendar year property.
"""
return self.__policy.current_year
@property
def data_year(self):
"""
Calculator class initial (i.e., first) records data year property.
"""
return self.__records.data_year
def diagnostic_table(self, num_years):
"""
Generate multi-year diagnostic table containing aggregate statistics;
this method leaves the Calculator object unchanged.
Parameters
----------
num_years : Integer
number of years to include in diagnostic table starting
with the Calculator object's current_year (must be at least
one and no more than what would exceed Policy end_year)
Returns
-------
Pandas DataFrame object containing the multi-year diagnostic table
"""
assert num_years >= 1
max_num_years = self.__policy.end_year - self.__policy.current_year + 1
assert num_years <= max_num_years
calc = copy.deepcopy(self)
yearlist = list()
varlist = list()
for iyr in range(1, num_years + 1):
calc.calc_all()
yearlist.append(calc.current_year)
varlist.append(calc.dataframe(DIST_VARIABLES))
if iyr < num_years:
calc.increment_year()
del calc
return create_diagnostic_table(varlist, yearlist)
    def distribution_tables(self, calc, groupby,
                            pop_quantiles=False, scaling=True):
        """
        Get results from self and calc, sort them by expanded_income into
        table rows defined by groupby, compute grouped statistics, and
        return tables as a pair of Pandas dataframes.
        This method leaves the Calculator object(s) unchanged.
        Note that the returned tables have consistent income groups (based
        on the self expanded_income) even though the baseline expanded_income
        in self and the reform expanded_income in calc are different.

        Parameters
        ----------
        calc : Calculator object or None
            typically represents the reform while self represents the baseline;
            if calc is None, the second returned table is None

        groupby : String object
            options for input: 'weighted_deciles', 'standard_income_bins',
            'soi_agi_bins'
            determines how the columns in resulting Pandas DataFrame are sorted

        pop_quantiles : boolean
            specifies whether or not weighted_deciles contain an equal number
            of people (True) or an equal number of filing units (False)

        scaling : boolean
            specifies create_distribution_table utility function argument
            that determines whether table entry values are scaled or not

        Return and typical usage
        ------------------------
        dist1, dist2 = calc1.distribution_tables(calc2, 'weighted_deciles')
        OR
        dist1, _ = calc1.distribution_tables(None, 'weighted_deciles')
        (where calc1 is a baseline Calculator object
        and calc2 is a reform Calculator object).
        Each of the dist1 and optional dist2 is a distribution table as a
        Pandas DataFrame with DIST_TABLE_COLUMNS and groupby rows.
        NOTE: when groupby is 'weighted_deciles', the returned tables have 3
              extra rows containing top-decile detail consisting of statistics
              for the 0.90-0.95 quantile range (bottom half of top decile),
              for the 0.95-0.99 quantile range, and
              for the 0.99-1.00 quantile range (top one percent); and the
              returned table splits the bottom decile into filing units with
              negative (denoted by a 0-10n row label),
              zero (denoted by a 0-10z row label), and
              positive (denoted by a 0-10p row label) values of the
              specified income_measure.
        """
        # nested functions used only by this method
        def distribution_table_dataframe(calcobj):
            """
            Return pandas DataFrame containing the DIST_TABLE_COLUMNS variables
            from specified Calculator object, calcobj.
            """
            dframe = calcobj.dataframe(DIST_VARIABLES)
            # weighted count of all people or filing units
            if pop_quantiles:
                dframe['count'] = np.multiply(dframe['s006'], dframe['XTOT'])
            else:
                dframe['count'] = dframe['s006']
            # weighted count of those with itemized-deduction returns
            dframe['count_ItemDed'] = dframe['count'].where(
                dframe['c04470'] > 0., 0.)
            # weighted count of those with standard-deduction returns
            dframe['count_StandardDed'] = dframe['count'].where(
                dframe['standard'] > 0., 0.)
            # weighted count of those with positive Alternative Minimum Tax
            dframe['count_AMT'] = dframe['count'].where(
                dframe['c09600'] > 0., 0.)
            return dframe

        def have_same_income_measure(calc1, calc2):
            """
            Return true if calc1 and calc2 contain the same expanded_income;
            otherwise, return false.  (Note that "same" means nobody's
            expanded_income differs by more than one cent.)
            """
            im1 = calc1.array('expanded_income')
            im2 = calc2.array('expanded_income')
            return np.allclose(im1, im2, rtol=0.0, atol=0.01)

        # main logic of distribution_tables method
        assert calc is None or isinstance(calc, Calculator)
        assert groupby in ('weighted_deciles', 'standard_income_bins',
                           'soi_agi_bins')
        if calc is not None:
            assert np.allclose(self.array('s006'),
                               calc.array('s006'))  # check rows in same order
        # baseline table is always computed from self
        var_dataframe = distribution_table_dataframe(self)
        imeasure = 'expanded_income'
        dt1 = create_distribution_table(var_dataframe, groupby, imeasure,
                                        pop_quantiles, scaling)
        del var_dataframe
        if calc is None:
            dt2 = None
        else:
            assert calc.current_year == self.current_year
            assert calc.array_len == self.array_len
            assert np.allclose(self.consump_benval_params(),
                               calc.consump_benval_params())
            var_dataframe = distribution_table_dataframe(calc)
            # when incomes differ, sort the reform table by the BASELINE
            # income measure so both tables share consistent income groups
            if have_same_income_measure(self, calc):
                imeasure = 'expanded_income'
            else:
                imeasure = 'expanded_income_baseline'
                var_dataframe[imeasure] = self.array('expanded_income')
            dt2 = create_distribution_table(var_dataframe, groupby, imeasure,
                                            pop_quantiles, scaling)
            del var_dataframe
        return (dt1, dt2)
def difference_table(self, calc, groupby, tax_to_diff,
pop_quantiles=False):
"""
Get results from self and calc, sort them by expanded_income into
table rows defined by groupby, compute grouped statistics, and
return tax-difference table as a Pandas dataframe.
This method leaves the Calculator objects unchanged.
Note that the returned tables have consistent income groups (based
on the self expanded_income) even though the baseline expanded_income
in self and the reform expanded_income in calc are different.
Parameters
----------
calc : Calculator object
calc represents the reform while self represents the baseline
groupby : String object
options for input: 'weighted_deciles', 'standard_income_bins'
determines how the columns in resulting Pandas DataFrame are sorted
tax_to_diff : String object
options for input: 'iitax', 'payrolltax', 'combined'
specifies which tax to difference
pop_quantiles : boolean
specifies whether or not weighted_deciles contain an equal number
of people (True) or an equal number of filing units (False)
Returns and typical usage
-------------------------
diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')
(where calc1 is a baseline Calculator object
and calc2 is a reform Calculator object).
The returned diff is a difference table as a Pandas DataFrame
with DIST_TABLE_COLUMNS and groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
assert isinstance(calc, Calculator)
assert calc.current_year == self.current_year
assert calc.array_len == self.array_len
assert np.allclose(self.consump_benval_params(),
calc.consump_benval_params())
self_var_dframe = self.dataframe(DIFF_VARIABLES)
calc_var_dframe = calc.dataframe(DIFF_VARIABLES)
diff = create_difference_table(self_var_dframe, calc_var_dframe,
groupby, tax_to_diff, pop_quantiles)
del self_var_dframe
del calc_var_dframe
return diff
MTR_VALID_VARIABLES = ['e00200p', 'e00200s',
'e00900p', 'e00300',
'e00400', 'e00600',
'e00650', 'e01400',
'e01700', 'e02000',
'e02400', 'p22250',
'p23250', 'e18500',
'e19200', 'e26270',
'e19800', 'e20100',
'k1bx14p']
    def mtr(self, variable_str='e00200p', diff_choice=0.01,
            negative_finite_diff=False,
            zero_out_calculated_vars=False,
            calc_all_already_called=False,
            wrt_full_compensation=True):
        """
        Calculates the marginal payroll, individual income, and combined
        tax rates for every tax filing unit, leaving the Calculator object
        in exactly the same state as it would be in after a calc_all() call.

        The marginal tax rates are approximated as the change in tax
        liability caused by a small increase (the finite_diff) in the variable
        specified by the variable_str divided by that small increase in the
        variable, when wrt_full_compensation is false.

        If wrt_full_compensation is true, then the marginal tax rates
        are computed as the change in tax liability divided by the change
        in total compensation caused by the small increase in the variable
        (where the change in total compensation is the sum of the small
        increase in the variable and any increase in the employer share of
        payroll taxes caused by the small increase in the variable).

        If using 'e00200s' as variable_str, the marginal tax rate for all
        records where MARS != 2 will be missing. If you want to perform a
        function such as np.mean() on the returned arrays, you will need to
        account for this.

        Parameters
        ----------
        variable_str: string
            specifies type of income or expense that is increased to compute
            the marginal tax rates.  See Notes for list of valid variables.

        diff_choice: float
            size of the finite difference added to the specified variable
            (default 0.01, that is, a one-cent difference)

        negative_finite_diff: boolean
            specifies whether or not marginal tax rates are computed by
            subtracting (rather than adding) a small finite_diff amount
            to the specified variable.

        zero_out_calculated_vars: boolean
            specifies value of zero_out_calc_vars parameter used in calls
            of Calculator.calc_all() method.

        calc_all_already_called: boolean
            specifies whether self has already had its Calculor.calc_all()
            method called, in which case this method will not do a final
            calc_all() call but use the incoming embedded Records object
            as the outgoing Records object embedding in self.

        wrt_full_compensation: boolean
            specifies whether or not marginal tax rates on earned income
            are computed with respect to (wrt) changes in total compensation
            that includes the employer share of OASDI and HI payroll taxes.

        Returns
        -------
        A tuple of numpy arrays in the following order:
        mtr_payrolltax: an array of marginal payroll tax rates.
        mtr_incometax: an array of marginal individual income tax rates.
        mtr_combined: an array of marginal combined tax rates, which is
                      the sum of mtr_payrolltax and mtr_incometax.

        Notes
        -----
        The arguments zero_out_calculated_vars and calc_all_already_called
        cannot both be true.

        Valid variable_str values are:
        'e00200p', taxpayer wage/salary earnings (also included in e00200);
        'e00200s', spouse wage/salary earnings (also included in e00200);
        'e00900p', taxpayer Schedule C self-employment income (also in e00900);
        'e00300',  taxable interest income;
        'e00400',  federally-tax-exempt interest income;
        'e00600',  all dividends included in AGI
        'e00650',  qualified dividends (also included in e00600)
        'e01400',  federally-taxable IRA distribution;
        'e01700',  federally-taxable pension benefits;
        'e02000',  Schedule E total net income/loss
        'e02400',  all social security (OASDI) benefits;
        'p22250',  short-term capital gains;
        'p23250',  long-term capital gains;
        'e18500',  Schedule A real-estate-tax paid;
        'e19200',  Schedule A interest paid;
        'e26270',  S-corporation/partnership income (also included in e02000);
        'e19800',  Charity cash contributions;
        'e20100',  Charity non-cash contributions;
        'k1bx14p', Partnership income (also included in e26270 and e02000).
        """
        # pylint: disable=too-many-arguments,too-many-statements
        # pylint: disable=too-many-locals,too-many-branches
        assert not zero_out_calculated_vars or not calc_all_already_called
        # check validity of variable_str parameter
        if variable_str not in Calculator.MTR_VALID_VARIABLES:
            msg = 'mtr variable_str="{}" is not valid'
            raise ValueError(msg.format(variable_str))
        # specify value for finite_diff parameter
        finite_diff = diff_choice  # a one-cent difference
        if negative_finite_diff:
            finite_diff *= -1.0
        # remember records object in order to restore it after mtr computations
        self.store_records()
        # extract variable array(s) from embedded records object;
        # aggregate variables (e00200, e00900, ...) that include the varied
        # component must also be saved so they can be bumped consistently
        variable = self.array(variable_str)
        if variable_str == 'e00200p':
            earnings_var = self.array('e00200')
        elif variable_str == 'e00200s':
            earnings_var = self.array('e00200')
        elif variable_str == 'e00900p':
            seincome_var = self.array('e00900')
        elif variable_str == 'e00650':
            divincome_var = self.array('e00600')
        elif variable_str == 'e26270':
            scheincome_var = self.array('e02000')
        elif variable_str == 'k1bx14p':
            scheincome_var = self.array('e02000')
            scorpincome_var = self.array('e26270')
        # calculate level of taxes after a marginal increase in income
        self.array(variable_str, variable + finite_diff)
        if variable_str == 'e00200p':
            self.array('e00200', earnings_var + finite_diff)
        elif variable_str == 'e00200s':
            self.array('e00200', earnings_var + finite_diff)
        elif variable_str == 'e00900p':
            self.array('e00900', seincome_var + finite_diff)
        elif variable_str == 'e00650':
            self.array('e00600', divincome_var + finite_diff)
        elif variable_str == 'e26270':
            self.array('e02000', scheincome_var + finite_diff)
        elif variable_str == 'k1bx14p':
            self.array('e02000', scheincome_var + finite_diff)
            self.array('e26270', scorpincome_var + finite_diff)
        if self.__consumption.has_response():
            self.__consumption.response(self.__records, finite_diff)
        self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)
        payrolltax_chng = self.array('payrolltax')
        incometax_chng = self.array('iitax')
        combined_taxes_chng = incometax_chng + payrolltax_chng
        # calculate base level of taxes after restoring records object
        self.restore_records()
        if not calc_all_already_called or zero_out_calculated_vars:
            self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)
        payrolltax_base = self.array('payrolltax')
        incometax_base = self.array('iitax')
        combined_taxes_base = incometax_base + payrolltax_base
        # compute marginal changes in combined tax liability
        payrolltax_diff = payrolltax_chng - payrolltax_base
        incometax_diff = incometax_chng - incometax_base
        combined_diff = combined_taxes_chng - combined_taxes_base
        # specify optional adjustment for employer (er) OASDI+HI payroll taxes
        mtr_on_earnings = variable_str in ('e00200p', 'e00200s')
        if wrt_full_compensation and mtr_on_earnings:
            # earnings below the OASDI cap or above the additional-taxable
            # threshold incur both employer OASDI and HI taxes
            oasdi_taxed = np.logical_or(
                variable < self.policy_param('SS_Earnings_c'),
                variable >= self.policy_param('SS_Earnings_thd')
            )
            adj = np.where(oasdi_taxed,
                           0.5 * (self.policy_param('FICA_ss_trt') +
                                  self.policy_param('FICA_mc_trt')),
                           0.5 * self.policy_param('FICA_mc_trt'))
        else:
            adj = 0.0
        # compute marginal tax rates
        mtr_payrolltax = payrolltax_diff / (finite_diff * (1.0 + adj))
        mtr_incometax = incometax_diff / (finite_diff * (1.0 + adj))
        mtr_combined = combined_diff / (finite_diff * (1.0 + adj))
        # if variable_str is e00200s, set MTR to NaN for units without a spouse
        if variable_str == 'e00200s':
            mars = self.array('MARS')
            mtr_payrolltax = np.where(mars == 2, mtr_payrolltax, np.nan)
            mtr_incometax = np.where(mars == 2, mtr_incometax, np.nan)
            mtr_combined = np.where(mars == 2, mtr_combined, np.nan)
        # delete intermediate variables
        del variable
        if variable_str in ('e00200p', 'e00200s'):
            del earnings_var
        elif variable_str == 'e00900p':
            del seincome_var
        elif variable_str == 'e00650':
            del divincome_var
        elif variable_str == 'e26270':
            del scheincome_var
        elif variable_str == 'k1bx14p':
            del scheincome_var
            del scorpincome_var
        del payrolltax_chng
        del incometax_chng
        del combined_taxes_chng
        del payrolltax_base
        del incometax_base
        del combined_taxes_base
        del payrolltax_diff
        del incometax_diff
        del combined_diff
        del adj
        # return the three marginal tax rate arrays
        return (mtr_payrolltax, mtr_incometax, mtr_combined)
    def mtr_graph(self, calc,
                  mars='ALL',
                  mtr_measure='combined',
                  mtr_variable='e00200p',
                  alt_e00200p_text='',
                  mtr_wrt_full_compen=False,
                  income_measure='expanded_income',
                  pop_quantiles=False,
                  dollar_weighting=False):
        """
        Create marginal tax rate graph that can be written to an HTML
        file (using the write_graph_file utility function) or shown on
        the screen immediately in an interactive or notebook session
        (following the instructions in the documentation of the
        xtr_graph_plot utility function).

        Parameters
        ----------
        calc : Calculator object
            calc represents the reform while self represents the baseline

        mars : integer or string
            specifies which filing status subgroup to show in the graph
            - 'ALL': include all filing units in sample
            - 1: include only single filing units
            - 2: include only married-filing-jointly filing units
            - 3: include only married-filing-separately filing units
            - 4: include only head-of-household filing units

        mtr_measure : string
            specifies which marginal tax rate to show on graph's y axis
            - 'itax': marginal individual income tax rate
            - 'ptax': marginal payroll tax rate
            - 'combined': sum of marginal income and payroll tax rates

        mtr_variable : string
            any string in the Calculator.VALID_MTR_VARS set
            specifies variable to change in order to compute marginal tax rates

        alt_e00200p_text : string
            text to use in place of mtr_variable
            when mtr_variable is 'e00200p';
            if empty string then use 'e00200p'

        mtr_wrt_full_compen : boolean
            see documentation of Calculator.mtr()
            argument wrt_full_compensation
            (value has an effect only if mtr_variable is 'e00200p')

        income_measure : string
            specifies which income variable to show on the graph's x axis
            - 'wages': wage and salary income (e00200)
            - 'agi': adjusted gross income, AGI (c00100)
            - 'expanded_income': broader than AGI (see definition in
                                 calcfunctions.py file).

        pop_quantiles : boolean
            specifies whether or not weighted_deciles contain an equal number
            of people (True) or an equal number of filing units (False)

        dollar_weighting : boolean
            False implies both income_measure percentiles on x axis
            and mtr values for each percentile on the y axis are
            computed without using dollar income_measure weights (just
            sampling weights); True implies both income_measure
            percentiles on x axis and mtr values for each percentile
            on the y axis are computed using dollar income_measure
            weights (in addition to sampling weights). Specifying
            True produces a graph x axis that shows income_measure
            (not filing unit) percentiles.

        Returns
        -------
        graph that is a bokeh.plotting figure object
        """
        # pylint: disable=too-many-arguments,too-many-locals
        # check that two Calculator objects are comparable
        assert isinstance(calc, Calculator)
        assert calc.current_year == self.current_year
        assert calc.array_len == self.array_len
        # check validity of mars parameter
        assert mars == 'ALL' or 1 <= mars <= 4
        # check validity of income_measure and map it to a Records variable
        assert income_measure in ('expanded_income', 'agi', 'wages')
        if income_measure == 'expanded_income':
            income_variable = 'expanded_income'
        elif income_measure == 'agi':
            income_variable = 'c00100'
        elif income_measure == 'wages':
            income_variable = 'e00200'
        # check validity of mtr_measure parameter
        assert mtr_measure in ('combined', 'itax', 'ptax')
        # calculate marginal tax rates for baseline (self) and reform (calc)
        (mtr1_ptax, mtr1_itax,
         mtr1_combined) = self.mtr(variable_str=mtr_variable,
                                   wrt_full_compensation=mtr_wrt_full_compen)
        (mtr2_ptax, mtr2_itax,
         mtr2_combined) = calc.mtr(variable_str=mtr_variable,
                                   wrt_full_compensation=mtr_wrt_full_compen)
        # select the requested mtr measure from each set of rates
        if mtr_measure == 'combined':
            mtr1 = mtr1_combined
            mtr2 = mtr2_combined
        elif mtr_measure == 'itax':
            mtr1 = mtr1_itax
            mtr2 = mtr2_itax
        elif mtr_measure == 'ptax':
            mtr1 = mtr1_ptax
            mtr2 = mtr2_ptax
        # extract datafames needed by mtr_graph_data utility function
        record_variables = ['s006', 'XTOT']
        if mars != 'ALL':
            record_variables.append('MARS')
        record_variables.append(income_variable)
        vdf = self.dataframe(record_variables)
        vdf['mtr1'] = mtr1
        vdf['mtr2'] = mtr2
        # select filing-status subgroup, if any
        if mars != 'ALL':
            vdf = vdf[vdf['MARS'] == mars]
        # construct data for graph
        data = mtr_graph_data(vdf,
                              year=self.current_year,
                              mars=mars,
                              mtr_measure=mtr_measure,
                              alt_e00200p_text=alt_e00200p_text,
                              mtr_wrt_full_compen=mtr_wrt_full_compen,
                              income_measure=income_measure,
                              pop_quantiles=pop_quantiles,
                              dollar_weighting=dollar_weighting)
        # delete intermediate variables
        del vdf
        del mtr1_ptax
        del mtr1_itax
        del mtr1_combined
        del mtr1
        del mtr2_ptax
        del mtr2_itax
        del mtr2_combined
        del mtr2
        del record_variables
        # construct figure from data
        fig = xtr_graph_plot(data,
                             width=850,
                             height=500,
                             xlabel='',
                             ylabel='',
                             title='',
                             legendloc='bottom_right')
        del data
        return fig
def atr_graph(self, calc,
mars='ALL',
atr_measure='combined',
pop_quantiles=False):
"""
Create average tax rate graph that can be written to an HTML
file (using the write_graph_file utility function) or shown on
the screen immediately in an interactive or notebook session
(following the instructions in the documentation of the
xtr_graph_plot utility function). The graph shows the mean
average tax rate for each expanded-income percentile excluding
any percentile that includes a filing unit with negative or
zero basline (self) expanded income.
Parameters
----------
calc : Calculator object
calc represents the reform while self represents the baseline,
where both self and calc have calculated taxes for this year
before being used by this method
mars : integer or string
specifies which filing status subgroup to show in the graph
- 'ALL': include all filing units in sample
- 1: include only single filing units
- 2: include only married-filing-jointly filing units
- 3: include only married-filing-separately filing units
- 4: include only head-of-household filing units
atr_measure : string
specifies which average tax rate to show on graph's y axis
- 'itax': average individual income tax rate
- 'ptax': average payroll tax rate
- 'combined': sum of average income and payroll tax rates
pop_quantiles : boolean
specifies whether or not weighted_deciles contain an equal number
of people (True) or an equal number of filing units (False)
Returns
-------
graph that is a bokeh.plotting figure object
"""
# check that two Calculator objects are comparable
assert isinstance(calc, Calculator)
assert calc.current_year == self.current_year
assert calc.array_len == self.array_len
# check validity of function arguments
assert mars == 'ALL' or 1 <= mars <= 4
assert atr_measure in ('combined', 'itax', 'ptax')
# extract needed output that is assumed unchanged by reform from self
record_variables = ['s006', 'XTOT']
if mars != 'ALL':
record_variables.append('MARS')
record_variables.append('expanded_income')
vdf = self.dataframe(record_variables)
# create 'tax1' and 'tax2' columns given specified atr_measure
if atr_measure == 'combined':
vdf['tax1'] = self.array('combined')
vdf['tax2'] = calc.array('combined')
elif atr_measure == 'itax':
vdf['tax1'] = self.array('iitax')
vdf['tax2'] = calc.array('iitax')
elif atr_measure == 'ptax':
vdf['tax1'] = self.array('payrolltax')
vdf['tax2'] = calc.array('payrolltax')
# select filing-status subgroup, if any
if mars != 'ALL':
vdf = vdf[vdf['MARS'] == mars]
# construct data for graph
data = atr_graph_data(vdf,
year=self.current_year,
mars=mars,
atr_measure=atr_measure,
pop_quantiles=pop_quantiles)
# delete intermediate variables
del vdf
del record_variables
# construct figure from data
fig = xtr_graph_plot(data,
width=850,
height=500,
xlabel='',
ylabel='',
title='',
legendloc='bottom_right')
del data
return fig
def pch_graph(self, calc, pop_quantiles=False):
"""
Create percentage change in after-tax expanded income graph that
can be written to an HTML file (using the write_graph_file utility
function) or shown on the screen immediately in an interactive or
notebook session (following the instructions in the documentation
of the xtr_graph_plot utility function). The graph shows the
dollar-weighted mean percentage change in after-tax expanded income
for each expanded-income percentile excluding any percentile that
includes a filing unit with negative or zero basline (self) expanded
income.
Parameters
----------
calc : Calculator object
calc represents the reform while self represents the baseline,
where both self and calc have calculated taxes for this year
before being used by this method
pop_quantiles : boolean
specifies whether or not weighted_deciles contain an equal number
of people (True) or an equal number of filing units (False)
Returns
-------
graph that is a bokeh.plotting figure object
"""
# check that two Calculator objects are comparable
assert isinstance(calc, Calculator)
assert calc.current_year == self.current_year
assert calc.array_len == self.array_len
# extract needed output from baseline and reform Calculator objects
vdf1 = self.dataframe(['s006', 'XTOT', 'aftertax_income',
'expanded_income'])
vdf2 = calc.dataframe(['s006', 'XTOT', 'aftertax_income'])
assert np.allclose(vdf1['s006'], vdf2['s006'])
assert np.allclose(vdf1['XTOT'], vdf2['XTOT'])
vdf = pd.DataFrame()
vdf['s006'] = vdf1['s006']
vdf['XTOT'] = vdf1['XTOT']
vdf['expanded_income'] = vdf1['expanded_income']
vdf['chg_aftinc'] = vdf2['aftertax_income'] - vdf1['aftertax_income']
# construct data for graph
data = pch_graph_data(vdf, year=self.current_year,
pop_quantiles=pop_quantiles)
del vdf
del vdf1
del vdf2
# construct figure from data
fig = pch_graph_plot(data,
width=850,
height=500,
xlabel='',
ylabel='',
title='')
del data
return fig
REQUIRED_REFORM_KEYS = set(['policy'])
REQUIRED_ASSUMP_KEYS = set(['consumption',
'growdiff_baseline', 'growdiff_response'])
@staticmethod
def read_json_param_objects(reform, assump):
"""
Read JSON reform and assump objects and
return a composite dictionary containing four key:dict pairs:
'policy':dict, 'consumption':dict,
'growdiff_baseline':dict, and 'growdiff_response':dict.
Note that either of the two function arguments can be None.
If reform is None, the dict in the 'policy':dict pair is empty.
If assump is None, the dict in all the other key:dict pairs is empty.
Also note that either of the two function arguments can be strings
containing a valid JSON string (rather than a local filename).
Either of the two function arguments can also be a valid URL string
beginning with 'http' and pointing to a valid JSON file hosted online.
The reform file/URL contents or JSON string must be like this:
{"policy": {...}} OR {...}
(in other words, the top-level policy key is optional)
and the assump file/URL contents or JSON string must be like this:
{"consumption": {...},
"growdiff_baseline": {...},
"growdiff_response": {...}}
The {...} should be empty like this {} if not specifying a policy
reform or if not specifying any non-default economic assumptions
of that type.
The 'policy' subdictionary of the returned dictionary is
suitable as input into the Policy.implement_reform method.
The 'consumption' subdictionary of the returned dictionary is
suitable as input into the Consumption.update_consumption method.
The 'growdiff_baseline' subdictionary of the returned dictionary is
suitable as input into the GrowDiff.update_growdiff method.
The 'growdiff_response' subdictionary of the returned dictionary is
suitable as input into the GrowDiff.update_growdiff method.
"""
# construct the composite dictionary
param_dict = dict()
param_dict['policy'] = Policy.read_json_reform(reform)
param_dict['consumption'] = Consumption.read_json_update(assump)
for topkey in ['growdiff_baseline', 'growdiff_response']:
param_dict[topkey] = GrowDiff.read_json_update(assump, topkey)
# return the composite dictionary
return param_dict
@staticmethod
def reform_documentation(params, policy_dicts=None):
"""
Generate reform documentation versus current-law policy.
Parameters
----------
params: dict
dictionary is structured like dict returned from
the static Calculator.read_json_param_objects() method
policy_dicts : list of dict or None
each dictionary in list is a params['policy'] dictionary
representing second and subsequent elements of a compound
reform; None implies no compound reform with the simple
reform characterized in the params['policy'] dictionary
Returns
-------
doc: String
the documentation for the specified policy reform
"""
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
# nested function used only in reform_documentation function
def param_doc(years_list, updated, baseline):
"""
Parameters
----------
years_list: list of parameter-change years
updated: reform Policy or updated GrowDiff object
base: current-law Policy or default GrowDiff object
Returns
-------
doc: String
"""
# pylint: disable=too-many-locals
# nested function used only in param_doc
def lines(text, num_indent_spaces, max_line_length=77):
"""
Return list of text lines, each one of which is no longer
than max_line_length, with the second and subsequent lines
being indented by the number of specified num_indent_spaces;
each line in the list ends with the '\n' character
"""
if len(text) < max_line_length:
# all text fits on one line
line = text + '\n'
return [line]
# all text does not fix on one line
first_line = True
line_list = list()
words = text.split()
while words:
if first_line:
line = ''
first_line = False
else:
line = ' ' * num_indent_spaces
while (words and
(len(words[0]) + len(line)) < max_line_length):
line += words.pop(0) + ' '
line = line[:-1] + '\n'
line_list.append(line)
return line_list
# begin main logic of nested function param_doc
# pylint: disable=too-many-nested-blocks
doc = ''
assert isinstance(years_list, list)
years = sorted(years_list)
for year in years:
baseline.set_year(year)
updated.set_year(year)
assert set(baseline.keys()) == set(updated.keys())
params_with_diff = list()
for pname in baseline.keys():
upda_value = getattr(updated, pname)
base_value = getattr(baseline, pname)
if (
(isinstance(upda_value, np.ndarray) and
np.allclose(upda_value, base_value)) or
(not isinstance(upda_value, np.ndarray) and
upda_value != base_value)
):
params_with_diff.append(pname)
if params_with_diff:
mdata_base = baseline.specification(meta_data=True)
# write year
doc += '{}:\n'.format(year)
for pname in sorted(params_with_diff):
# write updated value line
pval = getattr(updated, pname).tolist()[0]
if mdata_base[pname]['type'] == 'bool':
if isinstance(pval, list):
pval = [bool(item) for item in pval]
else:
pval = bool(pval)
doc += ' {} : {}\n'.format(pname, pval)
# ... write optional param-vector-index line
if isinstance(pval, list):
labels = paramtools.consistent_labels(
[mdata_base[pname]["value"][0]]
)
label = None
for _label in labels:
if _label not in ("value", "year"):
label = _label
break
if label:
lv = baseline._stateless_label_grid[label]
lv = [
str(item) for item in lv
]
doc += ' ' * (
4 + len(pname)
) + '{}\n'.format(lv)
# ... write param-name line
name = mdata_base[pname]['title']
for line in lines('name: ' + name, 6):
doc += ' ' + line
# ... write param-description line
desc = mdata_base[pname]['description']
for line in lines('desc: ' + desc, 6):
doc += ' ' + line
# ... write param-baseline-value line
if isinstance(baseline, Policy):
pval = getattr(baseline, pname).tolist()[0]
ptype = mdata_base[pname]['type']
if isinstance(pval, list):
if ptype == 'bool':
pval = [bool(item) for item in pval]
elif ptype == 'bool':
pval = bool(pval)
doc += ' baseline_value: {}\n'.format(pval)
else: # if baseline is GrowDiff object
# each GrowDiff parameter has zero as default value
doc += ' baseline_value: 0.0\n'
del mdata_base
return doc
# begin main logic of reform_documentation
# create Policy object with current-law-policy values
gdiff_base = GrowDiff()
gdiff_base.update_growdiff(params['growdiff_baseline'])
gfactors_clp = GrowFactors()
gdiff_base.apply_to(gfactors_clp)
clp = Policy(gfactors=gfactors_clp)
# create Policy object with post-reform values
gdiff_resp = GrowDiff()
gdiff_resp.update_growdiff(params['growdiff_response'])
gfactors_ref = GrowFactors()
gdiff_base.apply_to(gfactors_ref)
gdiff_resp.apply_to(gfactors_ref)
ref = Policy(gfactors=gfactors_ref)
ref.implement_reform(params['policy'])
reform_years = Policy.years_in_revision(params['policy'])
if policy_dicts is not None: # compound reform has been specified
assert isinstance(policy_dicts, list)
for policy_dict in policy_dicts:
ref.implement_reform(policy_dict)
xyears = Policy.years_in_revision(policy_dict)
for year in xyears:
if year not in reform_years:
reform_years.append(year)
# generate documentation text
doc = 'REFORM DOCUMENTATION\n'
# ... documentation for baseline growdiff assumptions
doc += 'Baseline Growth-Difference Assumption Values by Year:\n'
years = GrowDiff.years_in_revision(params['growdiff_baseline'])
if years:
doc += param_doc(years, gdiff_base, GrowDiff())
else:
doc += 'none: no baseline GrowDiff assumptions specified\n'
# ... documentation for reform growdiff assumptions
doc += 'Response Growth-Difference Assumption Values by Year:\n'
years = GrowDiff.years_in_revision(params['growdiff_response'])
if years:
doc += param_doc(years, gdiff_resp, GrowDiff())
else:
doc += 'none: no response GrowDiff assumptions specified\n'
# ... documentation for (possibly compound) policy reform
if policy_dicts is None:
doc += 'Policy Reform Parameter Values by Year:\n'
else:
doc += 'Compound Policy Reform Parameter Values by Year:\n'
# ... use clp and ref Policy objects to generate documentation
if reform_years:
doc += param_doc(reform_years, ref, clp)
else:
doc += 'none: using current-law policy parameters\n'
# cleanup local objects
del gdiff_base
del gfactors_clp
del gdiff_resp
del gfactors_ref
del clp
del ref
del years
del reform_years
# return documentation string
return doc
def ce_aftertax_income(self, calc,
custom_params=None,
require_no_agg_tax_change=True):
"""
Return dictionary that contains certainty-equivalent of the
expected utility of after-tax expanded income computed for
several constant-relative-risk-aversion parameter values
for each of two Calculator objects: self, which represents
the pre-reform situation, and calc, which represents the
post-reform situation, both of which MUST have had calc_call()
called before being passed to this function.
IMPORTANT NOTES: These normative welfare calculations are very
simple. It is assumed that utility is a function of only
consumption, and that consumption is equal to after-tax
income. This means that any assumed responses that
change work effort will not affect utility via the
correpsonding change in leisure. And any saving response to
changes in after-tax income do not affect consumption.
The cmin value is the consumption level below which marginal
utility is considered to be constant. This allows the handling
of filing units with very low or even negative after-tax expanded
income in the expected-utility and certainty-equivalent calculations.
"""
# check that calc and self are consistent
assert isinstance(calc, Calculator)
assert calc.array_len == self.array_len
assert calc.current_year == self.current_year
assert np.allclose(calc.consump_benval_params(),
self.consump_benval_params())
# extract data from self and calc
records_variables = ['s006', 'combined', 'expanded_income']
df1 = self.dataframe(records_variables)
df2 = calc.dataframe(records_variables)
cedict = ce_aftertax_expanded_income(
df1, df2,
custom_params=custom_params,
require_no_agg_tax_change=require_no_agg_tax_change)
cedict['year'] = self.current_year
return cedict
# ----- begin private methods of Calculator class -----
    def _taxinc_to_amt(self):
        """
        Call TaxInc through AMT functions.

        Runs, in this fixed order: TaxInc, SchXYZTax, GainsTax,
        AGIsurtax, NetInvIncTax, AMT, each applied to the current
        policy and records objects.  NOTE(review): the ordering
        presumably matters because later functions consume record
        variables written by earlier ones — confirm before reordering.
        """
        TaxInc(self.__policy, self.__records)
        SchXYZTax(self.__policy, self.__records)
        GainsTax(self.__policy, self.__records)
        AGIsurtax(self.__policy, self.__records)
        NetInvIncTax(self.__policy, self.__records)
        AMT(self.__policy, self.__records)
    def _calc_one_year(self, zero_out_calc_vars: bool = False):
        """
        Call all the functions except those in the calc_all() method.

        Computes taxes for one year, choosing for each filing unit
        whichever of the standard deduction or itemized deductions
        yields the lower combined regular-plus-AMT tax (c05800), and
        then computes credits and the final income tax liability.

        Parameters
        ----------
        zero_out_calc_vars : bool
            if True, reset changing calculated record variables to zero
            before doing the calculations
        """
        # pylint: disable=too-many-statements
        if zero_out_calc_vars:
            self.__records.zero_out_changing_calculated_vars()
        # pdb.set_trace()
        # --- income, adjustments, and deduction components ---
        EI_PayrollTax(self.__policy, self.__records)
        DependentCare(self.__policy, self.__records)
        Adj(self.__policy, self.__records)
        ALD_InvInc_ec_base(self.__policy, self.__records)
        CapGains(self.__policy, self.__records)
        SSBenefits(self.__policy, self.__records)
        AGI(self.__policy, self.__records)
        ItemDedCap(self.__policy, self.__records)
        ItemDed(self.__policy, self.__records)
        AdditionalMedicareTax(self.__policy, self.__records)
        StdDed(self.__policy, self.__records)
        # Store calculated standard deduction, calculate
        # taxes with standard deduction, store AMT + Regular Tax
        std = self.array('standard').copy()
        item = self.array('c04470').copy()
        item_no_limit = self.array('c21060').copy()
        item_phaseout = self.array('c21040').copy()
        item_component_variable_names = ['c17000', 'c18300', 'c19200',
                                         'c19700', 'c20500', 'c20800']
        item_cvar = dict()
        for cvname in item_component_variable_names:
            item_cvar[cvname] = self.array(cvname).copy()
        # zero itemized-deduction variables so only the standard
        # deduction is in effect for this first tax pass
        self.zeroarray('c04470')
        self.zeroarray('c21060')
        self.zeroarray('c21040')
        for cvname in item_component_variable_names:
            self.zeroarray(cvname)
        self._taxinc_to_amt()
        std_taxes = self.array('c05800').copy()
        # Set standard deduction to zero, calculate taxes w/o
        # standard deduction, and store AMT + Regular Tax
        self.zeroarray('standard')
        self.array('c21060', item_no_limit)
        self.array('c21040', item_phaseout)
        self.array('c04470', item)
        self._taxinc_to_amt()
        item_taxes = self.array('c05800').copy()
        # Keep, for each filing unit, whichever deduction method gave
        # the lower tax: where itemizing wins (item_taxes < std_taxes)
        # zero the standard deduction and restore the itemized amounts;
        # otherwise restore the standard deduction and zero the
        # itemized amounts
        self.array('standard', np.where(item_taxes < std_taxes,
                                        0., std))
        self.array('c04470', np.where(item_taxes < std_taxes,
                                      item, 0.))
        self.array('c21060', np.where(item_taxes < std_taxes,
                                      item_no_limit, 0.))
        self.array('c21040', np.where(item_taxes < std_taxes,
                                      item_phaseout, 0.))
        for cvname in item_component_variable_names:
            self.array(cvname, np.where(item_taxes < std_taxes,
                                        item_cvar[cvname], 0.))
        del std
        del item
        del item_no_limit
        del item_phaseout
        del item_cvar
        # Calculate taxes with optimal itemized deduction
        self._taxinc_to_amt()
        # --- credits and final income tax liability ---
        F2441(self.__policy, self.__records)
        EITC(self.__policy, self.__records)
        RefundablePayrollTaxCredit(self.__policy, self.__records)
        PersonalTaxCredit(self.__policy, self.__records)
        AmOppCreditParts(self.__policy, self.__records)
        SchR(self.__policy, self.__records)
        EducationTaxCredit(self.__policy, self.__records)
        CharityCredit(self.__policy, self.__records)
        ChildDepTaxCredit(self.__policy, self.__records)
        NonrefundableCredits(self.__policy, self.__records)
        AdditionalCTC(self.__policy, self.__records)
        C1040(self.__policy, self.__records)
        CTC_new(self.__policy, self.__records)
        CDCC_new(self.__policy, self.__records)
        BennetRomneyChildTaxCredit(self.__policy, self.__records)
        NewYoungChildTaxCredit(self.__policy, self.__records)
        RomneyChildAllowance(self.__policy, self.__records)
        TwoStepChildTaxCredit(self.__policy, self.__records)
        IITAX(self.__policy, self.__records)
| [
"taxcalc.calcfunctions.EITC",
"taxcalc.calcfunctions.UBI",
"taxcalc.utils.pch_graph_plot",
"taxcalc.utils.ce_aftertax_expanded_income",
"taxcalc.utils.create_diagnostic_table",
"numpy.allclose",
"taxcalc.calcfunctions.TaxInc",
"taxcalc.calcfunctions.C1040",
"taxcalc.policy.Policy.read_json_reform",
... | [((7458, 7492), 'taxcalc.calcfunctions.UBI', 'UBI', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (7461, 7492), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7502, 7523), 'taxcalc.calcfunctions.BenefitPrograms', 'BenefitPrograms', (['self'], {}), '(self)\n', (7517, 7523), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7582, 7601), 'taxcalc.calcfunctions.BenefitSurtax', 'BenefitSurtax', (['self'], {}), '(self)\n', (7595, 7601), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, 
AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7611, 7634), 'taxcalc.calcfunctions.BenefitLimitation', 'BenefitLimitation', (['self'], {}), '(self)\n', (7628, 7634), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7644, 7687), 'taxcalc.calcfunctions.FairShareTax', 'FairShareTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (7656, 7687), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7697, 7738), 
'taxcalc.calcfunctions.LumpSumTax', 'LumpSumTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (7707, 7738), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7748, 7791), 'taxcalc.calcfunctions.ExpandIncome', 'ExpandIncome', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (7760, 7791), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((7801, 7846), 'taxcalc.calcfunctions.AfterTaxIncome', 'AfterTaxIncome', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (7815, 7846), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, 
StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((11057, 11086), 'copy.deepcopy', 'copy.deepcopy', (['self.__records'], {}), '(self.__records)\n', (11070, 11086), False, 'import copy\n'), ((11374, 11410), 'copy.deepcopy', 'copy.deepcopy', (['self.__stored_records'], {}), '(self.__stored_records)\n', (11387, 11410), False, 'import copy\n'), ((14067, 14086), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14080, 14086), False, 'import copy\n'), ((14428, 14470), 'taxcalc.utils.create_diagnostic_table', 'create_diagnostic_table', (['varlist', 'yearlist'], {}), '(varlist, yearlist)\n', (14451, 14470), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((19191, 19278), 'taxcalc.utils.create_distribution_table', 'create_distribution_table', (['var_dataframe', 'groupby', 'imeasure', 'pop_quantiles', 'scaling'], {}), '(var_dataframe, groupby, imeasure, pop_quantiles,\n scaling)\n', (19216, 19278), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((22909, 23007), 'taxcalc.utils.create_difference_table', 'create_difference_table', (['self_var_dframe', 'calc_var_dframe', 'groupby', 'tax_to_diff', 'pop_quantiles'], {}), '(self_var_dframe, calc_var_dframe, groupby,\n 
tax_to_diff, pop_quantiles)\n', (22932, 23007), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((38829, 39094), 'taxcalc.utils.mtr_graph_data', 'mtr_graph_data', (['vdf'], {'year': 'self.current_year', 'mars': 'mars', 'mtr_measure': 'mtr_measure', 'alt_e00200p_text': 'alt_e00200p_text', 'mtr_wrt_full_compen': 'mtr_wrt_full_compen', 'income_measure': 'income_measure', 'pop_quantiles': 'pop_quantiles', 'dollar_weighting': 'dollar_weighting'}), '(vdf, year=self.current_year, mars=mars, mtr_measure=\n mtr_measure, alt_e00200p_text=alt_e00200p_text, mtr_wrt_full_compen=\n mtr_wrt_full_compen, income_measure=income_measure, pop_quantiles=\n pop_quantiles, dollar_weighting=dollar_weighting)\n', (38843, 39094), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((39651, 39756), 'taxcalc.utils.xtr_graph_plot', 'xtr_graph_plot', (['data'], {'width': '(850)', 'height': '(500)', 'xlabel': '""""""', 'ylabel': '""""""', 'title': '""""""', 'legendloc': '"""bottom_right"""'}), "(data, width=850, height=500, xlabel='', ylabel='', title='',\n legendloc='bottom_right')\n", (39665, 39756), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((43256, 43369), 'taxcalc.utils.atr_graph_data', 'atr_graph_data', (['vdf'], {'year': 'self.current_year', 'mars': 'mars', 'atr_measure': 'atr_measure', 'pop_quantiles': 'pop_quantiles'}), '(vdf, year=self.current_year, mars=mars, 
atr_measure=\n atr_measure, pop_quantiles=pop_quantiles)\n', (43270, 43369), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((43630, 43735), 'taxcalc.utils.xtr_graph_plot', 'xtr_graph_plot', (['data'], {'width': '(850)', 'height': '(500)', 'xlabel': '""""""', 'ylabel': '""""""', 'title': '""""""', 'legendloc': '"""bottom_right"""'}), "(data, width=850, height=500, xlabel='', ylabel='', title='',\n legendloc='bottom_right')\n", (43644, 43735), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((45687, 45726), 'numpy.allclose', 'np.allclose', (["vdf1['s006']", "vdf2['s006']"], {}), "(vdf1['s006'], vdf2['s006'])\n", (45698, 45726), True, 'import numpy as np\n'), ((45743, 45782), 'numpy.allclose', 'np.allclose', (["vdf1['XTOT']", "vdf2['XTOT']"], {}), "(vdf1['XTOT'], vdf2['XTOT'])\n", (45754, 45782), True, 'import numpy as np\n'), ((45798, 45812), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (45810, 45812), True, 'import pandas as pd\n'), ((46074, 46146), 'taxcalc.utils.pch_graph_data', 'pch_graph_data', (['vdf'], {'year': 'self.current_year', 'pop_quantiles': 'pop_quantiles'}), '(vdf, year=self.current_year, pop_quantiles=pop_quantiles)\n', (46088, 46146), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((46284, 46359), 'taxcalc.utils.pch_graph_plot', 'pch_graph_plot', (['data'], {'width': '(850)', 'height': '(500)', 'xlabel': '""""""', 'ylabel': 
'""""""', 'title': '""""""'}), "(data, width=850, height=500, xlabel='', ylabel='', title='')\n", (46298, 46359), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((48793, 48824), 'taxcalc.policy.Policy.read_json_reform', 'Policy.read_json_reform', (['reform'], {}), '(reform)\n', (48816, 48824), False, 'from taxcalc.policy import Policy\n'), ((48862, 48898), 'taxcalc.consumption.Consumption.read_json_update', 'Consumption.read_json_update', (['assump'], {}), '(assump)\n', (48890, 48898), False, 'from taxcalc.consumption import Consumption\n'), ((55945, 55955), 'taxcalc.growdiff.GrowDiff', 'GrowDiff', ([], {}), '()\n', (55953, 55955), False, 'from taxcalc.growdiff import GrowDiff\n'), ((56045, 56058), 'taxcalc.growfactors.GrowFactors', 'GrowFactors', ([], {}), '()\n', (56056, 56058), False, 'from taxcalc.growfactors import GrowFactors\n'), ((56117, 56146), 'taxcalc.policy.Policy', 'Policy', ([], {'gfactors': 'gfactors_clp'}), '(gfactors=gfactors_clp)\n', (56123, 56146), False, 'from taxcalc.policy import Policy\n'), ((56225, 56235), 'taxcalc.growdiff.GrowDiff', 'GrowDiff', ([], {}), '()\n', (56233, 56235), False, 'from taxcalc.growdiff import GrowDiff\n'), ((56325, 56338), 'taxcalc.growfactors.GrowFactors', 'GrowFactors', ([], {}), '()\n', (56336, 56338), False, 'from taxcalc.growfactors import GrowFactors\n'), ((56440, 56469), 'taxcalc.policy.Policy', 'Policy', ([], {'gfactors': 'gfactors_ref'}), '(gfactors=gfactors_ref)\n', (56446, 56469), False, 'from taxcalc.policy import Policy\n'), ((56542, 56584), 'taxcalc.policy.Policy.years_in_revision', 'Policy.years_in_revision', (["params['policy']"], {}), "(params['policy'])\n", (56566, 56584), False, 'from taxcalc.policy import Policy\n'), ((57244, 57299), 'taxcalc.growdiff.GrowDiff.years_in_revision', 
'GrowDiff.years_in_revision', (["params['growdiff_baseline']"], {}), "(params['growdiff_baseline'])\n", (57270, 57299), False, 'from taxcalc.growdiff import GrowDiff\n'), ((57620, 57675), 'taxcalc.growdiff.GrowDiff.years_in_revision', 'GrowDiff.years_in_revision', (["params['growdiff_response']"], {}), "(params['growdiff_response'])\n", (57646, 57675), False, 'from taxcalc.growdiff import GrowDiff\n'), ((60545, 60668), 'taxcalc.utils.ce_aftertax_expanded_income', 'ce_aftertax_expanded_income', (['df1', 'df2'], {'custom_params': 'custom_params', 'require_no_agg_tax_change': 'require_no_agg_tax_change'}), '(df1, df2, custom_params=custom_params,\n require_no_agg_tax_change=require_no_agg_tax_change)\n', (60572, 60668), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((60947, 60984), 'taxcalc.calcfunctions.TaxInc', 'TaxInc', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (60953, 60984), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((60994, 61034), 'taxcalc.calcfunctions.SchXYZTax', 'SchXYZTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61003, 61034), False, 'from taxcalc.calcfunctions import 
TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61044, 61083), 'taxcalc.calcfunctions.GainsTax', 'GainsTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61052, 61083), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61093, 61133), 'taxcalc.calcfunctions.AGIsurtax', 'AGIsurtax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61102, 61133), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, 
TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61143, 61186), 'taxcalc.calcfunctions.NetInvIncTax', 'NetInvIncTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61155, 61186), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61196, 61230), 'taxcalc.calcfunctions.AMT', 'AMT', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61199, 61230), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61566, 61610), 'taxcalc.calcfunctions.EI_PayrollTax', 'EI_PayrollTax', (['self.__policy', 'self.__records'], {}), 
'(self.__policy, self.__records)\n', (61579, 61610), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61620, 61664), 'taxcalc.calcfunctions.DependentCare', 'DependentCare', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61633, 61664), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61674, 61708), 'taxcalc.calcfunctions.Adj', 'Adj', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61677, 61708), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, 
CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61718, 61767), 'taxcalc.calcfunctions.ALD_InvInc_ec_base', 'ALD_InvInc_ec_base', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61736, 61767), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61777, 61816), 'taxcalc.calcfunctions.CapGains', 'CapGains', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61785, 61816), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61826, 
61867), 'taxcalc.calcfunctions.SSBenefits', 'SSBenefits', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61836, 61867), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61877, 61911), 'taxcalc.calcfunctions.AGI', 'AGI', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61880, 61911), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61921, 61962), 'taxcalc.calcfunctions.ItemDedCap', 'ItemDedCap', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61931, 61962), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, 
AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((61972, 62010), 'taxcalc.calcfunctions.ItemDed', 'ItemDed', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (61979, 62010), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((62020, 62072), 'taxcalc.calcfunctions.AdditionalMedicareTax', 'AdditionalMedicareTax', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (62041, 62072), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, 
BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((62082, 62119), 'taxcalc.calcfunctions.StdDed', 'StdDed', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (62088, 62119), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64344, 64380), 'taxcalc.calcfunctions.F2441', 'F2441', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64349, 64380), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64390, 64425), 'taxcalc.calcfunctions.EITC', 'EITC', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64394, 64425), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, 
DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64435, 64492), 'taxcalc.calcfunctions.RefundablePayrollTaxCredit', 'RefundablePayrollTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64461, 64492), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64502, 64550), 'taxcalc.calcfunctions.PersonalTaxCredit', 'PersonalTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64519, 64550), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, 
PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64560, 64607), 'taxcalc.calcfunctions.AmOppCreditParts', 'AmOppCreditParts', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64576, 64607), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64617, 64652), 'taxcalc.calcfunctions.SchR', 'SchR', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64621, 64652), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64662, 64711), 'taxcalc.calcfunctions.EducationTaxCredit', 'EducationTaxCredit', (['self.__policy', 'self.__records'], {}), 
'(self.__policy, self.__records)\n', (64680, 64711), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64721, 64765), 'taxcalc.calcfunctions.CharityCredit', 'CharityCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64734, 64765), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64775, 64823), 'taxcalc.calcfunctions.ChildDepTaxCredit', 'ChildDepTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64792, 64823), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, 
ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64833, 64884), 'taxcalc.calcfunctions.NonrefundableCredits', 'NonrefundableCredits', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64853, 64884), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64894, 64938), 'taxcalc.calcfunctions.AdditionalCTC', 'AdditionalCTC', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64907, 64938), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, 
BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64948, 64984), 'taxcalc.calcfunctions.C1040', 'C1040', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (64953, 64984), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((64994, 65032), 'taxcalc.calcfunctions.CTC_new', 'CTC_new', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65001, 65032), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65042, 65081), 'taxcalc.calcfunctions.CDCC_new', 'CDCC_new', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65050, 65081), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, 
SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65091, 65148), 'taxcalc.calcfunctions.BennetRomneyChildTaxCredit', 'BennetRomneyChildTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65117, 65148), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65158, 65211), 'taxcalc.calcfunctions.NewYoungChildTaxCredit', 'NewYoungChildTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65180, 65211), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, 
AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65221, 65272), 'taxcalc.calcfunctions.RomneyChildAllowance', 'RomneyChildAllowance', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65241, 65272), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65282, 65334), 'taxcalc.calcfunctions.TwoStepChildTaxCredit', 'TwoStepChildTaxCredit', (['self.__policy', 'self.__records'], {}), '(self.__policy, self.__records)\n', (65303, 65334), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((65344, 65380), 'taxcalc.calcfunctions.IITAX', 'IITAX', (['self.__policy', 'self.__records'], {}), '(self.__policy, 
self.__records)\n', (65349, 65380), False, 'from taxcalc.calcfunctions import TaxInc, SchXYZTax, GainsTax, AGIsurtax, NetInvIncTax, AMT, EI_PayrollTax, Adj, DependentCare, ALD_InvInc_ec_base, CapGains, SSBenefits, UBI, AGI, ItemDedCap, ItemDed, StdDed, AdditionalMedicareTax, F2441, EITC, RefundablePayrollTaxCredit, ChildDepTaxCredit, AdditionalCTC, CTC_new, CDCC_new, BennetRomneyChildTaxCredit, NewYoungChildTaxCredit, RomneyChildAllowance, TwoStepChildTaxCredit, PersonalTaxCredit, SchR, AmOppCreditParts, EducationTaxCredit, CharityCredit, NonrefundableCredits, C1040, IITAX, BenefitSurtax, BenefitLimitation, FairShareTax, LumpSumTax, BenefitPrograms, ExpandIncome, AfterTaxIncome\n'), ((4203, 4224), 'copy.deepcopy', 'copy.deepcopy', (['policy'], {}), '(policy)\n', (4216, 4224), False, 'import copy\n'), ((4384, 4406), 'copy.deepcopy', 'copy.deepcopy', (['records'], {}), '(records)\n', (4397, 4406), False, 'import copy\n'), ((4692, 4705), 'taxcalc.consumption.Consumption', 'Consumption', ([], {}), '()\n', (4703, 4705), False, 'from taxcalc.consumption import Consumption\n'), ((10708, 10732), 'numpy.zeros', 'np.zeros', (['self.array_len'], {}), '(self.array_len)\n', (10716, 10732), True, 'import numpy as np\n'), ((18642, 18684), 'numpy.allclose', 'np.allclose', (['im1', 'im2'], {'rtol': '(0.0)', 'atol': '(0.01)'}), '(im1, im2, rtol=0.0, atol=0.01)\n', (18653, 18684), True, 'import numpy as np\n'), ((19974, 20061), 'taxcalc.utils.create_distribution_table', 'create_distribution_table', (['var_dataframe', 'groupby', 'imeasure', 'pop_quantiles', 'scaling'], {}), '(var_dataframe, groupby, imeasure, pop_quantiles,\n scaling)\n', (19999, 20061), False, 'from taxcalc.utils import DIST_VARIABLES, create_distribution_table, DIFF_VARIABLES, create_difference_table, create_diagnostic_table, ce_aftertax_expanded_income, mtr_graph_data, atr_graph_data, xtr_graph_plot, pch_graph_data, pch_graph_plot\n'), ((32322, 32365), 'numpy.where', 'np.where', (['(mars == 2)', 'mtr_payrolltax', 
'np.nan'], {}), '(mars == 2, mtr_payrolltax, np.nan)\n', (32330, 32365), True, 'import numpy as np\n'), ((32395, 32437), 'numpy.where', 'np.where', (['(mars == 2)', 'mtr_incometax', 'np.nan'], {}), '(mars == 2, mtr_incometax, np.nan)\n', (32403, 32437), True, 'import numpy as np\n'), ((32466, 32507), 'numpy.where', 'np.where', (['(mars == 2)', 'mtr_combined', 'np.nan'], {}), '(mars == 2, mtr_combined, np.nan)\n', (32474, 32507), True, 'import numpy as np\n'), ((49000, 49041), 'taxcalc.growdiff.GrowDiff.read_json_update', 'GrowDiff.read_json_update', (['assump', 'topkey'], {}), '(assump, topkey)\n', (49025, 49041), False, 'from taxcalc.growdiff import GrowDiff\n'), ((63508, 63550), 'numpy.where', 'np.where', (['(item_taxes < std_taxes)', '(0.0)', 'std'], {}), '(item_taxes < std_taxes, 0.0, std)\n', (63516, 63550), True, 'import numpy as np\n'), ((63622, 63665), 'numpy.where', 'np.where', (['(item_taxes < std_taxes)', 'item', '(0.0)'], {}), '(item_taxes < std_taxes, item, 0.0)\n', (63630, 63665), True, 'import numpy as np\n'), ((63735, 63787), 'numpy.where', 'np.where', (['(item_taxes < std_taxes)', 'item_no_limit', '(0.0)'], {}), '(item_taxes < std_taxes, item_no_limit, 0.0)\n', (63743, 63787), True, 'import numpy as np\n'), ((63857, 63909), 'numpy.where', 'np.where', (['(item_taxes < std_taxes)', 'item_phaseout', '(0.0)'], {}), '(item_taxes < std_taxes, item_phaseout, 0.0)\n', (63865, 63909), True, 'import numpy as np\n'), ((4792, 4818), 'copy.deepcopy', 'copy.deepcopy', (['consumption'], {}), '(consumption)\n', (4805, 4818), False, 'import copy\n'), ((9101, 9122), 'numpy.column_stack', 'np.column_stack', (['arys'], {}), '(arys)\n', (9116, 9122), True, 'import numpy as np\n'), ((17539, 17582), 'numpy.multiply', 'np.multiply', (["dframe['s006']", "dframe['XTOT']"], {}), "(dframe['s006'], dframe['XTOT'])\n", (17550, 17582), True, 'import numpy as np\n'), ((56835, 56872), 'taxcalc.policy.Policy.years_in_revision', 'Policy.years_in_revision', (['policy_dict'], {}), 
'(policy_dict)\n', (56859, 56872), False, 'from taxcalc.policy import Policy\n'), ((57368, 57378), 'taxcalc.growdiff.GrowDiff', 'GrowDiff', ([], {}), '()\n', (57376, 57378), False, 'from taxcalc.growdiff import GrowDiff\n'), ((57744, 57754), 'taxcalc.growdiff.GrowDiff', 'GrowDiff', ([], {}), '()\n', (57752, 57754), False, 'from taxcalc.growdiff import GrowDiff\n'), ((64035, 64091), 'numpy.where', 'np.where', (['(item_taxes < std_taxes)', 'item_cvar[cvname]', '(0.0)'], {}), '(item_taxes < std_taxes, item_cvar[cvname], 0.0)\n', (64043, 64091), True, 'import numpy as np\n'), ((52562, 52597), 'numpy.allclose', 'np.allclose', (['upda_value', 'base_value'], {}), '(upda_value, base_value)\n', (52573, 52597), True, 'import numpy as np\n'), ((53679, 53740), 'paramtools.consistent_labels', 'paramtools.consistent_labels', (["[mdata_base[pname]['value'][0]]"], {}), "([mdata_base[pname]['value'][0]])\n", (53707, 53740), False, 'import paramtools\n')] |
# import start
import ast
import asyncio
import calendar
import platform
import subprocess as sp
import time
import traceback
import xml.etree.ElementTree as Et
from collections import defaultdict
from datetime import datetime
import math
import numpy as np
import pandas as pd
from Utility.CDPConfigValues import CDPConfigValues
from Utility.Utilities import Utilities
from Utility.WebConstants import WebConstants
from WebConnection.WebConnection import WebConnection
# import end
## Function to reverse a string
#def reverse(string):
# string = string[::-1]
# return string
class Preprocessor:
""" Preprocessor class is used for preparing the extracted data to be fed to the training algorithm
for further processing.
"""
    def __init__(self, project, previous_preprocessed_df=None, preprocessed=None):
        """Initialise the preprocessor and load the commit data to operate on.

        :param project: project key used to resolve project-specific configuration
        :type project: str
        :param previous_preprocessed_df: already-preprocessed data to continue from
            (scheduled/incremental runs); when None the raw CDP commit dump is
            read from disk instead
        :type previous_preprocessed_df: pandas.DataFrame or None
        :param preprocessed: when not None, loading/filtering is skipped entirely
            and ``previous_preprocessed_df`` is used verbatim
        """
        # Column names used throughout the stats computations.
        self.timestamp_column = "COMMITTER_TIMESTAMP"
        self.email_column = "COMMITTER_EMAIL"
        self.project = project
        # Human-readable project name resolved from configuration.
        self.project_name = CDPConfigValues.configFetcher.get('name', project)
        self.web_constants = WebConstants(project)
        # Latest commit timestamp; populated later by get_developer_experience_using_mp().
        self.base_timestamp = ""
        self.developer_stats_df = ""
        self.developer_sub_module_stats_df = ""
        if preprocessed is None:
            if previous_preprocessed_df is None:
                # Fresh run: read the raw commit dump produced by the extractor.
                self.file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
                self.github_data_dump_df = pd.read_csv(
                    f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}")
                self.pre_processed_file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
                CDPConfigValues.create_directory(self.pre_processed_file_path)
                self.stats_dataframe = pd.DataFrame()
                self.sub_module_list = list()
            else:
                # Incremental/scheduled run: continue from previously preprocessed data.
                self.file_path = f"{CDPConfigValues.schedule_file_path}/{self.project_name}"
                self.github_data_dump_df = pd.DataFrame(previous_preprocessed_df)
                # Strip stray whitespace from every string-typed column.
                self.github_data_dump_df = self.github_data_dump_df.apply(
                    lambda x: x.str.strip() if x.dtype == "object" else x)
                # Normalise timestamps to UTC-aware Timestamps ...
                self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
                    lambda x: pd.Timestamp(x, tz="UTC"))
                self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
                    lambda x: pd.Timestamp(x))
                self.github_data_dump_df['COMMITTER_TIMESTAMP'] = self.github_data_dump_df['COMMITTER_TIMESTAMP'].astype(
                    str)
                # ... then drop the trailing offset suffix (presumably "+00:00" —
                # 6 characters) so the column matches the fresh-run string format.
                self.github_data_dump_df['COMMITTER_TIMESTAMP'] = self.github_data_dump_df['COMMITTER_TIMESTAMP'].apply(
                    lambda x: x[:-6])
            # Keep only source files relevant to the configured project.
            self.filter_data_frame(self.github_data_dump_df)
            if self.github_data_dump_df.shape[0] != 0:
                # Backfill missing committer emails with "<Name>@noemail".
                self.github_data_dump_df["COMMITTER_EMAIL"] = \
                    self.github_data_dump_df[["COMMITTER_EMAIL", "COMMITTER_NAME"]].apply(self.replace_blank_email, axis=1)
        else:
            self.github_data_dump_df = previous_preprocessed_df
@staticmethod
def replace_blank_email(row):
if row["COMMITTER_EMAIL"] is None or row["COMMITTER_EMAIL"] == "":
return str(row["COMMITTER_NAME"]).replace(" ", "") + "@noemail"
else:
return row["COMMITTER_EMAIL"]
def filter_data_frame(self, data_frame):
if self.project_name == "spring-boot":
data_frame = data_frame[data_frame["FILE_NAME"].str.endswith(".java")]
elif self.project_name == "opencv":
data_frame = data_frame[
(data_frame["FILE_NAME"].str.endswith(".hpp") |
data_frame["FILE_NAME"].str.endswith(".cpp") |
data_frame["FILE_NAME"].str.endswith(".h") |
data_frame["FILE_NAME"].str.endswith(".cc") |
data_frame["FILE_NAME"].str.endswith(".c") |
data_frame["FILE_NAME"].str.endswith(".py") |
data_frame["FILE_NAME"].str.endswith(".java") |
data_frame["FILE_NAME"].str.endswith(".cl")
)]
# data_frame["FILE_NAME"].str.endswith(".cs")
elif self.project_name == "corefx":
data_frame = data_frame[
(data_frame["FILE_NAME"].str.endswith(".cs") |
data_frame["FILE_NAME"].str.endswith(".h") |
data_frame["FILE_NAME"].str.endswith(".c") |
data_frame["FILE_NAME"].str.endswith(".vb"))]
self.github_data_dump_df = data_frame
def convert_month_day_date_hour_to_categorical(self, ):
"""
This method takes the month, day and hour and applies one hot encoding manually
"""
convert_date_to_categorical_start_time = time.time()
timestamp_column_in_df = self.github_data_dump_df['COMMITTER_TIMESTAMP']
dayList = list()
monthList = list()
dateList = list()
hourList = list()
mondayList = list()
tuesdayList = list()
wednesdayList = list()
thursdayList = list()
fridayList = list()
saturdayList = list()
sundayList = list()
for timestamp_value in timestamp_column_in_df:
new_date_format = datetime.strptime(timestamp_value, '%Y-%m-%d %H:%M:%S')
weekdayStr = calendar.day_name[new_date_format.weekday()]
dayList.append(weekdayStr)
if weekdayStr == 'Sunday':
sundayList.append('1')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Monday':
sundayList.append('0')
mondayList.append('1')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Tuesday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('1')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Wednesday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('1')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Thursday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('1')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Friday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('1')
saturdayList.append('0')
elif weekdayStr == 'Saturday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('1')
monthList.append(new_date_format.month)
dateList.append(new_date_format.day)
hourList.append(new_date_format.hour)
self.github_data_dump_df['DAY'] = dayList
self.github_data_dump_df['MONTH'] = monthList
self.github_data_dump_df['DATE'] = dateList
self.github_data_dump_df['HOUR'] = hourList
self.github_data_dump_df['SUNDAY'] = sundayList
self.github_data_dump_df['MONDAY'] = mondayList
self.github_data_dump_df['TUESDAY'] = tuesdayList
self.github_data_dump_df['WEDNESDAY'] = wednesdayList
self.github_data_dump_df['THURSDAY'] = thursdayList
self.github_data_dump_df['FRIDAY'] = fridayList
self.github_data_dump_df['SATURDAY'] = saturdayList
convert_date_to_categorical_end_time = time.time()
print(f"Time taken to convert datetime to Categorical is "
f"{convert_date_to_categorical_end_time - convert_date_to_categorical_start_time}")
@staticmethod
def file_status_to_cat(value):
"""
THelper method for replacing string to single character value
"""
if value == 'modified':
return 'M'
elif value == 'added':
return 'A'
elif value == 'renamed':
return 'R'
else:
return 'D'
def file_status_to_categorical(self, ):
"""
This method modifies the string value of the file status to categorical (single character)
"""
file_status_start_time = time.time()
self.github_data_dump_df['FILE_STATUS'] = self.github_data_dump_df['FILE_STATUS'].apply(self.file_status_to_cat)
file_status_end_time = time.time()
print(f"Time Taken to convert file status to categorical {file_status_end_time - file_status_start_time}")
def determine_commit_is_fix(self, closed_events_df=None):
"""
This method modifies the dataframe to label commits as isFix corresponding to commmits
:param closed_events_df: dataframe containing the closed events list
:type closed_events_df: pandas dataframe
"""
commit_isFix_start_time = time.time()
if closed_events_df is None:
closed_issue_df = pd.read_csv(
f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.closed_events_list_file_name}")
else:
closed_issue_df = closed_events_df
commits_closed_df = pd.DataFrame(
closed_issue_df.loc[closed_issue_df["commitid"] != ""]["commitid"].drop_duplicates())
commits_closed_df = commits_closed_df.dropna()
commits_closed_df.columns = ["COMMIT_ID"]
search_pattern = "|".join(commits_closed_df["COMMIT_ID"].to_list())
isFix = self.github_data_dump_df["COMMIT_ID"].str.contains(search_pattern)
self.github_data_dump_df["IsFix"] = isFix.replace((True, False), (1, 0))
commit_isFix_end_time = time.time()
print(f"Time Taken for determining for Commit is for Fix {commit_isFix_end_time - commit_isFix_start_time}")
def get_commit_type(self):
"""
This method based on the commit message containing merge text labels each record as a merge or non-merge
commit.
"""
commit_type_start_time = time.time()
search_pattern = "|".join(["Merge pull request"])
isFix = self.github_data_dump_df["COMMIT_MESSAGE"].str.contains(search_pattern)
self.github_data_dump_df["COMMIT_TYPE"] = isFix.replace((True, False), (1, 0))
commit_type_end_time = time.time()
print(f"Time Taken for getting commit type is {commit_type_end_time - commit_type_start_time}")
    def get_file_size(self, ):
        """
        Fetch the size of every (commit, file) pair via the GitHub REST content
        URLs and merge it into ``github_data_dump_df`` as a FILE_SIZE column.

        Failed requests are retried with a linearly growing sleep (60s * retry
        number), up to 199 retry rounds.
        """
        file_age_start_time = time.time()
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMITTER_TIMESTAMP"],
                                                                        ascending=[True])
        commit_id_list = self.github_data_dump_df["COMMIT_ID"].drop_duplicates().to_list()
        print(f"Total Content Urls to be requested {len(commit_id_list)}")
        # One content URL per unique commit id.
        file_size_url_list = Utilities.format_url(self.web_constants.file_size_url, commit_id_list)
        batch_size = int(CDPConfigValues.git_api_batch_size)
        web_connection = WebConnection()
        # results[0] -> (COMMIT_ID, FILE_NAME, FILE_SIZE) tuples, results[1] -> failed URLs.
        results = web_connection.get_async_file_size(file_size_url_list, self.github_data_dump_df, self.web_constants,
                                                     batch_size)
        file_size = results[0]
        failed_urls = results[1]
        loop_counter = 1
        # Retry failed URLs with increasing back-off until exhausted or capped.
        while len(failed_urls) > 0 and loop_counter < 200:
            loop_counter = loop_counter + 1
            print(f"Sleeping for {60 * loop_counter} Seconds in get_file_size ...")
            time.sleep(60 * loop_counter)
            print(f"Total Failed URL's re-trying {len(failed_urls)}")
            results = web_connection.get_async_file_size(failed_urls, self.github_data_dump_df, self.web_constants,
                                                         batch_size=batch_size)
            failed_urls = results[1]
            file_size = file_size + results[0]
        file_size_df = pd.DataFrame(file_size, columns=["COMMIT_ID", "FILE_NAME", "FILE_SIZE"])
        file_size_df = file_size_df.drop_duplicates()
        file_size_df = file_size_df.sort_values(by=["COMMIT_ID"])
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMIT_ID"])
        # Left merge keeps rows whose size could not be fetched (FILE_SIZE = NaN).
        self.github_data_dump_df = pd.merge(self.github_data_dump_df, file_size_df, how="left",
                                            left_on=["COMMIT_ID", "FILE_NAME"], right_on=["COMMIT_ID", "FILE_NAME"])
        file_age_end_time = time.time()
        print(f"Fetched all file sizes in {file_age_end_time - file_age_start_time}")
    @staticmethod
    async def calculate_commit_file_age_and_number_of_developer_mp(file_df, file_name):
        """
        Compute, for every commit touching ``file_name``, the file age and the
        cumulative number of distinct committers.

        File age is the gap in seconds since the previous commit to the same
        file (0 for the first row). ``file_df`` must already be sorted by
        COMMITTER_TIMESTAMP ascending — the caller guarantees this.

        :param file_df: history rows for one file
            (COMMIT_ID, COMMITTER_NAME, FILE_STATUS, COMMITTER_TIMESTAMP)
        :type file_df: pandas dataframe
        :param file_name: name of the file, used as the result key
        :type file_name: str
        :return: {file_name: (file_age_list, number_of_developers_list)}
        """
        number_of_developers, file_age = list(), list()
        counter = 0
        df_len = len(file_df)
        result = defaultdict()
        # file_age_normal = list()
        while counter < df_len:
            # Changed as part of review comment
            # if counter == 0 or file_df["FILE_STATUS"].iloc[counter] == "A":
            if counter == 0:
                # First visible commit for this file: age is defined as 0.
                file_age.append(0)
                # file_age_normal.append(0)
            elif counter > 0:
                # file_age_normal.append(
                #     (file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
                #         counter - 1]))
                # Timedelta converted manually to whole seconds (days + seconds parts).
                age = (file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
                    counter - 1]).days * 24 * 3600 + \
                      (file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
                          counter - 1]).seconds
                file_age.append(age)
            current_timestamp = file_df["COMMITTER_TIMESTAMP"].iloc[counter]
            # if file_df["FILE_STATUS"].iloc[counter] == "A":
            # Changed as part of review comment
            if counter == 0:
                number_of_developers.append(1)
            else:
                # Distinct committers seen up to (and including) this timestamp.
                number_of_developers.append(
                    len(set(file_df.loc[file_df["COMMITTER_TIMESTAMP"] <= current_timestamp]["COMMITTER_NAME"])))
            counter = counter + 1
            # Yield control so sibling coroutines in the same batch can progress.
            await asyncio.sleep(0)
        result[file_name] = (file_age, number_of_developers)
        return result
async def execute_calculate_commit_file_age_and_number_of_developer_mp(self, batch):
result = await asyncio.gather(
*[self.calculate_commit_file_age_and_number_of_developer_mp(
self.github_data_dump_df.loc[self.github_data_dump_df["FILE_NAME"] == file][
["COMMIT_ID", "COMMITTER_NAME", "FILE_STATUS", "COMMITTER_TIMESTAMP"]], file) for file in batch]
)
return result
    def get_commit_file_age_and_number_of_developer_mp(self, ):
        """
        Add FILE_AGE and NO_OF_DEV columns to ``github_data_dump_df``.

        Files are processed in batches of async tasks; per-file results are
        collected in a dict and re-assembled in sorted file-name order, which
        matches the (FILE_NAME, COMMITTER_TIMESTAMP) sort applied to the frame
        below — the column assignment relies on that alignment.
        """
        commit_age_no_of_dev_start_time = time.time()
        self.github_data_dump_df["COMMITTER_TIMESTAMP"] = pd.to_datetime(
            self.github_data_dump_df["COMMITTER_TIMESTAMP"])
        # Sort so each file's history is contiguous and chronological.
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["FILE_NAME", "COMMITTER_TIMESTAMP"],
                                                                        ascending=[True, True])
        file_names = self.github_data_dump_df["FILE_NAME"]
        file_names = file_names.drop_duplicates().to_list()
        commit_file_age, number_of_developers, failed_batches = list(), list(), list()
        results = defaultdict()
        batch_size = 100
        file_batches = list(Utilities.create_batches(file_names, batch_size=batch_size))
        print(f"For Getting Commit File Age and Numbre of Developers, Batch size {batch_size}")
        total_batches = len(file_batches)
        batch_counter, percent = 0, 0
        print(f"Total Batches to be executed for getting commit file age and number of developer is {total_batches}")
        for batch in file_batches:
            try:
                # A fresh event loop per batch; the previous loop runs the tasks.
                loop = asyncio.get_event_loop()
                asyncio.set_event_loop(asyncio.new_event_loop())
                if (total_batches * percent) // 100 == batch_counter:
                    print(
                        f"Total Batches completed is {batch_counter} and Failed batches Count is {len(failed_batches)}")
                    percent = percent + 10
                results_list = loop.run_until_complete(
                    self.execute_calculate_commit_file_age_and_number_of_developer_mp(batch))
                for result in results_list:
                    for result_key in result.keys():
                        results[result_key] = result[result_key]
            except Exception as e:
                # Best-effort: remember the failed files and continue with the next batch.
                print(f"Exception Occurred!!!\n{traceback.print_tb(e.__traceback__)}")
                for file_name in batch:
                    failed_batches.append(file_name)
            batch_counter = batch_counter + 1
        """Retrieving the result of the dictionary on sorted order of the keys (author is the result_key)"""
        for result_key in sorted(results.keys()):
            commit_file_age = commit_file_age + results[result_key][0]
            number_of_developers = number_of_developers + results[result_key][1]
        self.github_data_dump_df["FILE_AGE"] = commit_file_age
        self.github_data_dump_df["NO_OF_DEV"] = number_of_developers
        commit_age_no_of_dev_end_time = time.time()
        print(f"Time Taken FILE_AGE and NO_OF_DEV {commit_age_no_of_dev_end_time - commit_age_no_of_dev_start_time}")
    async def calculate_developer_experience(self, file_df, author_name):
        """
        Compute a recency-weighted experience score per commit for one author.

        Each commit is assigned a 365-day "Year" bucket measured back from
        ``self.base_timestamp`` (the project's newest commit). At each commit
        the score is the sum over buckets of commits_in_bucket / bucket_number,
        so recent activity weighs more than old activity.

        :param file_df: this author's rows (COMMIT_ID, COMMITTER_TIMESTAMP)
        :type file_df: pandas dataframe
        :param author_name: key under which the EXP list is returned
        :type author_name: str
        :return: {author_name: list of EXP values, one per row of file_df}
        """
        # Bucket index: years back from base_timestamp, ceil'ed and 1-based.
        file_df["Year"] = (pd.to_datetime(self.base_timestamp) - pd.to_datetime(file_df["COMMITTER_TIMESTAMP"])) / (
                np.timedelta64(1, 'D') * 365)
        file_df["Year"] = file_df["Year"].apply(lambda x: math.ceil(x) + 1)
        unique_file_df = file_df
        unique_file_df = unique_file_df.drop_duplicates()
        exp = list()
        dev_exp = defaultdict()
        counter = 0
        while counter < (len(unique_file_df)):
            current_timestamp = unique_file_df["COMMITTER_TIMESTAMP"].iloc[counter]
            commit_id = unique_file_df["COMMIT_ID"].iloc[counter]
            # if counter == 0:
            #     exp.append((commit_id, current_timestamp, 0))
            # else:
            #     year_count = unique_file_df.loc[unique_file_df["COMMITTER_TIMESTAMP"] < current_timestamp][
            #         "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
            #     year_count["Exp"] = year_count["Counts"] / (year_count["Year"])
            #
            #     exp.append((commit_id, current_timestamp, year_count["Exp"].sum()))
            # year_count = unique_file_df.iloc[counter]
            # Changed as part of review comment
            # Count commits per bucket among rows up to and including this one.
            year_count = unique_file_df.iloc[0:counter + 1][
                "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
            # year_count = unique_file_df.loc[unique_file_df["COMMITTER_TIMESTAMP"] <= current_timestamp][
            #     "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
            year_count["Exp"] = year_count["Counts"] / (year_count["Year"])
            exp.append((commit_id, current_timestamp, year_count["Exp"].sum()))
            counter = counter + 1
        exp_df = pd.DataFrame(exp, columns=["COMMIT_ID", "COMMITTER_TIMESTAMP", "EXP"])
        # Merge back so the EXP list lines up with file_df's row order.
        file_df = pd.merge(file_df, exp_df, how="left", left_on=["COMMIT_ID", "COMMITTER_TIMESTAMP"],
                           right_on=["COMMIT_ID", "COMMITTER_TIMESTAMP"])
        # Yield control so sibling coroutines in the same batch can progress.
        await asyncio.sleep(0)
        dev_exp[author_name] = file_df["EXP"].to_list()
        return dev_exp
async def execute_calculate_developer_experience(self, batch):
"""
Helper method for developer experience
"""
result = await asyncio.gather(
*[self.calculate_developer_experience(
self.github_data_dump_df.loc[self.github_data_dump_df["COMMITTER_NAME"] == author_name][
["COMMIT_ID", "COMMITTER_TIMESTAMP"]], author_name) for author_name in batch]
)
return result
    @staticmethod
    async def calculate_developer_experience_from_calender_year(file_df, author_name):
        """
        Compute a calendar-year-weighted experience score per commit for one
        author: at each commit, sum over past calendar years of
        commits_in_year / (current_year - year + 1). The first commit scores 0.

        :param file_df: this author's rows (COMMIT_ID, COMMITTER_TIMESTAMP)
        :type file_df: pandas dataframe
        :param author_name: key under which the EXP list is returned
        :type author_name: str
        :return: {author_name: list of EXP values, one per row of file_df}
        """
        # Calendar year of each commit (e.g. 2020).
        file_df["Year"] = pd.DatetimeIndex(file_df['COMMITTER_TIMESTAMP']).year
        unique_file_df = file_df
        unique_file_df = unique_file_df.drop_duplicates()
        exp = list()
        dev_exp = defaultdict()
        counter = 0
        while counter < (len(unique_file_df)):
            current_timestamp = unique_file_df["COMMITTER_TIMESTAMP"].iloc[counter]
            commit_id = unique_file_df["COMMIT_ID"].iloc[counter]
            if counter == 0:
                # No history before the first commit: experience is 0.
                exp.append((commit_id, current_timestamp, 0))
            else:
                # Commits per calendar year strictly before this commit,
                # weighted down by distance from today's year.
                year_count = unique_file_df.loc[unique_file_df["COMMITTER_TIMESTAMP"] < current_timestamp][
                    "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
                year_count["Exp"] = year_count["Counts"] / ((datetime.today().year - year_count["Year"]) + 1)
                exp.append((commit_id, current_timestamp, year_count["Exp"].sum()))
            counter = counter + 1
        exp_df = pd.DataFrame(exp, columns=["COMMIT_ID", "COMMITTER_TIMESTAMP", "EXP"])
        # Merge back so the EXP list lines up with file_df's row order.
        file_df = pd.merge(file_df, exp_df, how="left", left_on=["COMMIT_ID", "COMMITTER_TIMESTAMP"],
                           right_on=["COMMIT_ID", "COMMITTER_TIMESTAMP"])
        # Yield control so sibling coroutines in the same batch can progress.
        await asyncio.sleep(0)
        dev_exp[author_name] = file_df["EXP"].to_list()
        return dev_exp
async def execute_calculate_developer_experience_from_calender_year(self, batch):
"""
Helper method for developer experience
"""
result = await asyncio.gather(
*[self.calculate_developer_experience_from_calender_year(
self.github_data_dump_df.loc[self.github_data_dump_df["COMMITTER_NAME"] == author_name][
["COMMIT_ID", "COMMITTER_TIMESTAMP"]], author_name) for author_name in batch]
)
return result
    def get_developer_experience_using_mp(self, execution_flag):
        """
        Add a developer recent-experience column to ``github_data_dump_df``.

        Authors are processed in batches of async tasks; per-author EXP lists
        are re-assembled in sorted author order, matching the
        (COMMITTER_NAME, COMMITTER_TIMESTAMP) sort applied to the frame below.

        :param execution_flag: True -> calendar-year weighting (column
            DEV_REXP_CALENDER_YEAR_WISE); False -> 365-day-bucket weighting
            (column DEV_REXP_365_DAYS_WISE)
        :type execution_flag: bool
        """
        dev_exp_start_time = time.time()
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMITTER_TIMESTAMP"],
                                                                        ascending=[True])
        # Newest commit timestamp: reference point for the 365-day buckets.
        self.base_timestamp = self.github_data_dump_df["COMMITTER_TIMESTAMP"].drop_duplicates().to_list()[-1]
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMITTER_NAME", "COMMITTER_TIMESTAMP"],
                                                                        ascending=[True, True])
        failed_batches, developer_exp = list(), list()
        author_exp = defaultdict()
        author_names = self.github_data_dump_df["COMMITTER_NAME"]
        author_names = author_names.drop_duplicates().to_list()
        batch_size = int(CDPConfigValues.configFetcher.get('author_stat_batch_size', self.project))
        author_batches = list(Utilities.create_batches(author_names, batch_size=batch_size))
        print(f"Developer Experience Batch size {batch_size}")
        total_batches = len(author_batches)
        batch_counter, percent = 0, 0
        print(f"Total Batches to be executed for getting Developer Experience is {total_batches}")
        for batch in author_batches:
            try:
                # A fresh event loop per batch; the previous loop runs the tasks.
                loop = asyncio.get_event_loop()
                asyncio.set_event_loop(asyncio.new_event_loop())
                if (total_batches * percent) // 100 == batch_counter:
                    print(
                        f"Total Batches completed is {batch_counter} and Failed batches Count is {len(failed_batches)}")
                    percent = percent + 10
                if execution_flag:
                    results_list = loop.run_until_complete(
                        self.execute_calculate_developer_experience_from_calender_year(batch))
                else:
                    results_list = loop.run_until_complete(self.execute_calculate_developer_experience(batch))
                for result in results_list:
                    for author_key in result.keys():
                        author_exp[author_key] = result[author_key]
            except Exception as e:
                # Best-effort: remember the failed authors and continue with the next batch.
                print(f"Exception Occurred!!!\n{traceback.print_tb(e.__traceback__)}")
                for file_name in batch:
                    failed_batches.append(file_name)
            batch_counter = batch_counter + 1
        """Retrieving the result of the dictionary on sorted order of the keys (author is the author_key)"""
        for author_key in sorted(author_exp.keys()):
            developer_exp = developer_exp + author_exp[author_key]
        if execution_flag:
            self.github_data_dump_df["DEV_REXP_CALENDER_YEAR_WISE"] = developer_exp
        else:
            self.github_data_dump_df["DEV_REXP_365_DAYS_WISE"] = developer_exp
        dev_exp_end_time = time.time()
        print(f"Time Taken For Dev RExperience {dev_exp_end_time - dev_exp_start_time}")
def parse_xml(self, commit_id, file):
"""
The method parses the output of the git show command
:param commit_id:
:type commit_id: str
:param file: file
:type file: str
"""
command = f"git show {commit_id}:{file}"
query = sp.Popen(command, cwd=f"{CDPConfigValues.local_git_repo}/{self.project_name}", stdout=sp.PIPE,
stderr=sp.PIPE, shell=True)
(stdout, sdterr) = query.communicate()
xml_string = stdout.decode("utf-8", errors='replace')
tree = Et.fromstring(xml_string)
submodule = []
for module in tree.iter():
if str(module.tag).__contains__("module") and not str(module.tag).__contains__("modules"):
if module.text != "":
if str(module.text).__contains__("/"):
submodule.append(str(module.text).split("/")[1])
else:
submodule.append(module.text)
# await asyncio.sleep(1)
return submodule
    def get_submodule_list(self, commit_id):
        """
        Collect the maven sub-modules declared in every pom.xml present in the
        repository tree at the given commit.

        :param commit_id: commit sha whose tree is scanned for pom.xml files
        :type commit_id: str
        :return: set of sub-module names aggregated across all pom.xml files
        """
        # NOTE(review): only "Linux" gets grep; macOS ("Darwin") would fall into
        # the Windows findstr branch — confirm the intended target platforms.
        if platform.system().capitalize() == "Linux":
            command = f"git ls-tree --full-tree -r {commit_id} | grep pom.xml"
        else:
            command = f"git ls-tree --full-tree -r {commit_id} | findstr pom.xml"
        query = sp.Popen(command, cwd=f"{CDPConfigValues.local_git_repo}/{self.project_name}", stdout=sp.PIPE,
                         stderr=sp.PIPE, shell=True)
        (stdout, sdterr) = query.communicate()
        file_list = stdout.decode("utf-8", errors='replace').split("\n")
        sub_module_list = []
        for file in file_list:
            if file != "":
                # ls-tree lines look like "<mode> <type> <sha>\t<path>"; take the path.
                file_name = file.split('\t')[1]
                if file_name != "":
                    module_list = self.parse_xml(commit_id, file_name)
                    if len(module_list) > 0:
                        sub_module_list = sub_module_list + module_list
        sub_module_list = set(sub_module_list)
        return sub_module_list
async def execute_submodule_list(self, batch):
"""
Hepler method
:param batch: commit id batch
:type batch: list
"""
result = await asyncio.gather(
*[self.get_submodule_list(commit_id) for commit_id in batch]
)
return result
async def get_modules(self, commit_id, file):
"""
Hepler method
:param commit_id: commit id
:type commit_id: str
:param file: file complete name
:type file: str
"""
sub_modules = file.split("/")
sub_modules = sub_modules[:-1]
sub_modules = sub_modules[::-1]
for sub_module in sub_modules:
if sub_module in self.sub_module_list:
result = (commit_id, file, sub_module)
return result
if str(file).__contains__('src'):
if str(file).startswith('/src'):
if str(file.split('/src')[1]).__contains__('src'):
sub_module = file.split('/src')[1].split('/src')[0].split("/")[-1]
result = (commit_id, file, sub_module)
return result
else:
sub_module = file.split('/src')[0].split("/")[-1]
result = (commit_id, file, sub_module)
return result
result = (commit_id, file, "")
return result
async def get_sub_modules_and_stats(self, commit_id, file_list):
"""
Hepler method
:param commit_id: commit id
:type commit_id: str
:param file_list: list of file impacted
:type file_list: list
"""
module_list = []
result_list = []
for file in file_list:
result = await self.get_modules(commit_id, file)
result_list.append(result)
if result[2] != "":
module_list.append(result)
# module_list = set(module_list)
result_data_frame = pd.DataFrame(result_list, columns=["COMMIT_ID", "FILE_NAME", "SUB_MODULE"])
# Changed as part of review comment
# result_data_frame["NS"] = len(set(module_list))
result_data_frame["NS"] = len(set(result_data_frame["SUB_MODULE"].to_list()))
return result_data_frame
async def execute_sub_module_stat(self, batch):
result = await asyncio.gather(
*[self.get_sub_modules_and_stats(commit_id,
self.github_data_dump_df.loc[
self.github_data_dump_df["COMMIT_ID"] == commit_id][
"FILE_NAME"].drop_duplicates().to_list()) for commit_id in batch]
)
return result
    def get_sub_module_stats(self, ):
        """
        Add SUB_MODULE and NS (sub-modules touched per commit) columns to
        ``github_data_dump_df``. When the project declares pom.xml files, the
        known sub-module list is read from the tree at the last commit id.
        """
        sub_module_stat_start_time = time.time()
        commit_id_list = self.github_data_dump_df["COMMIT_ID"].drop_duplicates().to_list()
        commit_id_batches = list(Utilities.create_batches(commit_id_list, batch_size=20))
        total_batches = len(commit_id_batches)
        batch_counter, percent = 0, 0
        sub_module_list, failed_batches = list(), list()
        sub_module_stats_df = pd.DataFrame()
        # Config value is a string literal ("True"/"False"); literal_eval turns it into a bool.
        if ast.literal_eval(CDPConfigValues.configFetcher.get('isPOMXmlExists', self.project)):
            self.sub_module_list = list(self.get_submodule_list(commit_id_list[-1]))
        else:
            self.sub_module_list = []
        print(f"Total Batches to be executed for getting sub module count commit wise is {total_batches}")
        for batch in commit_id_batches:
            try:
                # A fresh event loop per batch; the previous loop runs the tasks.
                loop = asyncio.get_event_loop()
                asyncio.set_event_loop(asyncio.new_event_loop())
                if (total_batches * percent) // 100 == batch_counter:
                    print(
                        f"Total Batches completed is {batch_counter} and Failed batches Count is {len(failed_batches)}")
                    percent = percent + 10
                results_list = loop.run_until_complete(self.execute_sub_module_stat(batch))
                for result in results_list:
                    sub_module_stats_df = pd.concat([sub_module_stats_df, result], ignore_index=True)
            except Exception as e:
                # Best-effort: remember the failed commits and continue with the next batch.
                print(f"Exception Occurred!!!\n{traceback.print_tb(e.__traceback__)}")
                for commit_id in batch:
                    failed_batches.append(commit_id)
            batch_counter = batch_counter + 1
        # sub_module_stats_df.to_csv(f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}/SUB_MODULE_FILE.csv",
        #                            index=False)
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMIT_ID", "FILE_NAME"],
                                                                        ascending=[True, True])
        sub_module_stats_df = sub_module_stats_df.sort_values(by=["COMMIT_ID", "FILE_NAME"], ascending=[True, True])
        self.github_data_dump_df = pd.merge(self.github_data_dump_df, sub_module_stats_df, how="left",
                                            left_on=["COMMIT_ID", "FILE_NAME"],
                                            right_on=["COMMIT_ID", "FILE_NAME"])
        # self.github_data_dump_df["NS"] = self.github_data_dump_df["NS"].apply(lambda x: 0 if x is None else x)
        sub_module_stat_end_time = time.time()
        print(f"Time Taken For Sub Module Stats Calculation {sub_module_stat_end_time - sub_module_stat_start_time}")
    @staticmethod
    async def developer_stats(data_frame, email, timestamp_column, email_column):
        """
        Build two per-developer frames from one developer's commit rows:

        - DEV_STATS: running commit count in timestamp order (starts at 1);
        - SUB_MODULE_STATS: running count of this developer's *prior* commits
          to each sub-module (starts at 0; the "" sub-module is skipped).

        NOTE(review): DEV_STATS is 1-based while SUB_MODULE_STATS is 0-based —
        presumably intentional ("commits so far" vs "prior commits"); confirm.

        :param data_frame: rows with COMMIT_ID, timestamp and SUB_MODULE columns
        :type data_frame: pandas dataframe
        :param email: the developer's email (result key column value)
        :type email: str
        :return: (dev_stats_df, sub_module_stats_df) tuple
        """
        result_list = []
        data_frame = data_frame.drop_duplicates()
        data_frame = data_frame.sort_values(by=[timestamp_column], ascending=[True])
        dev_data_frame = data_frame[["COMMIT_ID", timestamp_column]].drop_duplicates()
        if len(data_frame) == 0:
            print("Empty Data Frame")
        count = 1
        for index, row in dev_data_frame.iterrows():
            result = (row["COMMIT_ID"], email, row[timestamp_column], count)
            result_list.append(result)
            count = count + 1
        dev_stats_df = pd.DataFrame(result_list, columns=["COMMIT_ID", email_column, timestamp_column, "DEV_STATS"])
        sub_module_list = data_frame["SUB_MODULE"].drop_duplicates().to_list()
        result_list = []
        for sub_module in sub_module_list:
            if sub_module != "":
                count = 0
                sub_module_df = data_frame.loc[data_frame["SUB_MODULE"] == sub_module][["COMMIT_ID", timestamp_column]]
                sub_module_df = sub_module_df.sort_values(by=[timestamp_column], ascending=[True])
                for index, row in sub_module_df.iterrows():
                    result = (row["COMMIT_ID"], email, row[timestamp_column], sub_module, count)
                    result_list.append(result)
                    count = count + 1
        sub_module_stats_df = pd.DataFrame(result_list,
                                           columns=["COMMIT_ID", email_column, timestamp_column, "SUB_MODULE",
                                                    "SUB_MODULE_STATS"])
        result = (dev_stats_df, sub_module_stats_df)
        return result
async def get_developer_stats_async(self, batch, timestamp_column, email_column):
"""
Helper method
"""
result = await asyncio.gather(
*[self.developer_stats(self.github_data_dump_df.loc[self.github_data_dump_df[email_column] == email]
[["COMMIT_ID", timestamp_column, "SUB_MODULE"]], email, timestamp_column,
email_column)
for email in batch]
)
return result
    def get_developer_stats(self, file_name=None):
        """
        Compute per-developer running commit counts (DEV_STATS) and per
        developer/sub-module running counts (SUB_MODULE_STATS) and merge both
        into ``github_data_dump_df``.

        :param file_name: optional preprocessed csv to (re)load first; when
            None the frame already held in memory is used
        :type file_name: str
        """
        if file_name is not None:
            self.github_data_dump_df = pd.read_csv(
                f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}")
            # Backfill missing committer emails, same as in __init__.
            self.github_data_dump_df["COMMITTER_EMAIL"] = \
                self.github_data_dump_df[["COMMITTER_EMAIL", "COMMITTER_NAME"]].apply(self.replace_blank_email, axis=1)
        developer_stat_start_time = time.time()
        failed_batches = list()
        email_list = self.github_data_dump_df[self.email_column].drop_duplicates().to_list()
        batch_size= 10
        batches = list(Utilities.create_batches(email_list, batch_size=batch_size))
        print(f"Developer Stats calculation Batch size {batch_size}")
        total_batches = len(batches)
        batch_counter, percent = 0, 0
        print(f"Total Batches to be executed for getting developer stats is {total_batches}")
        self.developer_stats_df = pd.DataFrame()
        self.developer_sub_module_stats_df = pd.DataFrame()
        for batch in batches:
            try:
                # A fresh event loop per batch; the previous loop runs the tasks.
                loop = asyncio.get_event_loop()
                asyncio.set_event_loop(asyncio.new_event_loop())
                if (total_batches * percent) // 100 == batch_counter:
                    print(
                        f"Total Batches completed is {batch_counter} and Failed batches Count is {len(failed_batches)}")
                    percent = percent + 10
                results_list = loop.run_until_complete(
                    self.get_developer_stats_async(batch, self.timestamp_column, self.email_column))
                for result in results_list:
                    # result[0]: per-developer commit counts; result[1]: per sub-module counts.
                    self.developer_stats_df = pd.concat([self.developer_stats_df, result[0]], ignore_index=True)
                    self.developer_sub_module_stats_df = pd.concat([self.developer_sub_module_stats_df, result[1]],
                                                                   ignore_index=True)
            except Exception as e:
                # Best-effort: remember the failed emails and continue with the next batch.
                print(f"Exception Occurred!!!\n{traceback.print_tb(e.__traceback__)}")
                for file_name in batch:
                    failed_batches.append(file_name)
            batch_counter = batch_counter + 1
        # self.developer_stats_df.to_csv(
        #     f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{stats_column_name}.csv", index=False)
        self.developer_stats_df = self.developer_stats_df.sort_values(by=[self.email_column, self.timestamp_column],
                                                                      ascending=[True, True])
        self.developer_stats_df = self.developer_stats_df.rename_axis(None)
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=[self.email_column, self.timestamp_column],
                                                                        ascending=[True, True])
        self.github_data_dump_df = pd.merge(self.github_data_dump_df, self.developer_stats_df, how="left",
                                            left_on=["COMMIT_ID", self.email_column, self.timestamp_column],
                                            right_on=["COMMIT_ID", self.email_column, self.timestamp_column])
        self.developer_sub_module_stats_df = self.developer_sub_module_stats_df.sort_values(
            by=[self.email_column, self.timestamp_column],
            ascending=[True, True])
        self.developer_sub_module_stats_df = self.developer_sub_module_stats_df.rename_axis(None)
        self.github_data_dump_df = pd.merge(self.github_data_dump_df, self.developer_sub_module_stats_df, how="left",
                                            left_on=["COMMIT_ID", self.email_column, self.timestamp_column,
                                                     "SUB_MODULE"],
                                            right_on=["COMMIT_ID", self.email_column, self.timestamp_column,
                                                      "SUB_MODULE"])
        # Rows with no matching sub-module record get a 0 count.
        self.github_data_dump_df["SUB_MODULE_STATS"] = self.github_data_dump_df["SUB_MODULE_STATS"].apply(
            lambda x: 0 if x is None else x)
        self.github_data_dump_df["SUB_MODULE_STATS"] = self.github_data_dump_df["SUB_MODULE_STATS"].apply(
            lambda x: 0 if x == "" else x)
        self.github_data_dump_df["SUB_MODULE_STATS"] = self.github_data_dump_df["SUB_MODULE_STATS"].fillna(0)
        developer_stat_end_time = time.time()
        print(f"Time Taken For Author Stat Calculation {developer_stat_end_time - developer_stat_start_time}")
def get_no_of_files_count(self):
"""
The method complies the total number of files changed/added for each of the commit
"""
# self.github_data_dump_df = pd.read_csv(f"{self.file_path}/old_features_preprocessed_step9.csv")
self.drop_data_frame_column("NF")
commit_list = self.github_data_dump_df["COMMIT_ID"].drop_duplicates().to_list()
result = []
for commit_id in commit_list:
result.append((commit_id, len(set(
self.github_data_dump_df[self.github_data_dump_df["COMMIT_ID"] == commit_id]["FILE_NAME"].to_list()))))
result_df = pd.DataFrame(result, columns=["COMMIT_ID", "NF"])
compare_df = pd.DataFrame(self.github_data_dump_df[["COMMIT_ID", "FILE_NAME"]])
compare_df = compare_df["COMMIT_ID"].value_counts().reset_index()
# compare_df.columns = ["COMMIT_ID", "FILE_NAME", "NF"]
#
# if result_df.equals(compare_df):
# print("Two Data Frame are same!!!")
self.github_data_dump_df = pd.merge(self.github_data_dump_df, result_df, how="left", left_on=["COMMIT_ID"],
right_on=["COMMIT_ID"])
def get_no_of_directories_count(self):
"""
The method compiles the total number of unique directories changed/added for each of the commit.
"""
self.drop_data_frame_column("ND")
commit_list = self.github_data_dump_df["COMMIT_ID"].drop_duplicates().to_list()
result = []
for commit_id in commit_list:
result.append((commit_id, len(set(
self.github_data_dump_df[self.github_data_dump_df["COMMIT_ID"] == commit_id][
"FILE_PARENT"].to_list()))))
result_df = pd.DataFrame(result, columns=["COMMIT_ID", "ND"])
self.github_data_dump_df = pd.merge(self.github_data_dump_df, result_df, how="left", left_on=["COMMIT_ID"],
right_on=["COMMIT_ID"])
def update_file_name_directory(self):
"""
Helper method
"""
fileNameList = list()
parentFileList = list()
timeFileModifiedList = list()
update_file_start_time = time.time()
filename_directory_column = self.github_data_dump_df['FILE_NAME']
fileModifiedCountSeries = filename_directory_column.value_counts()
fileModifiedCountDict = fileModifiedCountSeries.to_dict()
for value in filename_directory_column:
noTimesFileModified = fileModifiedCountDict[value]
timeFileModifiedList.append(noTimesFileModified)
fileName = value.split('/')[-1]
parentName = value.split(fileName)[0]
if parentName is None or parentName == "":
parentName = "/"
fileNameList.append(fileName)
parentFileList.append(parentName)
self.github_data_dump_df['FILE_PARENT'] = parentFileList
self.github_data_dump_df['FILE_NAME'] = fileNameList
self.github_data_dump_df['TIMES_FILE_MODIFIED'] = timeFileModifiedList
update_file_end_time = time.time()
print(f"Time Taken to execute update_file_name_directory {update_file_end_time - update_file_start_time}")
def calculate_file_changes(self):
"""
The method adds a new feature that calculates the file changes based on number of lines of code added,
modified or deleted over the number of changes spread across the file (entropy)
"""
calculate_file_changes_start_time = time.time()
self.github_data_dump_df["FileChanges"] = (self.github_data_dump_df["LINES_ADDED"] + self.github_data_dump_df["LINES_MODIFIED"] + self.github_data_dump_df["LINES_DELETED"])/(1 + self.github_data_dump_df["FILES_ENTROPY"])
calculate_file_changes_end_time = time.time()
print(f"Time Taken for calculate file changes is {calculate_file_changes_end_time - calculate_file_changes_start_time}")
# self.github_data_dump_df = self.github_data_dump_df.drop(columns=['LINES_ADDED', 'LINES_MODIFIED', 'FILES_ENTROPY'])
def drop_data_frame_column(self, column_name):
"""
The method drops unused colums from the dataframes
:param column_name: name of the column to be dropped
:type column_name: str
"""
try:
self.github_data_dump_df = self.github_data_dump_df.drop(column_name, axis=1)
except Exception as e:
pass
print(e)
def drop_unnecessary_columns(self):
"""
The method drops unused colums from the dataframes
"""
self.drop_data_frame_column('COMMIT_MESSAGE')
self.drop_data_frame_column('AUTHOR_EMAIL')
self.drop_data_frame_column('COMMITTER_EMAIL')
self.drop_data_frame_column('AUTHOR_NAME')
self.drop_data_frame_column('AUTHOR_TIMESTAMP')
self.drop_data_frame_column('SUB_MODULE')
self.drop_data_frame_column('DAY')
self.drop_data_frame_column('LINES_ADDED')
self.drop_data_frame_column('LINES_MODIFIED')
self.drop_data_frame_column('FILES_ENTROPY')
self.drop_data_frame_column('LINES_DELETED')
def rename_columns(self, old_name, new_name):
"""
The method renames columns to new newname
:param old_name: old column name
:type old_name: str
:param new_name: new column name
:type new_name: str
"""
self.github_data_dump_df.columns = [new_name if x == old_name else x for x in self.github_data_dump_df.columns]
def rename(self):
"""
The method renames columns to new newname
"""
# self.github_data_dump_df = pd.read_csv(f"{self.file_path}/old_features_preprocessed_step12.csv")
self.rename_columns("COMMITTER_NAME", "AUTHOR_NAME")
self.rename_columns("COMMITTER_TIMESTAMP", "TIMESTAMP")
self.rename_columns("FILE_URL", "CONTENTS_URL")
    def merge_preprocessed_files(self, file, column_to_create_in_main_file):
        """
        The method merges the columns to create the final preprocessed dataframe.

        Both frames are sorted by COMMIT_ID and then a single column from the
        loaded file is copied onto the working dataframe.

        :param file: file to be merged
        :type file: str
        :param column_to_create_in_main_file: column to be added
        :type column_to_create_in_main_file: str
        """
        file_df = pd.read_csv(file)
        file_df = file_df.sort_values(by=["COMMIT_ID"], ascending=[True])
        self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMIT_ID"], ascending=[True])
        # NOTE(review): pandas assigns a Series by index label, not by row
        # position, so the two sorts above do not change the alignment unless
        # the indexes were reset after sorting — verify this matches intent.
        self.github_data_dump_df[column_to_create_in_main_file] = file_df[column_to_create_in_main_file]
def save_preprocessed_file(self, file_name):
"""
The method writes the dataframe as csv
:param file_name: file path
:type file_name: str
"""
self.github_data_dump_df.to_csv(f"{self.pre_processed_file_path}/{file_name}.csv", encoding='utf-8-sig',
index=False)
    def save_preprocessed_file_as_csv_xlsx(self, file_name):
        """
        The method writes the dataframe as excel and modified the timestamo format

        Writes the frame to ``<file_name>.xlsx`` with a normalised TIMESTAMP
        format, then round-trips the xlsx back to ``<file_name>.csv``.

        :param file_name: file path
        :type file_name: str
        """
        try:
            # Normalise TIMESTAMP to ISO-8601 before exporting.
            self.github_data_dump_df['TIMESTAMP'] = pd.to_datetime(self.github_data_dump_df['TIMESTAMP'],
                                                               format='%Y-%m-%dT%H:%M:%S')
            # NOTE(review): the ``options=`` keyword and ``writer.save()`` are
            # deprecated/removed in newer pandas releases — confirm the pinned
            # pandas version before upgrading.
            writer = pd.ExcelWriter(f"{self.pre_processed_file_path}/{file_name}.xlsx", engine='xlsxwriter',
                                    datetime_format='%Y-%m-%dT%H:%M:%S', options={'strings_to_urls': False})
            self.github_data_dump_df.to_excel(writer, sheet_name='Sheet 1', index=False)
            writer.save()
            # Round-trip through xlsx so the CSV picks up the reformatted dates.
            pd.read_excel(f"{self.pre_processed_file_path}/{file_name}.xlsx").to_csv(
                f"{self.file_path}/{file_name}.csv",
                encoding='utf-8-sig', index=False)
        except Exception as e:
            print(f"Exception {e}")
def save_schedule_file(self):
"""
The method creates directory with current date for each day's data collected and writes it to the
"""
current_date = datetime.today().strftime('%Y-%m-%d')
CDPConfigValues.create_directory(f"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}")
self.github_data_dump_df.to_csv(f"{CDPConfigValues.schedule_file_path}/{self.project_name}/"
f"{current_date}/{CDPConfigValues.final_feature_file}", encoding='utf-8-sig',
index=False)
def update_fill_na(self, file_name):
"""
The method fills the blank values for SUB_MODULE_STATS to 0
"""
self.github_data_dump_df = pd.read_csv(
f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}")
self.github_data_dump_df["SUB_MODULE_STATS"] = self.github_data_dump_df["SUB_MODULE_STATS"].fillna(0)
@staticmethod
def drop_additional_columns(file_df, column_name):
try:
file_df = file_df.drop(column_name, axis=1)
except Exception as e:
pass
print(e)
return file_df
def file_pre_process(project):
    """
    Run the full feature pre-processing pipeline for *project*, writing a
    checkpoint CSV after every stage so a failed run can be inspected.
    """
    pipeline = Preprocessor(project)

    # Stage 0-1: categorical encodings.
    print("converting month day date hour to categorical")
    pipeline.convert_month_day_date_hour_to_categorical()
    pipeline.save_preprocessed_file("old_features_preprocessed_step0")
    print("converting file status to categorical")
    pipeline.file_status_to_categorical()
    pipeline.save_preprocessed_file("old_features_preprocessed_step1")

    # Stage 2: label commits that are bug fixes.
    print("determining commits as fix")
    pipeline.determine_commit_is_fix()
    pipeline.save_preprocessed_file("old_features_preprocessed_step2")

    # Stages 3-4: per-file size, age and developer counts.
    print("Getting file size of a commit")
    pipeline.get_file_size()
    pipeline.save_preprocessed_file("old_features_preprocessed_step3")
    print("Getting file Commit age and Number of Developer")
    pipeline.get_commit_file_age_and_number_of_developer_mp()
    pipeline.save_preprocessed_file("old_features_preprocessed_step4")

    # Stages 5-6: developer experience (recent and overall).
    print("Getting Developer Experience")
    pipeline.get_developer_experience_using_mp(True)
    pipeline.save_preprocessed_file("old_features_preprocessed_step5")
    pipeline.get_developer_experience_using_mp(False)
    pipeline.save_preprocessed_file("old_features_preprocessed_step6")

    # Stages 7-8: sub-module and developer statistics.
    print("Getting Sub Module Stats")
    pipeline.get_sub_module_stats()
    pipeline.save_preprocessed_file("old_features_preprocessed_step7")
    print("Getting Developer Stats")
    pipeline.get_developer_stats()
    pipeline.save_preprocessed_file("old_features_preprocessed_step8")
    pipeline.save_schedule_file()

    # Stages 9-12: column cleanup and per-commit aggregates.
    print("Dropping Unnecessary columns")
    pipeline.drop_unnecessary_columns()
    pipeline.save_preprocessed_file("old_features_preprocessed_step9")
    print("getting Unique Files per commit")
    pipeline.get_no_of_files_count()
    pipeline.save_preprocessed_file("old_features_preprocessed_step10")
    print("Separating File name and Directory")
    pipeline.update_file_name_directory()
    pipeline.save_preprocessed_file("old_features_preprocessed_step11")
    print("Getting Unique directories per commit..")
    pipeline.get_no_of_directories_count()
    pipeline.save_preprocessed_file("old_features_preprocessed_step12")

    # Stages 13-14: rename to the final schema and fill gaps.
    pipeline.rename()
    pipeline.save_preprocessed_file("old_features_preprocessed_step13")
    print("Creating Final preprocessed cdp file")
    pipeline.update_fill_na("old_features_preprocessed_step13.csv")
    pipeline.save_preprocessed_file("old_features_preprocessed_step14")
# Test Method for validation: run the full pipeline for every configured project.
if __name__ == "__main__":
    for project_key in sorted(CDPConfigValues.cdp_projects.keys()):
        t_start = time.time()
        file_pre_process(project_key)
        t_end = time.time()
        print(f"Time Taken to complete Pre-processing {t_end - t_start}")
| [
"pandas.read_csv",
"WebConnection.WebConnection.WebConnection",
"pandas.DatetimeIndex",
"collections.defaultdict",
"Utility.Utilities.Utilities.format_url",
"pandas.DataFrame",
"Utility.Utilities.Utilities.create_batches",
"traceback.print_tb",
"pandas.merge",
"Utility.WebConstants.WebConstants",
... | [((1966, 2016), 'Utility.CDPConfigValues.CDPConfigValues.configFetcher.get', 'CDPConfigValues.configFetcher.get', (['"""name"""', 'project'], {}), "('name', project)\n", (1999, 2016), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((2046, 2067), 'Utility.WebConstants.WebConstants', 'WebConstants', (['project'], {}), '(project)\n', (2058, 2067), False, 'from Utility.WebConstants import WebConstants\n'), ((5881, 5892), 'time.time', 'time.time', ([], {}), '()\n', (5890, 5892), False, 'import time\n'), ((9612, 9623), 'time.time', 'time.time', ([], {}), '()\n', (9621, 9623), False, 'import time\n'), ((10349, 10360), 'time.time', 'time.time', ([], {}), '()\n', (10358, 10360), False, 'import time\n'), ((10513, 10524), 'time.time', 'time.time', ([], {}), '()\n', (10522, 10524), False, 'import time\n'), ((11021, 11032), 'time.time', 'time.time', ([], {}), '()\n', (11030, 11032), False, 'import time\n'), ((11813, 11824), 'time.time', 'time.time', ([], {}), '()\n', (11822, 11824), False, 'import time\n'), ((12168, 12179), 'time.time', 'time.time', ([], {}), '()\n', (12177, 12179), False, 'import time\n'), ((12444, 12455), 'time.time', 'time.time', ([], {}), '()\n', (12453, 12455), False, 'import time\n'), ((12781, 12792), 'time.time', 'time.time', ([], {}), '()\n', (12790, 12792), False, 'import time\n'), ((13181, 13251), 'Utility.Utilities.Utilities.format_url', 'Utilities.format_url', (['self.web_constants.file_size_url', 'commit_id_list'], {}), '(self.web_constants.file_size_url, commit_id_list)\n', (13201, 13251), False, 'from Utility.Utilities import Utilities\n'), ((13338, 13353), 'WebConnection.WebConnection.WebConnection', 'WebConnection', ([], {}), '()\n', (13351, 13353), False, 'from WebConnection.WebConnection import WebConnection\n'), ((14231, 14303), 'pandas.DataFrame', 'pd.DataFrame', (['file_size'], {'columns': "['COMMIT_ID', 'FILE_NAME', 'FILE_SIZE']"}), "(file_size, columns=['COMMIT_ID', 'FILE_NAME', 'FILE_SIZE'])\n", (14243, 14303), 
True, 'import pandas as pd\n'), ((14551, 14689), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 'file_size_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', 'FILE_NAME']", 'right_on': "['COMMIT_ID', 'FILE_NAME']"}), "(self.github_data_dump_df, file_size_df, how='left', left_on=[\n 'COMMIT_ID', 'FILE_NAME'], right_on=['COMMIT_ID', 'FILE_NAME'])\n", (14559, 14689), True, 'import pandas as pd\n'), ((14757, 14768), 'time.time', 'time.time', ([], {}), '()\n', (14766, 14768), False, 'import time\n'), ((15441, 15454), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (15452, 15454), False, 'from collections import defaultdict\n'), ((17707, 17718), 'time.time', 'time.time', ([], {}), '()\n', (17716, 17718), False, 'import time\n'), ((17778, 17841), 'pandas.to_datetime', 'pd.to_datetime', (["self.github_data_dump_df['COMMITTER_TIMESTAMP']"], {}), "(self.github_data_dump_df['COMMITTER_TIMESTAMP'])\n", (17792, 17841), True, 'import pandas as pd\n'), ((18291, 18304), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (18302, 18304), False, 'from collections import defaultdict\n'), ((20202, 20213), 'time.time', 'time.time', ([], {}), '()\n', (20211, 20213), False, 'import time\n'), ((20855, 20868), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (20866, 20868), False, 'from collections import defaultdict\n'), ((22243, 22313), 'pandas.DataFrame', 'pd.DataFrame', (['exp'], {'columns': "['COMMIT_ID', 'COMMITTER_TIMESTAMP', 'EXP']"}), "(exp, columns=['COMMIT_ID', 'COMMITTER_TIMESTAMP', 'EXP'])\n", (22255, 22313), True, 'import pandas as pd\n'), ((22333, 22467), 'pandas.merge', 'pd.merge', (['file_df', 'exp_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', 'COMMITTER_TIMESTAMP']", 'right_on': "['COMMIT_ID', 'COMMITTER_TIMESTAMP']"}), "(file_df, exp_df, how='left', left_on=['COMMIT_ID',\n 'COMMITTER_TIMESTAMP'], right_on=['COMMIT_ID', 'COMMITTER_TIMESTAMP'])\n", (22341, 22467), True, 'import pandas as pd\n'), ((23467, 23480), 
'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (23478, 23480), False, 'from collections import defaultdict\n'), ((24252, 24322), 'pandas.DataFrame', 'pd.DataFrame', (['exp'], {'columns': "['COMMIT_ID', 'COMMITTER_TIMESTAMP', 'EXP']"}), "(exp, columns=['COMMIT_ID', 'COMMITTER_TIMESTAMP', 'EXP'])\n", (24264, 24322), True, 'import pandas as pd\n'), ((24342, 24476), 'pandas.merge', 'pd.merge', (['file_df', 'exp_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', 'COMMITTER_TIMESTAMP']", 'right_on': "['COMMIT_ID', 'COMMITTER_TIMESTAMP']"}), "(file_df, exp_df, how='left', left_on=['COMMIT_ID',\n 'COMMITTER_TIMESTAMP'], right_on=['COMMIT_ID', 'COMMITTER_TIMESTAMP'])\n", (24350, 24476), True, 'import pandas as pd\n'), ((25616, 25627), 'time.time', 'time.time', ([], {}), '()\n', (25625, 25627), False, 'import time\n'), ((26222, 26235), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (26233, 26235), False, 'from collections import defaultdict\n'), ((28448, 28459), 'time.time', 'time.time', ([], {}), '()\n', (28457, 28459), False, 'import time\n'), ((28897, 29028), 'subprocess.Popen', 'sp.Popen', (['command'], {'cwd': 'f"""{CDPConfigValues.local_git_repo}/{self.project_name}"""', 'stdout': 'sp.PIPE', 'stderr': 'sp.PIPE', 'shell': '(True)'}), "(command, cwd=\n f'{CDPConfigValues.local_git_repo}/{self.project_name}', stdout=sp.PIPE,\n stderr=sp.PIPE, shell=True)\n", (28905, 29028), True, 'import subprocess as sp\n'), ((29169, 29194), 'xml.etree.ElementTree.fromstring', 'Et.fromstring', (['xml_string'], {}), '(xml_string)\n', (29182, 29194), True, 'import xml.etree.ElementTree as Et\n'), ((30167, 30298), 'subprocess.Popen', 'sp.Popen', (['command'], {'cwd': 'f"""{CDPConfigValues.local_git_repo}/{self.project_name}"""', 'stdout': 'sp.PIPE', 'stderr': 'sp.PIPE', 'shell': '(True)'}), "(command, cwd=\n f'{CDPConfigValues.local_git_repo}/{self.project_name}', stdout=sp.PIPE,\n stderr=sp.PIPE, shell=True)\n", (30175, 30298), True, 'import subprocess as 
sp\n'), ((32973, 33048), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {'columns': "['COMMIT_ID', 'FILE_NAME', 'SUB_MODULE']"}), "(result_list, columns=['COMMIT_ID', 'FILE_NAME', 'SUB_MODULE'])\n", (32985, 33048), True, 'import pandas as pd\n'), ((34044, 34055), 'time.time', 'time.time', ([], {}), '()\n', (34053, 34055), False, 'import time\n'), ((34410, 34424), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (34422, 34424), True, 'import pandas as pd\n'), ((36224, 36369), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 'sub_module_stats_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', 'FILE_NAME']", 'right_on': "['COMMIT_ID', 'FILE_NAME']"}), "(self.github_data_dump_df, sub_module_stats_df, how='left', left_on\n =['COMMIT_ID', 'FILE_NAME'], right_on=['COMMIT_ID', 'FILE_NAME'])\n", (36232, 36369), True, 'import pandas as pd\n'), ((36602, 36613), 'time.time', 'time.time', ([], {}), '()\n', (36611, 36613), False, 'import time\n'), ((37457, 37554), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {'columns': "['COMMIT_ID', email_column, timestamp_column, 'DEV_STATS']"}), "(result_list, columns=['COMMIT_ID', email_column,\n timestamp_column, 'DEV_STATS'])\n", (37469, 37554), True, 'import pandas as pd\n'), ((38252, 38370), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {'columns': "['COMMIT_ID', email_column, timestamp_column, 'SUB_MODULE', 'SUB_MODULE_STATS']"}), "(result_list, columns=['COMMIT_ID', email_column,\n timestamp_column, 'SUB_MODULE', 'SUB_MODULE_STATS'])\n", (38264, 38370), True, 'import pandas as pd\n'), ((39756, 39767), 'time.time', 'time.time', ([], {}), '()\n', (39765, 39767), False, 'import time\n'), ((40278, 40292), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (40290, 40292), True, 'import pandas as pd\n'), ((40338, 40352), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (40350, 40352), True, 'import pandas as pd\n'), ((42254, 42464), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 
'self.developer_stats_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', self.email_column, self.timestamp_column]", 'right_on': "['COMMIT_ID', self.email_column, self.timestamp_column]"}), "(self.github_data_dump_df, self.developer_stats_df, how='left',\n left_on=['COMMIT_ID', self.email_column, self.timestamp_column],\n right_on=['COMMIT_ID', self.email_column, self.timestamp_column])\n", (42262, 42464), True, 'import pandas as pd\n'), ((42869, 43124), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 'self.developer_sub_module_stats_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID', self.email_column, self.timestamp_column, 'SUB_MODULE']", 'right_on': "['COMMIT_ID', self.email_column, self.timestamp_column, 'SUB_MODULE']"}), "(self.github_data_dump_df, self.developer_sub_module_stats_df, how=\n 'left', left_on=['COMMIT_ID', self.email_column, self.timestamp_column,\n 'SUB_MODULE'], right_on=['COMMIT_ID', self.email_column, self.\n timestamp_column, 'SUB_MODULE'])\n", (42877, 43124), True, 'import pandas as pd\n'), ((43755, 43766), 'time.time', 'time.time', ([], {}), '()\n', (43764, 43766), False, 'import time\n'), ((44531, 44580), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': "['COMMIT_ID', 'NF']"}), "(result, columns=['COMMIT_ID', 'NF'])\n", (44543, 44580), True, 'import pandas as pd\n'), ((44603, 44669), 'pandas.DataFrame', 'pd.DataFrame', (["self.github_data_dump_df[['COMMIT_ID', 'FILE_NAME']]"], {}), "(self.github_data_dump_df[['COMMIT_ID', 'FILE_NAME']])\n", (44615, 44669), True, 'import pandas as pd\n'), ((44947, 45056), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 'result_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID']", 'right_on': "['COMMIT_ID']"}), "(self.github_data_dump_df, result_df, how='left', left_on=[\n 'COMMIT_ID'], right_on=['COMMIT_ID'])\n", (44955, 45056), True, 'import pandas as pd\n'), ((45687, 45736), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': "['COMMIT_ID', 'ND']"}), 
"(result, columns=['COMMIT_ID', 'ND'])\n", (45699, 45736), True, 'import pandas as pd\n'), ((45773, 45882), 'pandas.merge', 'pd.merge', (['self.github_data_dump_df', 'result_df'], {'how': '"""left"""', 'left_on': "['COMMIT_ID']", 'right_on': "['COMMIT_ID']"}), "(self.github_data_dump_df, result_df, how='left', left_on=[\n 'COMMIT_ID'], right_on=['COMMIT_ID'])\n", (45781, 45882), True, 'import pandas as pd\n'), ((46162, 46173), 'time.time', 'time.time', ([], {}), '()\n', (46171, 46173), False, 'import time\n'), ((47071, 47082), 'time.time', 'time.time', ([], {}), '()\n', (47080, 47082), False, 'import time\n'), ((47534, 47545), 'time.time', 'time.time', ([], {}), '()\n', (47543, 47545), False, 'import time\n'), ((47817, 47828), 'time.time', 'time.time', ([], {}), '()\n', (47826, 47828), False, 'import time\n'), ((50502, 50519), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (50513, 50519), True, 'import pandas as pd\n'), ((52487, 52600), 'Utility.CDPConfigValues.CDPConfigValues.create_directory', 'CDPConfigValues.create_directory', (['f"""{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}"""'], {}), "(\n f'{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}')\n", (52519, 52600), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((53037, 53135), 'pandas.read_csv', 'pd.read_csv', (['f"""{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}"""'], {}), "(\n f'{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}'\n )\n", (53048, 53135), True, 'import pandas as pd\n'), ((56149, 56184), 'Utility.CDPConfigValues.CDPConfigValues.cdp_projects.keys', 'CDPConfigValues.cdp_projects.keys', ([], {}), '()\n', (56182, 56184), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((56208, 56219), 'time.time', 'time.time', ([], {}), '()\n', (56217, 56219), False, 'import time\n'), ((56269, 56280), 'time.time', 'time.time', ([], {}), '()\n', (56278, 56280), False, 
'import time\n'), ((6369, 6424), 'datetime.datetime.strptime', 'datetime.strptime', (['timestamp_value', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(timestamp_value, '%Y-%m-%d %H:%M:%S')\n", (6386, 6424), False, 'from datetime import datetime\n'), ((11100, 11224), 'pandas.read_csv', 'pd.read_csv', (['f"""{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.closed_events_list_file_name}"""'], {}), "(\n f'{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.closed_events_list_file_name}'\n )\n", (11111, 11224), True, 'import pandas as pd\n'), ((13827, 13856), 'time.sleep', 'time.sleep', (['(60 * loop_counter)'], {}), '(60 * loop_counter)\n', (13837, 13856), False, 'import time\n'), ((16823, 16839), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (16836, 16839), False, 'import asyncio\n'), ((18358, 18417), 'Utility.Utilities.Utilities.create_batches', 'Utilities.create_batches', (['file_names'], {'batch_size': 'batch_size'}), '(file_names, batch_size=batch_size)\n', (18382, 18417), False, 'from Utility.Utilities import Utilities\n'), ((22506, 22522), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (22519, 22522), False, 'import asyncio\n'), ((23282, 23330), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["file_df['COMMITTER_TIMESTAMP']"], {}), "(file_df['COMMITTER_TIMESTAMP'])\n", (23298, 23330), True, 'import pandas as pd\n'), ((24515, 24531), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (24528, 24531), False, 'import asyncio\n'), ((26391, 26464), 'Utility.CDPConfigValues.CDPConfigValues.configFetcher.get', 'CDPConfigValues.configFetcher.get', (['"""author_stat_batch_size"""', 'self.project'], {}), "('author_stat_batch_size', self.project)\n", (26424, 26464), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((26496, 26557), 'Utility.Utilities.Utilities.create_batches', 'Utilities.create_batches', (['author_names'], {'batch_size': 'batch_size'}), '(author_names, batch_size=batch_size)\n', (26520, 
26557), False, 'from Utility.Utilities import Utilities\n'), ((34180, 34235), 'Utility.Utilities.Utilities.create_batches', 'Utilities.create_batches', (['commit_id_list'], {'batch_size': '(20)'}), '(commit_id_list, batch_size=20)\n', (34204, 34235), False, 'from Utility.Utilities import Utilities\n'), ((34453, 34518), 'Utility.CDPConfigValues.CDPConfigValues.configFetcher.get', 'CDPConfigValues.configFetcher.get', (['"""isPOMXmlExists"""', 'self.project'], {}), "('isPOMXmlExists', self.project)\n", (34486, 34518), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((39440, 39538), 'pandas.read_csv', 'pd.read_csv', (['f"""{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}"""'], {}), "(\n f'{CDPConfigValues.preprocessed_file_path}/{self.project_name}/{file_name}'\n )\n", (39451, 39538), True, 'import pandas as pd\n'), ((39941, 40000), 'Utility.Utilities.Utilities.create_batches', 'Utilities.create_batches', (['email_list'], {'batch_size': 'batch_size'}), '(email_list, batch_size=batch_size)\n', (39965, 40000), False, 'from Utility.Utilities import Utilities\n'), ((51512, 51598), 'pandas.to_datetime', 'pd.to_datetime', (["self.github_data_dump_df['TIMESTAMP']"], {'format': '"""%Y-%m-%dT%H:%M:%S"""'}), "(self.github_data_dump_df['TIMESTAMP'], format=\n '%Y-%m-%dT%H:%M:%S')\n", (51526, 51598), True, 'import pandas as pd\n'), ((51682, 51852), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['f"""{self.pre_processed_file_path}/{file_name}.xlsx"""'], {'engine': '"""xlsxwriter"""', 'datetime_format': '"""%Y-%m-%dT%H:%M:%S"""', 'options': "{'strings_to_urls': False}"}), "(f'{self.pre_processed_file_path}/{file_name}.xlsx', engine=\n 'xlsxwriter', datetime_format='%Y-%m-%dT%H:%M:%S', options={\n 'strings_to_urls': False})\n", (51696, 51852), True, 'import pandas as pd\n'), ((2409, 2529), 'pandas.read_csv', 'pd.read_csv', (['f"""{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}"""'], {}), "(\n 
f'{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}'\n )\n", (2420, 2529), True, 'import pandas as pd\n'), ((2669, 2731), 'Utility.CDPConfigValues.CDPConfigValues.create_directory', 'CDPConfigValues.create_directory', (['self.pre_processed_file_path'], {}), '(self.pre_processed_file_path)\n', (2701, 2731), False, 'from Utility.CDPConfigValues import CDPConfigValues\n'), ((2772, 2786), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2784, 2786), True, 'import pandas as pd\n'), ((2988, 3026), 'pandas.DataFrame', 'pd.DataFrame', (['previous_preprocessed_df'], {}), '(previous_preprocessed_df)\n', (3000, 3026), True, 'import pandas as pd\n'), ((18789, 18813), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (18811, 18813), False, 'import asyncio\n'), ((20511, 20546), 'pandas.to_datetime', 'pd.to_datetime', (['self.base_timestamp'], {}), '(self.base_timestamp)\n', (20525, 20546), True, 'import pandas as pd\n'), ((20549, 20595), 'pandas.to_datetime', 'pd.to_datetime', (["file_df['COMMITTER_TIMESTAMP']"], {}), "(file_df['COMMITTER_TIMESTAMP'])\n", (20563, 20595), True, 'import pandas as pd\n'), ((20617, 20639), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (20631, 20639), True, 'import numpy as np\n'), ((26881, 26905), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (26903, 26905), False, 'import asyncio\n'), ((34846, 34870), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (34868, 34870), False, 'import asyncio\n'), ((40424, 40448), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (40446, 40448), False, 'import asyncio\n'), ((52441, 52457), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (52455, 52457), False, 'from datetime import datetime\n'), ((3314, 3339), 'pandas.Timestamp', 'pd.Timestamp', (['x'], {'tz': '"""UTC"""'}), "(x, tz='UTC')\n", (3326, 3339), True, 'import pandas as pd\n'), 
((3484, 3499), 'pandas.Timestamp', 'pd.Timestamp', (['x'], {}), '(x)\n', (3496, 3499), True, 'import pandas as pd\n'), ((18853, 18877), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (18875, 18877), False, 'import asyncio\n'), ((20705, 20717), 'math.ceil', 'math.ceil', (['x'], {}), '(x)\n', (20714, 20717), False, 'import math\n'), ((26945, 26969), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (26967, 26969), False, 'import asyncio\n'), ((29932, 29949), 'platform.system', 'platform.system', ([], {}), '()\n', (29947, 29949), False, 'import platform\n'), ((34910, 34934), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (34932, 34934), False, 'import asyncio\n'), ((35377, 35436), 'pandas.concat', 'pd.concat', (['[sub_module_stats_df, result]'], {'ignore_index': '(True)'}), '([sub_module_stats_df, result], ignore_index=True)\n', (35386, 35436), True, 'import pandas as pd\n'), ((40488, 40512), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (40510, 40512), False, 'import asyncio\n'), ((41024, 41090), 'pandas.concat', 'pd.concat', (['[self.developer_stats_df, result[0]]'], {'ignore_index': '(True)'}), '([self.developer_stats_df, result[0]], ignore_index=True)\n', (41033, 41090), True, 'import pandas as pd\n'), ((41148, 41225), 'pandas.concat', 'pd.concat', (['[self.developer_sub_module_stats_df, result[1]]'], {'ignore_index': '(True)'}), '([self.developer_sub_module_stats_df, result[1]], ignore_index=True)\n', (41157, 41225), True, 'import pandas as pd\n'), ((52007, 52072), 'pandas.read_excel', 'pd.read_excel', (['f"""{self.pre_processed_file_path}/{file_name}.xlsx"""'], {}), "(f'{self.pre_processed_file_path}/{file_name}.xlsx')\n", (52020, 52072), True, 'import pandas as pd\n'), ((19537, 19572), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (19555, 19572), False, 'import traceback\n'), ((24066, 24082), 'datetime.datetime.today', 
'datetime.today', ([], {}), '()\n', (24080, 24082), False, 'from datetime import datetime\n'), ((27807, 27842), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (27825, 27842), False, 'import traceback\n'), ((35521, 35556), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (35539, 35556), False, 'import traceback\n'), ((41377, 41412), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (41395, 41412), False, 'import traceback\n')] |
import os.path
import numpy as np
import tensorflow as tf
import SharedArray as sa
from musegan2.models import GAN, RefineGAN, End2EndGAN
from config import TF_CONFIG, EXP_CONFIG, MODEL_CONFIG, TRAIN_CONFIG
NUM_BATCH = 50
def main():
    """
    Draw NUM_BATCH batches of samples from the latest GAN checkpoint and
    evaluate two binarisations of the generator output: a hard 0.5 threshold
    and stochastic Bernoulli sampling.
    """
    with tf.Session(config=TF_CONFIG) as sess:
        model = GAN(sess, MODEL_CONFIG)
        model.init_all()
        model.load_latest('../checkpoints/')

        # Build one latent batch per sampler run.
        print("[*] Preparing data...")
        latent = np.random.normal(size=(NUM_BATCH, model.config['batch_size'],
                                      model.config['z_dim']))
        feed_dicts = [{model.z: latent[batch]} for batch in range(NUM_BATCH)]

        print("[*] Running sampler...")
        samples = np.array([sess.run(model.G.tensor_out, fd) for fd in feed_dicts])
        flattened = samples.reshape((-1,) + samples.shape[3:])

        hard_binarized = flattened > 0.5
        # ceil(x - U[0,1)) equals 1 with probability x — Bernoulli sampling.
        stochastic_binarized = np.ceil(flattened - np.random.uniform(size=flattened.shape))

        print("[*] Running evaluation...")
        _ = model.metrics.eval(hard_binarized,
                               mat_path=os.path.join(model.config['eval_dir'], 'round.npy'))
        _ = model.metrics.eval(stochastic_binarized,
                               mat_path=os.path.join(model.config['eval_dir'], 'bernoulli.npy'))
# Script entry point: run GAN sampling and evaluation when executed directly.
if __name__ == "__main__":
    main()
| [
"numpy.random.uniform",
"tensorflow.Session",
"numpy.random.normal",
"musegan2.models.GAN"
] | [((245, 273), 'tensorflow.Session', 'tf.Session', ([], {'config': 'TF_CONFIG'}), '(config=TF_CONFIG)\n', (255, 273), True, 'import tensorflow as tf\n'), ((297, 320), 'musegan2.models.GAN', 'GAN', (['sess', 'MODEL_CONFIG'], {}), '(sess, MODEL_CONFIG)\n', (300, 320), False, 'from musegan2.models import GAN, RefineGAN, End2EndGAN\n'), ((482, 568), 'numpy.random.normal', 'np.random.normal', ([], {'size': "(NUM_BATCH, gan.config['batch_size'], gan.config['z_dim'])"}), "(size=(NUM_BATCH, gan.config['batch_size'], gan.config[\n 'z_dim']))\n", (498, 568), True, 'import numpy as np\n'), ((1046, 1084), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'reshaped.shape'}), '(size=reshaped.shape)\n', (1063, 1084), True, 'import numpy as np\n')] |
#from __future__ import absolute_import
#from __future__ import print_function
import os
import sys
import math
import json
import random
import numpy as np
import traceback
from scipy.stats import lognorm
from scipy.stats import uniform
from scipy.stats import weibull_min
from scipy.stats import gamma
################################
# Risk-based Predictive Model developed using NGSIM data
################################
#lag crash risk prediction #revised the function
def risk_lag_prior(Speed_Merge,Acce_Merge,Remaining_Distance,Acce_lag,Gap_lag,Speed_dif):
    """Prior probability of a lag-vehicle crash, from a fitted logistic model.

    The coefficient vector is [intercept, Speed_Merge, Acce_Merge,
    Remaining_Distance, Acce_lag, Gap_lag, Speed_dif].
    """
    coeffs = np.array([-0.648166, 0.291651, -2.67226, 0.010325,
                       2.736206, -1.9484, 3.314949])
    features = np.array([1.0, Speed_Merge, Acce_Merge, Remaining_Distance,
                         Acce_lag, Gap_lag, Speed_dif])
    logit = coeffs.dot(features)
    return 1.0 / (1.0 + np.exp(-logit))
def lag_conflict_likelihood(Gap_lag):
    """Likelihood of observing *Gap_lag* given a conflict.

    The lag gap is modelled as Weibull-distributed; the probability mass
    of a one-unit-wide bin centred on the observed gap is returned.
    """
    c, loc, scale = 1.177935, 0, 15.060868
    upper = weibull_min.cdf(Gap_lag + 0.5, c, loc, scale)
    lower = weibull_min.cdf(Gap_lag - 0.5, c, loc, scale)
    return upper - lower
def lag_nonconflict_likelihood(Gap_lag):
    """Likelihood of observing *Gap_lag* given no conflict (gamma model).

    Returns the gamma probability mass of a one-unit-wide bin centred on
    the observed gap.
    """
    shape = 3.82145718
    rate = 0.06710455
    loc = 0
    scale = 1 / rate
    upper = gamma.cdf(Gap_lag + 0.5, shape, loc, scale)
    lower = gamma.cdf(Gap_lag - 0.5, shape, loc, scale)
    return upper - lower
def risk_lag_posterior(Gap_lag,Pr_lag_prior,Pr_gap_lag_nonconflict,Pr_gap_lag_conflict):
    """Posterior probability of a lag-vehicle conflict via Bayes' rule.

    Combines the logistic prior with the conflict / non-conflict gap
    likelihoods.  Returns 1.0 when the total evidence is zero so the
    division is never attempted.
    """
    evidence = (Pr_gap_lag_conflict * Pr_lag_prior
                + Pr_gap_lag_nonconflict * (1 - Pr_lag_prior))
    if evidence == 0:
        return 1.0
    return Pr_lag_prior * Pr_gap_lag_conflict / evidence
def risk_lead_prior(Acce_Merge,Remaining_Distance,Speed_lead,Acce_lead,Gap_lead):
    """Prior probability of a lead-vehicle crash, from a fitted logistic model.

    The coefficient vector is [intercept, Acce_Merge, Remaining_Distance,
    Speed_lead, Acce_lead, Gap_lead].
    """
    coeffs = np.array([-0.871, -1.257, 0.029, -0.034, 0.451, -0.301])
    features = np.array([1.0, Acce_Merge, Remaining_Distance,
                         Speed_lead, Acce_lead, Gap_lead])
    return 1.0 / (1.0 + np.exp(-coeffs.dot(features)))
def lead_conflict_likelihood(Gap_lead):
    """Likelihood of observing *Gap_lead* given a conflict (uniform model).

    Returns the uniform probability mass of a one-unit-wide bin centred
    on the observed gap.
    """
    lower_bound = -4.996666
    span = 52.099515 + 4.996666
    upper = uniform.cdf(Gap_lead + 0.5, lower_bound, span)
    lower = uniform.cdf(Gap_lead - 0.5, lower_bound, span)
    return upper - lower
def lead_nonconflict_likelihood(Gap_lead):
    """Likelihood of observing *Gap_lead* given no conflict (Weibull model).

    Returns the Weibull probability mass of a one-unit-wide bin centred
    on the observed gap.  (The original's local was misleadingly named
    after the conflict case; renamed here.)
    """
    c = 1.609829
    loc = 0
    scale = 49.765264
    pr_gap_lead_nonconflict = (weibull_min.cdf(Gap_lead + 0.5, c, loc, scale)
                               - weibull_min.cdf(Gap_lead - 0.5, c, loc, scale))
    return pr_gap_lead_nonconflict
def risk_lead_posterior(Gap_lead,Pr_lead_prior,Pr_gap_lead_nonconflict,Pr_gap_lead_conflict):
    """Posterior probability of a lead-vehicle conflict via Bayes' rule.

    Combines the logistic prior with the conflict / non-conflict gap
    likelihoods.  Returns 1.0 when the total evidence is zero so the
    division is never attempted.
    """
    denom = (Pr_gap_lead_conflict * Pr_lead_prior
             + Pr_gap_lead_nonconflict * (1 - Pr_lead_prior))
    if denom == 0:
        return 1.0
    # Fix: reuse the guarded denominator instead of recomputing the whole
    # expression in the return line (now consistent with risk_lag_posterior).
    return Pr_lead_prior * Pr_gap_lead_conflict / denom
def safety_distance_min(Speed_Merge,Speed_lead):
    """Minimum safe gap between the merging (AV) and lead vehicles.

    Reaction distance plus the AV's braking distance, minus the lead
    vehicle's braking distance (kinematic stopping-distance model).
    """
    DECEL_AV = 4.6        # AV braking deceleration (b_av)
    DECEL_LEAD = 4.2      # lead-vehicle braking deceleration (b_lead)
    REACTION_TIME = 0.9   # AV perception-reaction time (tau_av)
    braking_av = Speed_Merge ** 2 / (2.0 * DECEL_AV)
    braking_lead = Speed_lead ** 2 / (2.0 * DECEL_LEAD)
    return Speed_Merge * REACTION_TIME + braking_av - braking_lead
| [
"scipy.stats.gamma.cdf",
"scipy.stats.uniform.cdf",
"numpy.square",
"scipy.stats.weibull_min.cdf",
"numpy.dot"
] | [((1070, 1109), 'scipy.stats.weibull_min.cdf', 'weibull_min.cdf', (['x_upper', 'c', 'loc', 'scale'], {}), '(x_upper, c, loc, scale)\n', (1085, 1109), False, 'from scipy.stats import weibull_min\n'), ((1107, 1146), 'scipy.stats.weibull_min.cdf', 'weibull_min.cdf', (['x_lower', 'c', 'loc', 'scale'], {}), '(x_lower, c, loc, scale)\n', (1122, 1146), False, 'from scipy.stats import weibull_min\n'), ((1381, 1418), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['x_upper', 'shape', 'loc', 'scale'], {}), '(x_upper, shape, loc, scale)\n', (1390, 1418), False, 'from scipy.stats import gamma\n'), ((1416, 1453), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['x_lower', 'shape', 'loc', 'scale'], {}), '(x_lower, shape, loc, scale)\n', (1425, 1453), False, 'from scipy.stats import gamma\n'), ((2752, 2784), 'scipy.stats.uniform.cdf', 'uniform.cdf', (['x_upper', 'loc', 'scale'], {}), '(x_upper, loc, scale)\n', (2763, 2784), False, 'from scipy.stats import uniform\n'), ((2783, 2815), 'scipy.stats.uniform.cdf', 'uniform.cdf', (['x_lower', 'loc', 'scale'], {}), '(x_lower, loc, scale)\n', (2794, 2815), False, 'from scipy.stats import uniform\n'), ((3028, 3067), 'scipy.stats.weibull_min.cdf', 'weibull_min.cdf', (['x_upper', 'c', 'loc', 'scale'], {}), '(x_upper, c, loc, scale)\n', (3043, 3067), False, 'from scipy.stats import weibull_min\n'), ((3065, 3104), 'scipy.stats.weibull_min.cdf', 'weibull_min.cdf', (['x_lower', 'c', 'loc', 'scale'], {}), '(x_lower, c, loc, scale)\n', (3080, 3104), False, 'from scipy.stats import weibull_min\n'), ((3686, 3707), 'numpy.square', 'np.square', (['Speed_lead'], {}), '(Speed_lead)\n', (3695, 3707), True, 'import numpy as np\n'), ((816, 831), 'numpy.dot', 'np.dot', (['beta', 'X'], {}), '(beta, X)\n', (822, 831), True, 'import numpy as np\n'), ((2529, 2544), 'numpy.dot', 'np.dot', (['beta', 'X'], {}), '(beta, X)\n', (2535, 2544), True, 'import numpy as np\n'), ((3654, 3676), 'numpy.square', 'np.square', (['Speed_Merge'], {}), '(Speed_Merge)\n', (3663, 3676), True, 
'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ╔═════════════════════════════════════════════════════════════════════════════════════════════════════════════╗
# ║ ║
# ║ __ __ ____ __ ║
# ║ /\ \/\ \ /\ _`\ /\ \ __ ║
# ║ \ \ \_\ \ __ _____ _____ __ __ \ \ \/\_\ ___ \_\ \/\_\ ___ __ ║
# ║ \ \ _ \ /'__`\ /\ '__`\/\ '__`\/\ \/\ \ \ \ \/_/_ / __`\ /'_` \/\ \ /' _ `\ /'_ `\ ║
# ║ \ \ \ \ \/\ \L\.\_\ \ \L\ \ \ \L\ \ \ \_\ \ \ \ \L\ \/\ \L\ \/\ \L\ \ \ \/\ \/\ \/\ \L\ \ ║
# ║ \ \_\ \_\ \__/.\_\\ \ ,__/\ \ ,__/\/`____ \ \ \____/\ \____/\ \___,_\ \_\ \_\ \_\ \____ \ ║
# ║ \/_/\/_/\/__/\/_/ \ \ \/ \ \ \/ `/___/> \ \/___/ \/___/ \/__,_ /\/_/\/_/\/_/\/___L\ \ ║
# ║ \ \_\ \ \_\ /\___/ /\____/ ║
# ║ \/_/ \/_/ \/__/ \_/__/ ║
# ║ ║
# ║ 49 4C 6F 76 65 59 6F 75 2C 42 75 74 59 6F 75 4B 6E 6F 77 4E 6F 74 68 69 6E 67 2E ║
# ║ ║
# ╚═════════════════════════════════════════════════════════════════════════════════════════════════════════════╝
# @Author : <NAME>
# @File : evaluation_metrics3D.py
import numpy as np
import SimpleITK as sitk
import glob
import os
from scipy.spatial import distance
from sklearn.metrics import f1_score
def numeric_score(pred, gt):
    """Confusion-matrix counts for binary masks coded as {0, 255}.

    Returns (FP, FN, TP, TN) as Python floats.
    Fix: the deprecated ``np.float`` alias (removed in NumPy 1.24) is
    replaced with the builtin ``float``.
    """
    FP = float(np.sum((pred == 255) & (gt == 0)))
    FN = float(np.sum((pred == 0) & (gt == 255)))
    TP = float(np.sum((pred == 255) & (gt == 255)))
    TN = float(np.sum((pred == 0) & (gt == 0)))
    return FP, FN, TP, TN
def Dice(pred, gt):
    """Dice similarity coefficient of two binary masks coded as {0, 255}."""
    pred_bin = np.int64(pred / 255)
    gt_bin = np.int64(gt / 255)
    overlap = np.sum(pred_bin[gt_bin == 1])
    return 2.0 * overlap / (np.sum(pred_bin) + np.sum(gt_bin))
def IoU(pred, gt):
    """Intersection-over-union of two binary masks coded as {0, 255}."""
    pred_bin = np.int64(pred / 255)
    gt_bin = np.int64(gt / 255)
    intersection = np.sum(pred_bin[gt_bin == 1])
    union = np.sum(pred_bin == 1) + np.sum(gt_bin == 1) - intersection
    return intersection / union
def metrics_3d(pred, gt):
    """TPR, FNR, FPR and IoU for binary masks coded as {0, 255}.

    Fixes two defects of the original:
    * the FPR numerator used FN instead of FP;
    * the counts are computed inline with ``float`` instead of via the
      helper that used the deprecated ``np.float`` alias (removed in
      NumPy 1.24), so this function works on its own on modern NumPy.
    A 1e-10 term guards every denominator against division by zero.
    """
    FP = float(np.sum((pred == 255) & (gt == 0)))
    FN = float(np.sum((pred == 0) & (gt == 255)))
    TP = float(np.sum((pred == 255) & (gt == 255)))
    TN = float(np.sum((pred == 0) & (gt == 0)))
    tpr = TP / (TP + FN + 1e-10)            # sensitivity / recall
    fnr = FN / (FN + TP + 1e-10)            # miss rate
    fpr = FP / (FP + TN + 1e-10)            # fall-out (was FN / ...: a bug)
    iou = TP / (TP + FN + FP + 1e-10)
    return tpr, fnr, fpr, iou
def over_rate(pred, gt):
    """Over-segmentation rate: Os / (Rs + Os).

    Rs = number of ground-truth foreground (255) pixels;
    Os = number of false-positive pixels (pred 255 where gt is 0).
    Fix: the deprecated ``np.float`` alias (removed in NumPy 1.24) is
    replaced with the builtin ``float``.
    """
    Rs = float(np.sum(gt == 255))
    Os = float(np.sum((pred == 255) & (gt == 0)))
    OR = Os / (Rs + Os)
    return OR
def under_rate(pred, gt):
    """Under-segmentation rate: Us / (Rs + Os).

    Rs = number of ground-truth foreground (255) pixels;
    Us = number of false-negative pixels (pred 0 where gt is 255);
    Os = number of false-positive pixels (pred 255 where gt is 0).
    Fix: the deprecated ``np.float`` alias (removed in NumPy 1.24) is
    replaced with the builtin ``float``.
    """
    Rs = float(np.sum(gt == 255))
    Us = float(np.sum((pred == 0) & (gt == 255)))
    Os = float(np.sum((pred == 255) & (gt == 0)))
    UR = Us / (Rs + Os)
    return UR
| [
"numpy.sum",
"numpy.int64"
] | [((2251, 2271), 'numpy.int64', 'np.int64', (['(pred / 255)'], {}), '(pred / 255)\n', (2259, 2271), True, 'import numpy as np\n'), ((2281, 2299), 'numpy.int64', 'np.int64', (['(gt / 255)'], {}), '(gt / 255)\n', (2289, 2299), True, 'import numpy as np\n'), ((2417, 2437), 'numpy.int64', 'np.int64', (['(pred / 255)'], {}), '(pred / 255)\n', (2425, 2437), True, 'import numpy as np\n'), ((2447, 2465), 'numpy.int64', 'np.int64', (['(gt / 255)'], {}), '(gt / 255)\n', (2455, 2465), True, 'import numpy as np\n'), ((2475, 2496), 'numpy.sum', 'np.sum', (['pred[gt == 1]'], {}), '(pred[gt == 1])\n', (2481, 2496), True, 'import numpy as np\n'), ((1998, 2031), 'numpy.sum', 'np.sum', (['((pred == 255) & (gt == 0))'], {}), '((pred == 255) & (gt == 0))\n', (2004, 2031), True, 'import numpy as np\n'), ((2051, 2084), 'numpy.sum', 'np.sum', (['((pred == 0) & (gt == 255))'], {}), '((pred == 0) & (gt == 255))\n', (2057, 2084), True, 'import numpy as np\n'), ((2104, 2139), 'numpy.sum', 'np.sum', (['((pred == 255) & (gt == 255))'], {}), '((pred == 255) & (gt == 255))\n', (2110, 2139), True, 'import numpy as np\n'), ((2159, 2190), 'numpy.sum', 'np.sum', (['((pred == 0) & (gt == 0))'], {}), '((pred == 0) & (gt == 0))\n', (2165, 2190), True, 'import numpy as np\n'), ((2929, 2946), 'numpy.sum', 'np.sum', (['(gt == 255)'], {}), '(gt == 255)\n', (2935, 2946), True, 'import numpy as np\n'), ((2966, 2999), 'numpy.sum', 'np.sum', (['((pred == 255) & (gt == 0))'], {}), '((pred == 255) & (gt == 0))\n', (2972, 2999), True, 'import numpy as np\n'), ((3149, 3166), 'numpy.sum', 'np.sum', (['(gt == 255)'], {}), '(gt == 255)\n', (3155, 3166), True, 'import numpy as np\n'), ((3186, 3219), 'numpy.sum', 'np.sum', (['((pred == 0) & (gt == 255))'], {}), '((pred == 0) & (gt == 255))\n', (3192, 3219), True, 'import numpy as np\n'), ((3239, 3272), 'numpy.sum', 'np.sum', (['((pred == 255) & (gt == 0))'], {}), '((pred == 255) & (gt == 0))\n', (3245, 3272), True, 'import numpy as np\n'), ((2311, 2332), 
'numpy.sum', 'np.sum', (['pred[gt == 1]'], {}), '(pred[gt == 1])\n', (2317, 2332), True, 'import numpy as np\n'), ((2342, 2354), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (2348, 2354), True, 'import numpy as np\n'), ((2357, 2367), 'numpy.sum', 'np.sum', (['gt'], {}), '(gt)\n', (2363, 2367), True, 'import numpy as np\n'), ((2506, 2523), 'numpy.sum', 'np.sum', (['(pred == 1)'], {}), '(pred == 1)\n', (2512, 2523), True, 'import numpy as np\n'), ((2526, 2541), 'numpy.sum', 'np.sum', (['(gt == 1)'], {}), '(gt == 1)\n', (2532, 2541), True, 'import numpy as np\n')] |
'''
The goal of this script is to compare the output of nrutils' strain calculation
method to the output of an independent MATLAB code of the same method. For convenience,
ascii data for the MATLAB routine's output is saved within this repository.
-- <EMAIL> 2016 --
'''
# Import useful things
from os.path import dirname, basename, isdir, realpath
from numpy import array,ones,pi,loadtxt
from matplotlib.pyplot import *
from os import system
system('clear')
# from nrutils import *
from nrutils.core.nrsc import *
#
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
# Search for simulations: Use the CFUIB high resolution base case
A = scsearch(keyword="base_96",unique=True,verbose=True)
# Convert a single simulation into a waveform object with desired multipoles
y = gwylm( scentry_obj = A[0], lm=[2,2], dt=0.4, verbose=True )
# load and plot external data files
# NOTE(review): absolute, user-specific path -- this breaks on any other
# machine; consider a repo-relative path.
matlab_output_file_location = '/Users/book/JOKI/Libs/KOALA/nrutils_dev/review/data/CFUIB0029_l2m2_r140.asc'
matlab_strain = loadtxt( matlab_output_file_location )
# Overlay the MATLAB amplitude and the nrutils (2,2)-mode amplitude.
plot( matlab_strain[:,0], matlab_strain[:,1], color='b', label='dakit (MATLAB)' )
plot( y.hlm[0].t, y.hlm[0].amp, '--g', label='nrutils (Python)' )
xlabel(r'$t/M$'); ylabel(r'$|rMh(t)|$');
legend(frameon=False,loc=2)
show()
savefig( 'check-strain-cfuib0029.pdf' )
# # plot frequency domain strain and show all current plots
# y.plot(kind='strain',show=True,domain='freq')
| [
"matplotlib.rc",
"numpy.loadtxt",
"os.system"
] | [((444, 459), 'os.system', 'system', (['"""clear"""'], {}), "('clear')\n", (450, 459), False, 'from os import system\n'), ((546, 613), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (548, 613), False, 'from matplotlib import rc\n'), ((610, 633), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (612, 633), False, 'from matplotlib import rc\n'), ((1061, 1097), 'numpy.loadtxt', 'loadtxt', (['matlab_output_file_location'], {}), '(matlab_output_file_location)\n', (1068, 1097), False, 'from numpy import array, ones, pi, loadtxt\n')] |
import os
import time
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import datetime
import numpy as np
import sys
def LangMCriterion(input, target):
    """Summed negative log-likelihood over non-padding target tokens.

    input  : (N, vocab) log-probabilities.
    target : (N,) or (N, 1) gold token indices; index 0 is padding and
             is excluded from the loss.
    Returns the *sum* (not the mean) of the negative gold log-probs as a
    0-dim tensor.

    Fix: the legacy ``Variable(mask, volatile=input.volatile)`` wrapper
    raises AttributeError on PyTorch >= 0.4 (``volatile`` was removed);
    a plain boolean mask works directly with ``masked_select``.
    """
    target = target.view(-1, 1)
    # Log-probability the model assigned to each gold token.
    logprob_select = torch.gather(input, 1, target)
    # Keep only non-padding positions.
    mask = target.gt(0)
    out = torch.masked_select(logprob_select, mask)
    loss = -torch.sum(out)  # get the average loss.
    return loss
def train(model, train_loader, eval_loader, args):
    """Train a visual-dialog generator and evaluate retrieval metrics per epoch.

    model        : network called as model(img, ques, his, ans, tans, round).
    train_loader : yields 12-tuples of batched dialog tensors (see unpacking).
    eval_loader  : loader handed to evaluate() after each epoch.
    args         : namespace providing output, lr, beta1, epochs,
                   img_feat_size, his_length, ans_length, vocab_size.

    Side effects: creates a timestamped checkpoint directory under
    args.output (also stored in args.save_path), writes log.txt there,
    saves a checkpoint every epoch plus a running 'best_model.pth'.
    Early-stops after 10 consecutive epochs without a new best MRR.
    Returns the model.
    """
    # Timestamped run directory, e.g. "<output>/2018-3-6-19-56".
    t = datetime.datetime.now()
    cur_time = '%s-%s-%s-%s-%s' % (t.year, t.month, t.day, t.hour, t.minute)
    save_path = os.path.join(args.output, cur_time)
    args.save_path = save_path
    utils.create_dir(save_path)
    optim = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
    # LR schedule: 4 warm-up epochs, then step decay every 2 epochs from 10.
    lr_default = args.lr if eval_loader is not None else 7e-4
    lr_decay_step = 2
    lr_decay_rate = .25
    lr_decay_epochs = range(10, 30, lr_decay_step) if eval_loader is not None else range(10, 20, lr_decay_step)
    gradual_warmup_steps = [0.5 * lr_default, 1.0 * lr_default, 1.5 * lr_default, 2.0 * lr_default]
    logger = utils.Logger(os.path.join(save_path, 'log.txt'))
    # Record the full configuration at the top of the log.
    for arg in vars(args):
        logger.write('{:<20}: {}'.format(arg, getattr(args, arg)))
    best_eval_score = 0
    # NOTE(review): model.train() is only called once here and is never
    # re-enabled after model.eval() below -- epochs after the first run in
    # eval mode; confirm whether that is intended.
    model.train()
    start_time = time.time()
    best_cnt = 0
    print('Training ... ')
    for epoch in range(args.epochs):
        total_loss = 0
        count = 0
        train_score = 0  # NOTE(review): never updated below -- likely dead.
        t = time.time()
        train_iter = iter(train_loader)
        # TODO: get learning rate
        # lr = adjust_learning_rate(optim, epoch, args.lr)
        if epoch < 4:
            optim.param_groups[0]['lr'] = gradual_warmup_steps[epoch]
            lr = optim.param_groups[0]['lr']
        elif epoch in lr_decay_epochs:
            optim.param_groups[0]['lr'] *= lr_decay_rate
            lr = optim.param_groups[0]['lr']
        else:
            lr = optim.param_groups[0]['lr']
        iter_step = 0
        for i in range(len(train_loader)):
            average_loss_tmp = 0
            count_tmp = 0
            train_data = next(train_iter)
            image, image_id, history, question, answer, answerT, ans_len, ans_idx, ques_ori, opt, opt_len, opt_idx = train_data
            batch_size = question.size(0)
            image = image.view(image.size(0), -1, args.img_feat_size)
            img_input = Variable(image).cuda()
            # One optimizer step per dialog round (10 rounds per dialog).
            for rnd in range(10):
                ques = question[:, rnd, :]
                his = history[:, :rnd + 1, :].clone().view(-1, args.his_length)
                ans = answer[:, rnd, :]
                tans = answerT[:, rnd, :]
                opt_ans = opt[:, rnd, :].clone().view(-1, args.ans_length)
                # NOTE(review): Variable/.data[0]/clip_grad_norm below are
                # legacy pre-0.4 PyTorch APIs; kept byte-identical here.
                ques = Variable(ques).cuda().long()
                his = Variable(his).cuda().long()
                ans = Variable(ans).cuda().long()
                tans = Variable(tans).cuda().long()
                opt_ans = Variable(opt_ans).cuda().long()
                pred = model(img_input, ques, his, ans, tans, rnd + 1)
                # Token-level NLL, normalised by the non-padding token count.
                loss = LangMCriterion(pred.view(-1, args.vocab_size), tans)
                loss = loss / torch.sum(tans.data.gt(0))
                loss.backward()
                nn.utils.clip_grad_norm(model.parameters(), 0.25)
                optim.step()
                model.zero_grad()
                average_loss_tmp += loss.data[0]
                total_loss += loss.data[0]
                count += 1
                count_tmp += 1
            sys.stdout.write('Training: Epoch {:d} Step {:d}/{:d} \r'.format(epoch + 1, i + 1, len(train_loader)))
            if (i+1) % 50 == 0:
                average_loss_tmp /= count_tmp
                print("step {} / {} (epoch {}), g_loss {:.3f}, lr = {:.6f}".format(i + 1, len(train_loader), epoch + 1, average_loss_tmp, lr))
            iter_step += 1
        total_loss /= count
        logger.write('Epoch %d : learningRate %4f train loss %4f Time: %3f' % (epoch + 1, lr, total_loss, time.time() - start_time))
        # Per-epoch retrieval evaluation; MRR drives checkpoint selection.
        model.eval()
        print('Evaluating ... ')
        start_time = time.time()
        rank_all = evaluate(model, eval_loader, args)
        R1 = np.sum(np.array(rank_all) == 1) / float(len(rank_all))
        R5 = np.sum(np.array(rank_all) <= 5) / float(len(rank_all))
        R10 = np.sum(np.array(rank_all) <= 10) / float(len(rank_all))
        ave = np.sum(np.array(rank_all)) / float(len(rank_all))
        mrr = np.sum(1 / (np.array(rank_all, dtype='float'))) / float(len(rank_all))
        logger.write('Epoch %d: mrr: %f R1: %f R5 %f R10 %f Mean %f time: %.2f' % (epoch + 1, mrr, R1, R5, R10, ave, time.time()-start_time))
        eval_score = mrr
        model_path = os.path.join(save_path, 'model_epoch_%d.pth' % (epoch + 1))
        torch.save({'epoch': epoch,
                    'args': args,
                    'model': model.state_dict()}, model_path)
        if eval_score > best_eval_score:
            model_path = os.path.join(save_path, 'best_model.pth')
            torch.save({'epoch': epoch,
                        'args': args,
                        'model': model.state_dict()}, model_path)
            best_eval_score = eval_score
            best_cnt = 0
        else:
            # Early stopping after 10 epochs without improvement.
            best_cnt = best_cnt + 1
            if best_cnt > 10:
                break
    return model
def evaluate(model, eval_loader, args, Eval=False):
    """Rank the 100 candidate answers per round by generative likelihood.

    For every dialog round, each of the 100 candidate answers is scored by
    summing its per-token negative log-probabilities (padding masked out);
    the 1-based rank of the ground-truth answer among its 100 candidates is
    recorded.  Returns a flat list with one rank per (dialog, round).
    When Eval is True, progress and running metrics are printed.
    """
    rank_all_tmp = []
    eval_iter = iter(eval_loader)
    step = 0
    for i in range(len(eval_loader)):
        eval_data = next(eval_iter)
        image, image_id, history, question, answer, answerT, questionL, opt_answer, \
        opt_answerT, answer_ids, answerLen, opt_answerLen = eval_data
        image = image.view(image.size(0), -1, args.img_feat_size)
        img_input = Variable(image).cuda()
        batch_size = question.size(0)
        for rnd in range(10):
            # Flatten the 100 candidate options into the batch dimension.
            ques, tans = question[:, rnd, :], opt_answerT[:, rnd, :].clone().view(-1, args.ans_length)
            his = history[:, :rnd + 1, :].clone().view(-1, args.his_length)
            ans = opt_answer[:, rnd, :, :].clone().view(-1, args.ans_length)
            gt_id = answer_ids[:, rnd]
            ques = Variable(ques).cuda().long()
            tans = Variable(tans).cuda().long()
            his = Variable(his).cuda().long()
            ans = Variable(ans).cuda().long()
            gt_index = Variable(gt_id).cuda().long()
            pred = model(img_input, ques, his, ans, tans, rnd + 1, Training=False)
            # Per-token cost (negative log-prob) for every candidate.
            logprob = - pred.permute(1, 0, 2).contiguous().view(-1, args.vocab_size)
            logprob_select = torch.gather(logprob, 1, tans.t().contiguous().view(-1, 1))
            mask = tans.t().data.eq(0)  # padding token index is 0
            if isinstance(logprob, Variable):
                mask = Variable(mask, volatile=logprob.volatile)  # legacy pre-0.4 API
            logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
            # Sum token costs -> one score per candidate (batch x 100).
            prob = logprob_select.view(args.ans_length, -1, 100).sum(0).view(-1, 100)
            # Offset the ground-truth indices into the flattened (batch*100) view.
            for b in range(batch_size):
                gt_index.data[b] = gt_index.data[b] + b * 100
            gt_score = prob.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(prob, 1)
            # Rank = 1 + number of candidates with strictly lower cost than GT.
            count = sort_score.lt(gt_score.view(-1, 1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_all_tmp += list(rank.view(-1).data.cpu().numpy())
        step += 1
        if Eval:
            sys.stdout.write('Evaluating: {:d}/{:d} \r'.format(i, len(eval_loader)))
            # NOTE(review): the nesting of this running-metrics block relative
            # to `if Eval:` was inferred during review (original indentation
            # was lost) -- confirm against version control.
            if (i+1) % 50 == 0:
                R1 = np.sum(np.array(rank_all_tmp) == 1) / float(len(rank_all_tmp))
                R5 = np.sum(np.array(rank_all_tmp) <= 5) / float(len(rank_all_tmp))
                R10 = np.sum(np.array(rank_all_tmp) <= 10) / float(len(rank_all_tmp))
                ave = np.sum(np.array(rank_all_tmp)) / float(len(rank_all_tmp))
                mrr = np.sum(1 / (np.array(rank_all_tmp, dtype='float'))) / float(len(rank_all_tmp))
                print('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' % (i+1, len(eval_loader), mrr, R1, R5, R10, ave))
    return rank_all_tmp
| [
"utils.create_dir",
"torch.masked_select",
"torch.gather",
"torch.sum",
"torch.autograd.Variable",
"datetime.datetime.now",
"time.time",
"numpy.array",
"os.path.join",
"torch.sort"
] | [((241, 271), 'torch.gather', 'torch.gather', (['input', '(1)', 'target'], {}), '(input, 1, target)\n', (253, 271), False, 'import torch\n'), ((424, 465), 'torch.masked_select', 'torch.masked_select', (['logprob_select', 'mask'], {}), '(logprob_select, mask)\n', (443, 465), False, 'import torch\n'), ((594, 617), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (615, 617), False, 'import datetime\n'), ((711, 746), 'os.path.join', 'os.path.join', (['args.output', 'cur_time'], {}), '(args.output, cur_time)\n', (723, 746), False, 'import os\n'), ((782, 809), 'utils.create_dir', 'utils.create_dir', (['save_path'], {}), '(save_path)\n', (798, 809), False, 'import utils\n'), ((1439, 1450), 'time.time', 'time.time', ([], {}), '()\n', (1448, 1450), False, 'import time\n'), ((374, 413), 'torch.autograd.Variable', 'Variable', (['mask'], {'volatile': 'input.volatile'}), '(mask, volatile=input.volatile)\n', (382, 413), False, 'from torch.autograd import Variable\n'), ((478, 492), 'torch.sum', 'torch.sum', (['out'], {}), '(out)\n', (487, 492), False, 'import torch\n'), ((1250, 1284), 'os.path.join', 'os.path.join', (['save_path', '"""log.txt"""'], {}), "(save_path, 'log.txt')\n", (1262, 1284), False, 'import os\n'), ((1618, 1629), 'time.time', 'time.time', ([], {}), '()\n', (1627, 1629), False, 'import time\n'), ((4262, 4273), 'time.time', 'time.time', ([], {}), '()\n', (4271, 4273), False, 'import time\n'), ((4873, 4932), 'os.path.join', 'os.path.join', (['save_path', "('model_epoch_%d.pth' % (epoch + 1))"], {}), "(save_path, 'model_epoch_%d.pth' % (epoch + 1))\n", (4885, 4932), False, 'import os\n'), ((5132, 5173), 'os.path.join', 'os.path.join', (['save_path', '"""best_model.pth"""'], {}), "(save_path, 'best_model.pth')\n", (5144, 5173), False, 'import os\n'), ((7364, 7383), 'torch.sort', 'torch.sort', (['prob', '(1)'], {}), '(prob, 1)\n', (7374, 7383), False, 'import torch\n'), ((4555, 4573), 'numpy.array', 'np.array', (['rank_all'], {}), 
'(rank_all)\n', (4563, 4573), True, 'import numpy as np\n'), ((5943, 5958), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (5951, 5958), False, 'from torch.autograd import Variable\n'), ((6960, 7001), 'torch.autograd.Variable', 'Variable', (['mask'], {'volatile': 'logprob.volatile'}), '(mask, volatile=logprob.volatile)\n', (6968, 7001), False, 'from torch.autograd import Variable\n'), ((2532, 2547), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (2540, 2547), False, 'from torch.autograd import Variable\n'), ((4348, 4366), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (4356, 4366), True, 'import numpy as np\n'), ((4416, 4434), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (4424, 4434), True, 'import numpy as np\n'), ((4485, 4503), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (4493, 4503), True, 'import numpy as np\n'), ((4624, 4657), 'numpy.array', 'np.array', (['rank_all'], {'dtype': '"""float"""'}), "(rank_all, dtype='float')\n", (4632, 4657), True, 'import numpy as np\n'), ((4160, 4171), 'time.time', 'time.time', ([], {}), '()\n', (4169, 4171), False, 'import time\n'), ((4800, 4811), 'time.time', 'time.time', ([], {}), '()\n', (4809, 4811), False, 'import time\n'), ((8003, 8025), 'numpy.array', 'np.array', (['rank_all_tmp'], {}), '(rank_all_tmp)\n', (8011, 8025), True, 'import numpy as np\n'), ((6349, 6363), 'torch.autograd.Variable', 'Variable', (['ques'], {}), '(ques)\n', (6357, 6363), False, 'from torch.autograd import Variable\n'), ((6397, 6411), 'torch.autograd.Variable', 'Variable', (['tans'], {}), '(tans)\n', (6405, 6411), False, 'from torch.autograd import Variable\n'), ((6444, 6457), 'torch.autograd.Variable', 'Variable', (['his'], {}), '(his)\n', (6452, 6457), False, 'from torch.autograd import Variable\n'), ((6490, 6503), 'torch.autograd.Variable', 'Variable', (['ans'], {}), '(ans)\n', (6498, 6503), False, 'from torch.autograd import Variable\n'), ((6541, 
6556), 'torch.autograd.Variable', 'Variable', (['gt_id'], {}), '(gt_id)\n', (6549, 6556), False, 'from torch.autograd import Variable\n'), ((7748, 7770), 'numpy.array', 'np.array', (['rank_all_tmp'], {}), '(rank_all_tmp)\n', (7756, 7770), True, 'import numpy as np\n'), ((7832, 7854), 'numpy.array', 'np.array', (['rank_all_tmp'], {}), '(rank_all_tmp)\n', (7840, 7854), True, 'import numpy as np\n'), ((7917, 7939), 'numpy.array', 'np.array', (['rank_all_tmp'], {}), '(rank_all_tmp)\n', (7925, 7939), True, 'import numpy as np\n'), ((8088, 8125), 'numpy.array', 'np.array', (['rank_all_tmp'], {'dtype': '"""float"""'}), "(rank_all_tmp, dtype='float')\n", (8096, 8125), True, 'import numpy as np\n'), ((2909, 2923), 'torch.autograd.Variable', 'Variable', (['ques'], {}), '(ques)\n', (2917, 2923), False, 'from torch.autograd import Variable\n'), ((2960, 2973), 'torch.autograd.Variable', 'Variable', (['his'], {}), '(his)\n', (2968, 2973), False, 'from torch.autograd import Variable\n'), ((3010, 3023), 'torch.autograd.Variable', 'Variable', (['ans'], {}), '(ans)\n', (3018, 3023), False, 'from torch.autograd import Variable\n'), ((3061, 3075), 'torch.autograd.Variable', 'Variable', (['tans'], {}), '(tans)\n', (3069, 3075), False, 'from torch.autograd import Variable\n'), ((3116, 3133), 'torch.autograd.Variable', 'Variable', (['opt_ans'], {}), '(opt_ans)\n', (3124, 3133), False, 'from torch.autograd import Variable\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 19:56:58 2018
@author: mirandayuan
"""
import numpy as np
# Transition probabilities, rows: start, Verb, Noun, Adv.; cols: Verb, Noun, Adv.
# Values are the true probabilities scaled up (by 10 per factor) to avoid tiny
# floats; the accumulated scale is tracked in `e` and undone at the end.
#TProb = [[0.3,0.2,0],[0.1,0.4,0.4],[0.3,0.1,0.1],[0,0,0.1]]
TProb = [[3,2,0],[1,4,4],[3,1,1],[0,0,0.1]]
# Emission probabilities (scaled likewise)  e3
#EProb = [[0.003,0.001,0],[0.004,0.003,0],[0,0,0.002]]
EProb = [[3,1,0],[4,3,0],[0,0,2]]
sentence = ["learning","changes","throughly"]
size = len(sentence)
# Initial trellis: row t holds the best scaled score of each tag at step t.
Viterbi = [[0 for i in range(size)] for i in range(size + 1)]
Viterbi[0] = [1,0,0]
# result[t] records the arg-max tag index chosen at step t.
result = [0 for i in range(size+1)]
#Start
Viterbi[1] = np.multiply(np.multiply(Viterbi[0][0], TProb[0]), EProb[0])
e = 4  # accumulated power-of-ten scaling applied so far
#Viterbi Algorithm
for i in range (2,size+1):
    e += 4
    for j in range(0,size):
        temp = np.multiply(np.multiply(Viterbi[i-1][j], TProb[j+1]), EProb[i-1])
        #print(temp)
        # Keep the elementwise-maximal candidate row.
        if (temp >= Viterbi[i]).all():
            Viterbi[i] = temp
            result[i-1] = j
        #print(Viterbi[i])
        #print(result[i-1])
#print(e)
#End
End = Viterbi[size] * TProb[size]
# NOTE(review): the next lines mix a score (End[0]) with tag indices (k) in
# result[size]; the comparison `End[k] > result[size]` therefore compares a
# score against an index after the first replacement -- looks buggy; confirm.
result[size] = End[0]
for k in range(0,size):
    if End[k] > result[size]:
        result[size] = k
print("POS of the sentence tokens:")
for v in range(1,size+1):
    print(sentence[v-1])
    if(result[v] == 0):
        print("verb")
    elif(result[v]==1):
        print("noun")
    else:
        print("adv")
print("the probability is: ")
# Undo the accumulated power-of-ten scaling to recover the true probability.
prob = End[size-1] / pow(10,e)
print(prob)
| [
"numpy.multiply"
] | [((642, 678), 'numpy.multiply', 'np.multiply', (['Viterbi[0][0]', 'TProb[0]'], {}), '(Viterbi[0][0], TProb[0])\n', (653, 678), True, 'import numpy as np\n'), ((809, 853), 'numpy.multiply', 'np.multiply', (['Viterbi[i - 1][j]', 'TProb[j + 1]'], {}), '(Viterbi[i - 1][j], TProb[j + 1])\n', (820, 853), True, 'import numpy as np\n')] |
from copy import copy
import sys
import codecs
import nltk
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.corpus import stopwords
from nltk.tree import Tree
import numpy as np
import os
import re
from wordcloud import WordCloud, ImageColorGenerator
import random
from PIL import Image
from icon_font_to_png import IconFont, FontAwesomeDownloader
from icon_font_to_png.icon_font_downloader import IconFontDownloader
from fontdump.core import GoogleFontGroup
import requests
import functools
class IoniconsDownloader(IconFontDownloader):
    """
    Ionic icon font downloader
    Project page:
    http://ionicons.com/
    """
    # Raw GitHub URLs of the Ionicons stylesheet and TTF font file.
    css_url = ('https://raw.githubusercontent.com/driftyco/ionicons/master/css/ionicons.css')
    ttf_url = ('https://raw.githubusercontent.com/driftyco/ionicons/master/fonts/ionicons.ttf')

    def get_latest_version_number(self):
        # Latest release tag of the ionicons repository on GitHub.
        return self._get_latest_tag_from_github(
            'https://api.github.com/repos/github/ionicons'
        )
CUR_DIR = os.path.dirname(__file__)
# Where exported icon PNGs are written by the icon-font downloaders.
FA_PATH = os.path.join(CUR_DIR, "exported/")
# Cache directory for fonts downloaded from Google Fonts.
FONTS_PATH = os.path.join(CUR_DIR, "fonts/")

# NOTE(review): hard-coded macOS font path; not referenced in the visible
# code -- confirm whether it is still needed.
font_path = "/Library/Fonts/Michroma.ttf"

# Baseline keyword arguments for WordCloud(); entries whose value is None
# are stripped before use (see save_cloud).
WORD_CLOUD_DEFAULTS = {
    "background_color" : "white",
    "max_words" : 400,
    "width": 1000,
    "height": 500,
    "max_font_size": 250,
    "mask": None,
    "font_path": None,
    "relative_scaling": None,
}

# Italian filler words and tokenizer artifacts dropped in addition to the
# NLTK stopword list.
common_articleswords = [
    'foto', 'video', 'foto|video', 'video|foto', 'anni', 'giorni', 'sono',
    '``', "''", '""', '...',
    'fa', 'fate', 'fanno', 'news', 'fare', "'s",
    'altre', 'altro', 'altri', 'ancora', 'sempre', 'quando', 'dove',
    'de', 'dei', 'coi', 'con',
    'prima', 'dopo', 'mai', 'ancora', 'ecco', 'quanto', 'uno', 'così', 'durante', 'mentre',
    'mai', 'senza', 'oggi', "c'è", "essere", 'avere', 'già', 'quasi', 'molto', 'poco',
]
def get_continuous_chunks(text):
    """Extract named-entity phrases from *text* using NLTK's ne_chunk.

    Adjacent named-entity tokens are joined into single phrases; duplicate
    phrases are dropped while preserving first-seen order.

    Fix: the final flush tested ``continuous_chunk`` instead of
    ``current_chunk``, which dropped a trailing entity when no entity had
    been emitted yet and appended an empty string otherwise.
    """
    chunked = ne_chunk(pos_tag(word_tokenize(text)))
    continuous_chunk = []
    current_chunk = []
    for i in chunked:
        # NOTE(review): `i` is an nltk Tree or a (token, tag) tuple here, so
        # this membership test against a list of plain strings never matches
        # tagged tokens -- kept for compatibility, but likely dead code.
        if i in common_articleswords:
            continue
        if type(i) == Tree:
            current_chunk.append(" ".join([token for token, pos in i.leaves()]))
        elif current_chunk:
            # A non-entity token ends the current phrase; flush it.
            named_entity = " ".join(current_chunk)
            if named_entity not in continuous_chunk:
                continuous_chunk.append(named_entity)
            current_chunk = []
        else:
            continue
    # Flush a phrase still pending at the end of the sentence.
    if current_chunk:
        named_entity = " ".join(current_chunk)
        if named_entity not in continuous_chunk:
            continuous_chunk.append(named_entity)
    return continuous_chunk
def compute_frequencies(
        text,
        encoding="latin-1", language='italian', min_len=3):
    """Tokenize *text* and return up to 400 (word, frequency) pairs.

    Named-entity phrases (via get_continuous_chunks) are counted as single
    units and their component tokens are removed from the plain-word counts.
    Stopwords for *language*, words shorter than *min_len* and the custom
    common_articleswords list are dropped.  Each "word;count" pair is also
    printed as a side effect.

    NOTE(review): *encoding* is unused since the file-reading code below
    was commented out -- confirm before removing the parameter.
    """
    # NLTK's default stopwords. must be downloaded if not present
    default_stopwords = set(nltk.corpus.stopwords.words(language))
    words = nltk.word_tokenize(text)
    # default_stopwords = set(nltk.corpus.stopwords.words(language))
    # fp = codecs.open(input_file, 'r', encoding)
    # words = nltk.word_tokenize(fp.read())
    # Collect named-entity phrases line by line and remember their tokens.
    seen_in_chunks = []
    chunks = []
    lines = text.split("\n")
    for line in lines:
        chunk = get_continuous_chunks(line)
        chunks.extend(chunk)
        for x in chunk:
            seen_in_chunks.extend(x.split(" "))
    # Keep entity phrases whole; drop their individual component tokens.
    words = [word for word in words if word not in seen_in_chunks]
    words.extend(chunks)
    # Remove punctuation
    # text = text.translate(None, string.punctuation)
    # Remove single-character tokens (mostly punctuation)
    words = [word for word in words if len(word) >= int(min_len)]
    # Remove numbers
    #words = [word for word in words if not word.isnumeric()]
    # Stemming words seems to make matters worse, disabled
    #stemmer = nltk.stem.snowball.SnowballStemmer('italian')
    #words = [stemmer.stem(word) for word in words]
    # Remove stopwords
    words = [word for word in words if word.lower() not in default_stopwords]
    # Remove custom list of words
    words = [word for word in words if word.lower() not in common_articleswords]
    # Calculate frequency distribution
    fdist = nltk.FreqDist(words)
    # Output the top 400 words
    frequencies = []
    for word, frequency in fdist.most_common(400):
        print('%s;%d' % (word, frequency))
        frequencies.append((word, frequency))
        # frequencies.append((word.encode(encoding), frequency))
    return frequencies
def load_frequencies(filename):
    """Read "word, count" lines from *filename* into a list of [word, count].

    Rows that do not split into exactly two comma-separated fields are
    skipped; the count field is parsed as an int.
    """
    with open(filename, "rt") as src:
        content = src.read()
    records = []
    for row in content.split("\n"):
        fields = row.split(",")
        if fields and len(fields) == 2:
            records.append([fields[0], int(fields[1])])
    return records
def save_frequencies(data, filename):
    """Write (word, count) pairs to *filename*, one "word, count" per line."""
    lines = []
    for record in data:
        lines.append(", ".join(str(field) for field in record))
    with open(filename, "wt") as out:
        out.write("\n".join(lines))
def make_mask(icon, size=1000, source="fa", color="black", background_color='white'):
    """Build a numpy RGB mask image for WordCloud from an icon or image file.

    icon   : icon name (for 'fa' / 'ionic' sources) or an image file path
             (for source='image').
    source : 'fa' (Font Awesome), 'ionic' (Ionicons) or 'image'.
    color  : NOTE(review): accepted but ignored -- the export call below
             hard-codes color='black'; confirm whether it should be used.
    Returns the composed mask as a numpy array.
    """
    if source == 'image':
        downloader = None
    # NOTE(review): for any source other than 'image'/'ionic'/'fa',
    # `downloader` stays unbound and the `if downloader:` test below raises
    # NameError -- confirm callers never pass other values.
    if source =='ionic':
        downloader = IoniconsDownloader(FA_PATH)
    elif source == 'fa':
        downloader = FontAwesomeDownloader(FA_PATH)
    if downloader:
        downloader.download_files()
        # Render the requested glyph to a PNG under the export directory.
        icon_font = IconFont(downloader.css_path, downloader.ttf_path, keep_prefix=True)
        icon_font.export_icon(icon, size, color='black', scale='auto',
                              filename=None, export_dir='exported')
        #icon = "circle"
        # http://stackoverflow.com/questions/7911451/pil-convert-png-or-gif-with-transparency-to-jpg-without
        icon_path = FA_PATH + "%s.png" % icon
    else:
        icon_path = icon
    #icon_path = os.path.join(d, "lord-ganesh.jpg")
    icon = Image.open(icon_path)
    if source == 'image':
        icon = icon.resize((size, size), Image.ANTIALIAS)
    # Composite the (possibly transparent) icon onto a solid background,
    # using the icon itself as the paste alpha mask.
    mask = Image.new("RGB", icon.size, background_color)
    mask.paste(icon,icon)
    mask = np.array(mask)
    return mask
def get_google_font(google_fonts_url):
    """Download the TTF variant of a font from a Google Fonts URL.

    Returns the local path of the first successfully downloaded TTF, or
    ``None`` when a font in the group has no TTF style or no font downloads.
    """
    if not os.path.isdir(FONTS_PATH):
        os.mkdir(FONTS_PATH)
    group = GoogleFontGroup(google_fonts_url)
    for font in group.fonts:
        if 'ttf' not in font.styles:
            return None
        ttf_style = font.styles['ttf']
        # The CSS `src` field embeds the file URL as url(...)
        url_pattern = r'url\((.+)\) '
        ttf_url = re.findall(url_pattern, ttf_style.src)[0]
        response = requests.get(ttf_url, stream=True)
        if response.status_code == 200:
            destination = os.path.join(FONTS_PATH, font.primary_name + ".ttf")
            with open(destination, "wb") as handle:
                for chunk in response:
                    handle.write(chunk)
            return destination
    return None
def save_cloud(frequencies, output, options=None, color_func=None, canvas_width=0, canvas_height=0):
    """Render a word cloud from (word, frequency) pairs and save it to `output`.

    Parameters
    ----------
    frequencies: dict or sequence of (word, count)
        Input for WordCloud.generate_from_frequencies.
    output: str
        Destination image path.
    options: dict or None
        Overrides merged over WORD_CLOUD_DEFAULTS; None-valued entries are
        dropped before being passed to WordCloud.
        BUG FIX: the original used a mutable default argument (``options={}``),
        a shared-state hazard; replaced with a None sentinel.
    color_func: callable or None
        Optional recoloring callback applied after generation.
    canvas_width, canvas_height: int
        When set (or inferred from a non-square cloud), the cloud is centered
        on a larger canvas of this size before saving.
    """
    base_options = copy(WORD_CLOUD_DEFAULTS)
    base_options.update(options or {})
    # WordCloud rejects explicit None values, so strip them.
    clean_options = {key: value for key, value in base_options.items() if value is not None}
    wordcloud = WordCloud(**clean_options).generate_from_frequencies(frequencies)
    if color_func:
        wordcloud = wordcloud.recolor(color_func=color_func)
    image = wordcloud.to_image()
    # A non-square cloud with no explicit canvas gets a canvas of its own size.
    if clean_options.get("height") != clean_options.get("width") and not canvas_width and not canvas_height:
        canvas_height = clean_options.get("height")
        canvas_width = clean_options.get("width")
    if canvas_width and canvas_height:
        final_image = Image.new(image.mode, (canvas_width, canvas_height), clean_options.get("background_color"))
        # Center the rendered cloud on the canvas.
        offset = (int((final_image.size[0] - image.size[0]) / 2), int((final_image.size[1] - image.size[1]) / 2))
        final_image.paste(image, offset)
        return final_image.save(output)
    return image.save(output)
def get_color_func(base_hue, saturation=85, vibrance=0, max_l=90, min_l=40, forced_colors=None):
    """Build a wordcloud-compatible color function producing HSL colors.

    Hue is jittered within +/- `vibrance` degrees of `base_hue`; lightness is
    drawn uniformly from [min_l, max_l]. Words present in `forced_colors`
    get their mapped color verbatim.

    BUG FIX: the original inner function assigned `vibrance`/`base_hue`/
    `min_l`/`max_l` only inside ``if kwargs.get('vibrance')``, which made them
    function-local; with the default ``vibrance=0`` the branch was skipped and
    every call raised UnboundLocalError. The values are now read
    unconditionally, falling back to the enclosing-scope defaults. The mutable
    default ``forced_colors={}`` was also replaced with a None sentinel.
    """
    forced = forced_colors or {}

    def hsl_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
        # TODO: We should validate user input
        if word in forced:
            return forced[word]
        hue = kwargs.get('base_hue', base_hue)
        vib = kwargs.get('vibrance', vibrance)
        lightness_lo = kwargs.get('min_l', min_l)
        lightness_hi = kwargs.get('max_l', max_l)
        hue = random.randint(hue - vib, hue + vib) % 360
        return "hsl(%s, %s%%, %s%%)" % (hue, saturation, random.randint(lightness_lo, lightness_hi))

    return functools.partial(hsl_color_func, base_hue=base_hue, vibrance=vibrance,
                             min_l=min_l, max_l=max_l)
| [
"os.mkdir",
"PIL.Image.new",
"wordcloud.WordCloud",
"icon_font_to_png.IconFont",
"os.path.join",
"nltk.word_tokenize",
"random.randint",
"icon_font_to_png.FontAwesomeDownloader",
"os.path.dirname",
"re.findall",
"requests.get",
"functools.partial",
"nltk.corpus.stopwords.words",
"os.path.i... | [((1008, 1033), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1023, 1033), False, 'import os\n'), ((1044, 1078), 'os.path.join', 'os.path.join', (['CUR_DIR', '"""exported/"""'], {}), "(CUR_DIR, 'exported/')\n", (1056, 1078), False, 'import os\n'), ((1092, 1123), 'os.path.join', 'os.path.join', (['CUR_DIR', '"""fonts/"""'], {}), "(CUR_DIR, 'fonts/')\n", (1104, 1123), False, 'import os\n'), ((2924, 2948), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (2942, 2948), False, 'import nltk\n'), ((4182, 4202), 'nltk.FreqDist', 'nltk.FreqDist', (['words'], {}), '(words)\n', (4195, 4202), False, 'import nltk\n'), ((5771, 5792), 'PIL.Image.open', 'Image.open', (['icon_path'], {}), '(icon_path)\n', (5781, 5792), False, 'from PIL import Image\n'), ((5889, 5934), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'icon.size', 'background_color'], {}), "('RGB', icon.size, background_color)\n", (5898, 5934), False, 'from PIL import Image\n'), ((5972, 5986), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (5980, 5986), True, 'import numpy as np\n'), ((6121, 6154), 'fontdump.core.GoogleFontGroup', 'GoogleFontGroup', (['google_fonts_url'], {}), '(google_fonts_url)\n', (6136, 6154), False, 'from fontdump.core import GoogleFontGroup\n'), ((6821, 6846), 'copy.copy', 'copy', (['WORD_CLOUD_DEFAULTS'], {}), '(WORD_CLOUD_DEFAULTS)\n', (6825, 6846), False, 'from copy import copy\n'), ((8482, 8584), 'functools.partial', 'functools.partial', (['grey_color_func'], {'base_hue': 'base_hue', 'vibrance': 'vibrance', 'min_l': 'min_l', 'max_l': 'max_l'}), '(grey_color_func, base_hue=base_hue, vibrance=vibrance,\n min_l=min_l, max_l=max_l)\n', (8499, 8584), False, 'import functools\n'), ((2873, 2910), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['language'], {}), '(language)\n', (2900, 2910), False, 'import nltk\n'), ((5303, 5371), 'icon_font_to_png.IconFont', 'IconFont', (['downloader.css_path', 
'downloader.ttf_path'], {'keep_prefix': '(True)'}), '(downloader.css_path, downloader.ttf_path, keep_prefix=True)\n', (5311, 5371), False, 'from icon_font_to_png import IconFont, FontAwesomeDownloader\n'), ((6056, 6081), 'os.path.isdir', 'os.path.isdir', (['FONTS_PATH'], {}), '(FONTS_PATH)\n', (6069, 6081), False, 'import os\n'), ((6091, 6111), 'os.mkdir', 'os.mkdir', (['FONTS_PATH'], {}), '(FONTS_PATH)\n', (6099, 6111), False, 'import os\n'), ((6385, 6420), 'requests.get', 'requests.get', (['font_url'], {'stream': '(True)'}), '(font_url, stream=True)\n', (6397, 6420), False, 'import requests\n'), ((1951, 1970), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (1964, 1970), False, 'from nltk import ne_chunk, pos_tag, word_tokenize\n'), ((5195, 5225), 'icon_font_to_png.FontAwesomeDownloader', 'FontAwesomeDownloader', (['FA_PATH'], {}), '(FA_PATH)\n', (5216, 5225), False, 'from icon_font_to_png import IconFont, FontAwesomeDownloader\n'), ((6334, 6369), 're.findall', 're.findall', (['pattern', 'font_style.src'], {}), '(pattern, font_style.src)\n', (6344, 6369), False, 'import re\n'), ((6478, 6530), 'os.path.join', 'os.path.join', (['FONTS_PATH', "(font.primary_name + '.ttf')"], {}), "(FONTS_PATH, font.primary_name + '.ttf')\n", (6490, 6530), False, 'import os\n'), ((6993, 7019), 'wordcloud.WordCloud', 'WordCloud', ([], {}), '(**clean_options)\n', (7002, 7019), False, 'from wordcloud import WordCloud, ImageColorGenerator\n'), ((8319, 8375), 'random.randint', 'random.randint', (['(base_hue - vibrance)', '(base_hue + vibrance)'], {}), '(base_hue - vibrance, base_hue + vibrance)\n', (8333, 8375), False, 'import random\n'), ((8440, 8468), 'random.randint', 'random.randint', (['min_l', 'max_l'], {}), '(min_l, max_l)\n', (8454, 8468), False, 'import random\n')] |
import pandas as pd
import torch
from tqdm import tqdm
from sklearn.preprocessing import minmax_scale
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# load the data, and form the tensor dataset
from opt import LeastSquaresProxPointOptimizer
# load data
df = pd.read_csv('boston.csv')
inputs = minmax_scale(df[['RM','LSTAT','PTRATIO']].to_numpy()) # rescale inputs
inputs = np.hstack([inputs, np.ones((inputs.shape[0], 1))]) # add "1" to each sample
# Target column rescaled to [0, 1].
labels = minmax_scale(df['MEDV'].to_numpy())
# NOTE(review): labels are negated here — presumably to match the sign
# convention expected by LeastSquaresProxPointOptimizer; confirm against opt.
dataset = torch.utils.data.TensorDataset(torch.tensor(inputs), -torch.tensor(labels))
# setup experiment parameters
batch_sizes = [1, 2, 3, 4, 5, 6]
experiments = range(20)   # independent repetitions per configuration
epochs = range(10)
# 30 step sizes, log-spaced over five orders of magnitude.
step_sizes = np.geomspace(0.001, 100, 30)
# run experiments and record results.
# FIX: the original called DataFrame.append inside the innermost loop, which
# is O(n^2) and was deprecated (removed in pandas 2.x). Rows are now collected
# in a plain list and the DataFrame is built once at the end. (The original's
# `sort=True` alphabetized the columns; downstream code selects columns by
# name, so the explicit column order here is equivalent.)
records = []
total_epochs = len(batch_sizes) * len(experiments) * len(step_sizes) * len(epochs)
with tqdm(total=total_epochs, desc='batch_size = NA, step_size = NA, experiment = NA',
          unit='epochs',
          ncols=160) as pbar:
    for batch_size in batch_sizes:
        for step_size in step_sizes:
            for experiment in experiments:
                # Fresh random starting point for every repetition.
                x = torch.empty(4, requires_grad=False, dtype=torch.float64)
                torch.nn.init.normal_(x)
                optimizer = LeastSquaresProxPointOptimizer(x, step_size)
                for epoch in epochs:
                    epoch_loss = 0.
                    for A_batch, b_batch in torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size):
                        batch_losses = optimizer.step(A_batch, b_batch)
                        epoch_loss += torch.sum(batch_losses).item()
                    # Mean loss per sample over the whole epoch.
                    epoch_loss /= len(dataset)
                    records.append({'batch_size': batch_size,
                                    'step_size': step_size,
                                    'experiment': experiment,
                                    'epoch': epoch,
                                    'loss': epoch_loss})
                    pbar.update()
                pbar.set_description(f'batch_size = {batch_size}, step_size = {step_size}, experiment = {experiment}')
losses = pd.DataFrame(records, columns=['batch_size', 'step_size', 'experiment', 'epoch', 'loss'])
# save and read from CSV - so that we can use the results instead of re-computing them,
# by commenting everything up to the next line.
losses.to_csv('results.txt', header=True, index=False)
losses = pd.read_csv('results.txt', header=0)
# Best (minimum over epochs) loss per (batch_size, step_size, experiment).
best_losses = losses[['batch_size', 'step_size', 'experiment', 'loss']]\
    .groupby(['batch_size', 'step_size', 'experiment'], as_index=False)\
    .min()
sns.set()
# Plot loss vs step size, one band per batch size (batch_size as a string so
# seaborn treats it as a categorical hue).
plot_losses = best_losses.copy()
plot_losses.loc[:, 'batch_size'] = plot_losses.loc[:, 'batch_size'].astype(str)
ax = sns.lineplot(x='step_size', y='loss', hue='batch_size', data=plot_losses, err_style='band')
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
# Same plot restricted to the larger step sizes.
# FIX: take an explicit .copy() of the filtered frame before assigning into
# it — the original mutated a view of best_losses, triggering
# SettingWithCopyWarning and risking a silent no-op.
plot_losses = best_losses[best_losses['step_size'] >= 0.1].copy()
plot_losses.loc[:, 'batch_size'] = plot_losses.loc[:, 'batch_size'].astype(str)
ax = sns.lineplot(x='step_size', y='loss', hue='batch_size', data=plot_losses, err_style='band')
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
| [
"pandas.DataFrame",
"seaborn.lineplot",
"tqdm.tqdm",
"opt.LeastSquaresProxPointOptimizer",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"torch.utils.data.DataLoader",
"torch.sum",
"pandas.read_csv",
"numpy.geomspace",
"torch.empty",
"numpy.ones",
"torch.nn.init.normal_",
"seabor... | [((287, 312), 'pandas.read_csv', 'pd.read_csv', (['"""boston.csv"""'], {}), "('boston.csv')\n", (298, 312), True, 'import pandas as pd\n'), ((730, 758), 'numpy.geomspace', 'np.geomspace', (['(0.001)', '(100)', '(30)'], {}), '(0.001, 100, 30)\n', (742, 758), True, 'import numpy as np\n'), ((806, 891), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['batch_size', 'step_size', 'experiment', 'epoch', 'loss']"}), "(columns=['batch_size', 'step_size', 'experiment', 'epoch', 'loss']\n )\n", (818, 891), True, 'import pandas as pd\n'), ((2487, 2523), 'pandas.read_csv', 'pd.read_csv', (['"""results.txt"""'], {'header': '(0)'}), "('results.txt', header=0)\n", (2498, 2523), True, 'import pandas as pd\n'), ((2683, 2692), 'seaborn.set', 'sns.set', ([], {}), '()\n', (2690, 2692), True, 'import seaborn as sns\n'), ((2811, 2906), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""step_size"""', 'y': '"""loss"""', 'hue': '"""batch_size"""', 'data': 'plot_losses', 'err_style': '"""band"""'}), "(x='step_size', y='loss', hue='batch_size', data=plot_losses,\n err_style='band')\n", (2823, 2906), True, 'import seaborn as sns\n'), ((2945, 2955), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2953, 2955), True, 'import matplotlib.pyplot as plt\n'), ((3101, 3196), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""step_size"""', 'y': '"""loss"""', 'hue': '"""batch_size"""', 'data': 'plot_losses', 'err_style': '"""band"""'}), "(x='step_size', y='loss', hue='batch_size', data=plot_losses,\n err_style='band')\n", (3113, 3196), True, 'import seaborn as sns\n'), ((3235, 3245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3243, 3245), True, 'import matplotlib.pyplot as plt\n'), ((565, 585), 'torch.tensor', 'torch.tensor', (['inputs'], {}), '(inputs)\n', (577, 585), False, 'import torch\n'), ((975, 1091), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_epochs', 'desc': '"""batch_size = NA, step_size = NA, experiment = NA"""', 'unit': '"""epochs"""', 'ncols': 
'(160)'}), "(total=total_epochs, desc=\n 'batch_size = NA, step_size = NA, experiment = NA', unit='epochs',\n ncols=160)\n", (979, 1091), False, 'from tqdm import tqdm\n'), ((421, 450), 'numpy.ones', 'np.ones', (['(inputs.shape[0], 1)'], {}), '((inputs.shape[0], 1))\n', (428, 450), True, 'import numpy as np\n'), ((588, 608), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (600, 608), False, 'import torch\n'), ((1247, 1303), 'torch.empty', 'torch.empty', (['(4)'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(4, requires_grad=False, dtype=torch.float64)\n', (1258, 1303), False, 'import torch\n'), ((1320, 1344), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['x'], {}), '(x)\n', (1341, 1344), False, 'import torch\n'), ((1374, 1418), 'opt.LeastSquaresProxPointOptimizer', 'LeastSquaresProxPointOptimizer', (['x', 'step_size'], {}), '(x, step_size)\n', (1404, 1418), False, 'from opt import LeastSquaresProxPointOptimizer\n'), ((1536, 1609), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': 'batch_size'}), '(dataset, shuffle=True, batch_size=batch_size)\n', (1563, 1609), False, 'import torch\n'), ((1843, 1994), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'batch_size': [batch_size], 'step_size': [step_size], 'experiment': [\n experiment], 'epoch': [epoch], 'loss': [epoch_loss]}"], {}), "({'batch_size': [batch_size], 'step_size': [step_size\n ], 'experiment': [experiment], 'epoch': [epoch], 'loss': [epoch_loss]})\n", (1865, 1994), True, 'import pandas as pd\n'), ((1721, 1744), 'torch.sum', 'torch.sum', (['batch_losses'], {}), '(batch_losses)\n', (1730, 1744), False, 'import torch\n')] |
import numpy as np
def rotate(angle, desired_angle):
    """Return the signed difference angle - desired_angle wrapped to (-pi, pi].

    The unit vector at `angle` is rotated by -desired_angle and the result's
    heading is read back with arctan2, which performs the wrapping.
    """
    direction = np.array([np.cos(angle), np.sin(angle)])
    c, s = np.cos(-desired_angle), np.sin(-desired_angle)
    rotation = np.array([[c, -s], [s, c]])
    rotated = rotation @ direction
    return np.arctan2(rotated[1], rotated[0])
def is_near_zero(s):
    """True when the scalar's magnitude is below the 1e-6 tolerance."""
    return -1e-6 < s < 1e-6
def normalize(v):
    """Return v scaled to unit Euclidean length; a zero vector is returned as-is."""
    length = np.linalg.norm(v)
    return v if length == 0 else v / length
def skew(v):
    """3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    x, y, z = np.ravel(v)
    return np.array([[0, -z, y],
                     [z, 0, -x],
                     [-y, x, 0]])
def vec6_to_se3(v):
    """Assemble a 4x4 se(3)-style matrix from a twist vector [w(3), p(3)]."""
    top_rows = np.c_[skew(v[:3]), v[3:]]
    return np.r_[top_rows, [[0, 0, 0, 1]]]
def vec6_to_SE3(v):
    """Map a 6-vector twist straight to its SE(3) transform (exp of the se(3) form)."""
    return se3_to_SE3(vec6_to_se3(v))
def so3_to_vec(so3):
    """Recover the axis-angle vector from a skew-symmetric so(3) matrix."""
    components = (so3[2, 1], so3[0, 2], so3[1, 0])
    return np.array(components)
def w_split(w):
    """Split an axis-angle vector into (unit axis, magnitude)."""
    magnitude = np.linalg.norm(w)
    return normalize(w), magnitude
def so3_to_SO3(so3):
    """Matrix exponential so(3) -> SO(3) via the Rodrigues formula.

    `so3` is a skew-symmetric matrix already scaled by the rotation angle
    theta (i.e. skew(w_hat) * theta).
    """
    w_theta = so3_to_vec(so3)
    if is_near_zero(np.linalg.norm(w_theta)):
        # Zero rotation: exp(0) is the identity.
        return np.eye(3)
    else:
        theta = w_split(w_theta)[1]
        # Unit-axis skew matrix (divide the scaled matrix by the angle).
        w_skew = so3 / theta
        return np.eye(3) + np.sin(theta)*w_skew + (1 - np.cos(theta))*w_skew@w_skew # Rodriguez formula
def se3_to_SE3(se3):
    """Matrix exponential se(3) -> SE(3) (closed form).

    The rotation block is exponentiated with so3_to_SO3; the translation is
    obtained by applying the standard G(theta) integral matrix to the
    translational part of the twist.
    """
    w_theta = so3_to_vec(se3[0:3, 0:3])
    if is_near_zero(np.linalg.norm(w_theta)):
        # Pure translation: identity rotation, twist translation copied over.
        return np.r_[np.c_[np.eye(3), se3[0:3, 3]], [[0, 0, 0, 1]]]
    else:
        theta = w_split(w_theta)[1]
        # Unit-axis skew matrix of the rotation part.
        w_skew = se3[0:3, 0:3] / theta
        return np.r_[
            np.c_[
                so3_to_SO3(se3[0:3, 0:3]),
                # G(theta) = I*theta + (1-cos)*W + (theta-sin)*W^2, applied to
                # the translational part and rescaled by 1/theta.
                (np.eye(3) * theta + (1 - np.cos(theta)) * w_skew + (theta - np.sin(theta)) * w_skew @ w_skew) @ se3[0:3, 3] / theta
            ],
            [[0, 0, 0, 1]]
        ]
| [
"numpy.arctan2",
"numpy.eye",
"numpy.ravel",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.dot"
] | [((231, 246), 'numpy.dot', 'np.dot', (['rot', 'xa'], {}), '(rot, xa)\n', (237, 246), True, 'import numpy as np\n'), ((259, 293), 'numpy.arctan2', 'np.arctan2', (['delta_v[1]', 'delta_v[0]'], {}), '(delta_v[1], delta_v[0])\n', (269, 293), True, 'import numpy as np\n'), ((390, 407), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (404, 407), True, 'import numpy as np\n'), ((486, 497), 'numpy.ravel', 'np.ravel', (['v'], {}), '(v)\n', (494, 497), True, 'import numpy as np\n'), ((509, 573), 'numpy.array', 'np.array', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (517, 573), True, 'import numpy as np\n'), ((764, 807), 'numpy.array', 'np.array', (['[so3[2, 1], so3[0, 2], so3[1, 0]]'], {}), '([so3[2, 1], so3[0, 2], so3[1, 0]])\n', (772, 807), True, 'import numpy as np\n'), ((65, 78), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (71, 78), True, 'import numpy as np\n'), ((80, 93), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (86, 93), True, 'import numpy as np\n'), ((851, 868), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (865, 868), True, 'import numpy as np\n'), ((942, 965), 'numpy.linalg.norm', 'np.linalg.norm', (['w_theta'], {}), '(w_theta)\n', (956, 965), True, 'import numpy as np\n'), ((983, 992), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (989, 992), True, 'import numpy as np\n'), ((1256, 1279), 'numpy.linalg.norm', 'np.linalg.norm', (['w_theta'], {}), '(w_theta)\n', (1270, 1279), True, 'import numpy as np\n'), ((116, 138), 'numpy.cos', 'np.cos', (['(-desired_angle)'], {}), '(-desired_angle)\n', (122, 138), True, 'import numpy as np\n'), ((167, 189), 'numpy.sin', 'np.sin', (['(-desired_angle)'], {}), '(-desired_angle)\n', (173, 189), True, 'import numpy as np\n'), ((191, 213), 'numpy.cos', 'np.cos', (['(-desired_angle)'], {}), '(-desired_angle)\n', (197, 213), True, 'import numpy as np\n'), ((1083, 1092), 'numpy.eye', 'np.eye', 
(['(3)'], {}), '(3)\n', (1089, 1092), True, 'import numpy as np\n'), ((141, 163), 'numpy.sin', 'np.sin', (['(-desired_angle)'], {}), '(-desired_angle)\n', (147, 163), True, 'import numpy as np\n'), ((1095, 1108), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1101, 1108), True, 'import numpy as np\n'), ((1123, 1136), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1129, 1136), True, 'import numpy as np\n'), ((1309, 1318), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1315, 1318), True, 'import numpy as np\n'), ((1548, 1557), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1554, 1557), True, 'import numpy as np\n'), ((1573, 1586), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1579, 1586), True, 'import numpy as np\n'), ((1608, 1621), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1614, 1621), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
from sklearn.neighbors import KDTree
class ReliefF(object):
    """Feature selection using data-mined expert knowledge.

    Based on the ReliefF algorithm as introduced in:
    Kononenko, Igor et al. Overcoming the myopia of inductive learning
    algorithms with RELIEFF (1997), Applied Intelligence, 7(1), p39-55
    """

    def __init__(self, n_neighbors=100, n_features_to_keep=10):
        """Sets up ReliefF to perform feature selection.

        Parameters
        ----------
        n_neighbors: int (default: 100)
            The number of neighbors to consider when assigning feature
            importance scores. More neighbors results in more accurate
            scores, but takes longer.
        n_features_to_keep: int (default: 10)
            Number of top-ranked features that `transform` retains.

        Returns
        -------
        None
        """
        self.feature_scores = None   # per-feature importance, filled by fit()
        self.top_features = None     # feature indexes sorted by descending score
        self.tree = None             # KDTree built over the training samples
        self.n_neighbors = n_neighbors
        self.n_features_to_keep = n_features_to_keep

    def fit(self, X, y):
        """Computes the feature importance scores from the training data.

        Parameters
        ----------
        X: array-like {n_samples, n_features}
            Training instances to compute the feature importance scores from
        y: array-like {n_samples}
            Training labels

        Returns
        -------
        None
        """
        self.feature_scores = np.zeros(X.shape[1])
        self.tree = KDTree(X)
        for source_index in range(X.shape[0]):
            # k+1 because the closest match is the query point itself.
            distances, indices = self.tree.query(
                X[source_index].reshape(1, -1), k=self.n_neighbors + 1)
            # First match is self, so ignore it
            for neighbor_index in indices[0][1:]:
                similar_features = X[source_index] == X[neighbor_index]
                label_match = y[source_index] == y[neighbor_index]
                # If the labels match, increment features that match and
                # decrement features that do not match; do the opposite
                # when the labels differ.
                if label_match:
                    self.feature_scores[similar_features] += 1.
                    self.feature_scores[~similar_features] -= 1.
                else:
                    self.feature_scores[~similar_features] += 1.
                    self.feature_scores[similar_features] -= 1.
        self.top_features = np.argsort(self.feature_scores)[::-1]

    def transform(self, X):
        """Reduces the feature set down to the top `n_features_to_keep` features.

        Parameters
        ----------
        X: array-like {n_samples, n_features}
            Feature matrix to perform feature selection on

        Returns
        -------
        X_reduced: array-like {n_samples, n_features_to_keep}
            Reduced feature matrix
        """
        # BUG FIX: the original indexed with top_features[n_features_to_keep]
        # (a single feature column, returning a 1-D array) instead of slicing
        # the first n_features_to_keep ranked features.
        return X[:, self.top_features[:self.n_features_to_keep]]
| [
"sklearn.neighbors.KDTree",
"numpy.argsort",
"numpy.zeros"
] | [((2520, 2540), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (2528, 2540), True, 'import numpy as np\n'), ((2561, 2570), 'sklearn.neighbors.KDTree', 'KDTree', (['X'], {}), '(X)\n', (2567, 2570), False, 'from sklearn.neighbors import KDTree\n'), ((3488, 3519), 'numpy.argsort', 'np.argsort', (['self.feature_scores'], {}), '(self.feature_scores)\n', (3498, 3519), True, 'import numpy as np\n')] |
import numpy as np
class ZernikeCoefficients(object):
    """Container for a vector of Zernike coefficients plus an update counter.

    Modes are numbered starting from FIRST_ZERNIKE_MODE (piston is excluded),
    so element i of the coefficient vector corresponds to Zernike mode
    FIRST_ZERNIKE_MODE + i.
    """

    FIRST_ZERNIKE_MODE = 2

    def __init__(self, coefficients, counter=0):
        self._coefficients = coefficients
        self._counter = counter

    def zernikeIndexes(self):
        """Array of the Zernike mode indexes covered by this object."""
        return np.arange(self.FIRST_ZERNIKE_MODE,
                         self.FIRST_ZERNIKE_MODE + self.numberOfModes())

    def numberOfModes(self):
        return len(self._coefficients)

    def getZ(self, zernikeIndexes):
        """Coefficient values for the requested Zernike mode indexes."""
        return self.toNumpyArray()[np.array(zernikeIndexes) -
                                   self.FIRST_ZERNIKE_MODE]

    def toDictionary(self):
        """Mapping from Zernike mode index to coefficient value."""
        keys = self.zernikeIndexes()
        values = self._coefficients
        return dict(list(zip(keys, values)))

    def toNumpyArray(self):
        return self._coefficients

    @staticmethod
    def fromNumpyArray(coefficientsAsNumpyArray, counter=0):
        """Alternate constructor copying the input into a fresh numpy array."""
        return ZernikeCoefficients(np.array(coefficientsAsNumpyArray), counter)

    def counter(self):
        return self._counter

    def setCounter(self, counter):
        self._counter = counter

    def __eq__(self, o):
        # BUG FIX: comparing against a foreign type used to raise
        # AttributeError (accessing o._counter); return NotImplemented so
        # Python falls back to its default comparison instead.
        if not isinstance(o, ZernikeCoefficients):
            return NotImplemented
        if self._counter != o._counter:
            return False
        if not np.array_equal(self._coefficients, o._coefficients):
            return False
        return True

    def __ne__(self, o):
        result = self.__eq__(o)
        if result is NotImplemented:
            return result
        return not result

    def __str__(self):
        return str(self._coefficients)
| [
"numpy.array_equal",
"numpy.array"
] | [((915, 949), 'numpy.array', 'np.array', (['coefficientsAsNumpyArray'], {}), '(coefficientsAsNumpyArray)\n', (923, 949), True, 'import numpy as np\n'), ((1187, 1238), 'numpy.array_equal', 'np.array_equal', (['self._coefficients', 'o._coefficients'], {}), '(self._coefficients, o._coefficients)\n', (1201, 1238), True, 'import numpy as np\n'), ((502, 526), 'numpy.array', 'np.array', (['zernikeIndexes'], {}), '(zernikeIndexes)\n', (510, 526), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import tensorflow as tf
import glob
import argparse
import os
INPUT_SIZE = 512 # input image size for Generator
ATTENTION_SIZE = 32 # size of contextual attention
def sort(str_lst):
    """Return the given strings as a new list in ascending order."""
    return sorted(str_lst)
# reconstruct residual from patches
def reconstruct_residual_from_patches(residual, multiple):
    """Stitch a flat batch of (multiple x multiple x 3) patches back into a full image.

    Inverse of the patch extraction used by residual_aggregate: the patch grid
    is ATTENTION_SIZE x ATTENTION_SIZE.
    """
    grid = np.reshape(residual, [ATTENTION_SIZE, ATTENTION_SIZE, multiple, multiple, 3])
    interleaved = np.transpose(grid, [0, 2, 1, 3, 4])
    return np.reshape(interleaved, [ATTENTION_SIZE * multiple, ATTENTION_SIZE * multiple, 3])
# extract image patches
def extract_image_patches(img, multiple):
    """Split an HxWxC image into a grid of (multiple x multiple) patches.

    Returns an array of shape
    (H//multiple, W//multiple, multiple, multiple, C): first two axes index
    the patch grid, the next two index pixels within a patch.
    """
    height, width, channels = img.shape
    blocked = np.reshape(img, [height // multiple, multiple,
                            width // multiple, multiple, channels])
    return np.transpose(blocked, [0, 2, 1, 3, 4])
# residual aggregation module
def residual_aggregate(residual, attention, multiple):
    """Residual aggregation module: redistribute the raw residual using the
    contextual-attention weights, then rebuild the full-resolution residual."""
    patch_side = multiple * INPUT_SIZE // ATTENTION_SIZE
    patches = extract_image_patches(residual, patch_side)
    flat = np.reshape(patches, [1, patches.shape[0] * patches.shape[1], -1])
    weighted = np.matmul(attention, flat)
    return reconstruct_residual_from_patches(weighted, patch_side)
# resize image by averaging neighbors
def resize_ave(img, multiple):
    """Downscale an image by `multiple` by averaging each (multiple x multiple) patch."""
    as_float = img.astype(np.float32)
    patches = extract_image_patches(as_float, multiple)
    return np.mean(patches, axis=(2, 3))
# pre-processing module
def pre_process(raw_img, raw_mask, multiple):
    """Prepare the generator inputs from a raw image and mask.

    Returns (large_img, large_mask, small_img, small_mask): the image/mask
    resized to multiple*INPUT_SIZE, plus the INPUT_SIZE (512) versions fed to
    the network. The small mask is inverted so holes are 1 and background 0.
    """
    raw_mask = raw_mask.astype(np.float32) / 255.
    raw_img = raw_img.astype(np.float32)
    # resize raw image & mask to designated size
    large_img = cv2.resize(raw_img, (multiple * INPUT_SIZE, multiple * INPUT_SIZE), interpolation = cv2. INTER_LINEAR)
    large_mask = cv2.resize(raw_mask, (multiple * INPUT_SIZE, multiple * INPUT_SIZE), interpolation = cv2.INTER_NEAREST)
    # down-sample large image & mask to 512x512
    small_img = resize_ave(large_img, multiple)
    small_mask = cv2.resize(raw_mask, (INPUT_SIZE, INPUT_SIZE), interpolation = cv2.INTER_NEAREST)
    # set hole region to 1. and background to 0.
    small_mask = 1. - small_mask
    return large_img, large_mask, small_img, small_mask
# post-processing module
def post_process(raw_img, large_img, large_mask, res_512, img_512, mask_512, attention, multiple):
    """Upscale the 512x512 network output back to the raw resolution.

    Combines a bilinear upsample of the low-res result with an
    attention-aggregated high-frequency residual, then pastes only the hole
    region onto the original image. Returns a uint8 image of the raw size.
    """
    # compute the raw residual map
    h, w, c = raw_img.shape
    low_base = cv2.resize(res_512.astype(np.float32), (INPUT_SIZE * multiple, INPUT_SIZE * multiple), interpolation = cv2.INTER_LINEAR)
    low_large = cv2.resize(img_512.astype(np.float32), (INPUT_SIZE * multiple, INPUT_SIZE * multiple), interpolation = cv2.INTER_LINEAR)
    # High-frequency detail lost by the downsample, restricted to the hole.
    residual = (large_img - low_large) * large_mask
    # reconstruct residual map using residual aggregation module
    residual = residual_aggregate(residual, attention, multiple)
    # compute large inpainted result
    res_large = low_base + residual
    res_large = np.clip(res_large, 0., 255.)
    # resize large inpainted result to raw size
    res_raw = cv2.resize(res_large, (w, h), interpolation = cv2.INTER_LINEAR)
    # paste the hole region to the original raw image
    mask = cv2.resize(mask_512.astype(np.float32), (w, h), interpolation = cv2.INTER_LINEAR)
    mask = np.expand_dims(mask, axis=2)
    res_raw = res_raw * mask + raw_img * (1. - mask)
    return res_raw.astype(np.uint8)
def inpaint(raw_img,
            raw_mask,
            sess,
            inpainted_512_node,
            attention_node,
            mask_512_node,
            img_512_ph,
            mask_512_ph,
            multiple):
    """Run the full HiFill inpainting pipeline on one image/mask pair.

    Pre-processes to 512x512, runs the TF session to get the low-res result,
    attention scores and processed mask, then post-processes back to the raw
    resolution. Returns the inpainted image at the raw size (uint8).
    """
    # pre-processing
    img_large, mask_large, img_512, mask_512 = pre_process(raw_img, raw_mask, multiple)
    # neural network: only the first channel of the mask is fed in.
    inpainted_512, attention, mask_512 = sess.run([inpainted_512_node, attention_node, mask_512_node], feed_dict={img_512_ph: [img_512] , mask_512_ph:[mask_512[:,:,0:1]]})
    # post-processing (sess.run returns batches; index [0] drops the batch dim)
    res_raw_size = post_process(raw_img, img_large, mask_large, \
                inpainted_512[0], img_512, mask_512[0], attention[0], multiple)
    return res_raw_size
def read_imgs_masks(args):
    """Collect and alphabetically sort the image and mask paths.

    The glob pattern '*.*[gG]' matches extensions ending in g/G
    (jpg, png, JPG, ...). Counts and path lists are printed for inspection.
    """
    paths_img = sort(glob.glob(args.images + '/*.*[gG]'))
    paths_mask = sort(glob.glob(args.masks + '/*.*[gG]'))
    print('#imgs: ' + str(len(paths_img)))
    print('#imgs: ' + str(len(paths_mask)))
    print(paths_img)
    print(paths_mask)
    return paths_img, paths_mask
# Script entry: load the frozen HiFill graph (TF1 style) and inpaint every
# image/mask pair found in the configured directories.
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.images = '/home/socialab157/Desktop/YLD_fig/test_inpainting_methods/images' # input image directory
args.masks = '/home/socialab157/Desktop/YLD_fig/test_inpainting_methods/masks' # input mask directory
args.output_dir = '/home/socialab157/Desktop/YLD_fig/test_inpainting_methods/results' # output directory
args.multiple = 6 # multiples of image resizing
paths_img, paths_mask = read_imgs_masks(args)
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)
with tf.Graph().as_default():
    # Load the frozen inference graph from the protobuf file.
    with open('./pb/hifill.pb', "rb") as f:
        output_graph_def = tf.GraphDef()
        output_graph_def.ParseFromString(f.read())
        tf.import_graph_def(output_graph_def, name="")
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        # Look up the input placeholders and output tensors by name.
        image_ph = sess.graph.get_tensor_by_name('img:0')
        mask_ph = sess.graph.get_tensor_by_name('mask:0')
        inpainted_512_node = sess.graph.get_tensor_by_name('inpainted:0')
        attention_node = sess.graph.get_tensor_by_name('attention:0')
        mask_512_node = sess.graph.get_tensor_by_name('mask_processed:0')
        # NOTE(review): pairs images and masks by sorted order — assumes the
        # two directories contain matching, identically-ordered file names.
        for path_img, path_mask in zip(paths_img, paths_mask):
            raw_img = cv2.imread(path_img)
            raw_mask = cv2.imread(path_mask)
            # Masks on disk mark holes as dark; invert so holes are bright.
            raw_mask = cv2.bitwise_not(raw_mask)
            inpainted = inpaint(raw_img, raw_mask, sess, inpainted_512_node, attention_node, mask_512_node, image_ph, mask_ph, args.multiple)
            filename = args.output_dir + '/' + os.path.basename(path_img)
            cv2.imwrite(filename + '_inpainted.jpg', inpainted)
| [
"argparse.ArgumentParser",
"numpy.clip",
"numpy.mean",
"glob.glob",
"cv2.imwrite",
"numpy.transpose",
"os.path.exists",
"numpy.reshape",
"tensorflow.GraphDef",
"cv2.resize",
"cv2.bitwise_not",
"os.path.basename",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflo... | [((4505, 4530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4528, 4530), False, 'import argparse\n'), ((369, 446), 'numpy.reshape', 'np.reshape', (['residual', '[ATTENTION_SIZE, ATTENTION_SIZE, multiple, multiple, 3]'], {}), '(residual, [ATTENTION_SIZE, ATTENTION_SIZE, multiple, multiple, 3])\n', (379, 446), True, 'import numpy as np\n'), ((462, 501), 'numpy.transpose', 'np.transpose', (['residual', '[0, 2, 1, 3, 4]'], {}), '(residual, [0, 2, 1, 3, 4])\n', (474, 501), True, 'import numpy as np\n'), ((509, 588), 'numpy.reshape', 'np.reshape', (['residual', '[ATTENTION_SIZE * multiple, ATTENTION_SIZE * multiple, 3]'], {}), '(residual, [ATTENTION_SIZE * multiple, ATTENTION_SIZE * multiple, 3])\n', (519, 588), True, 'import numpy as np\n'), ((690, 760), 'numpy.reshape', 'np.reshape', (['img', '[h // multiple, multiple, w // multiple, multiple, c]'], {}), '(img, [h // multiple, multiple, w // multiple, multiple, c])\n', (700, 760), True, 'import numpy as np\n'), ((767, 801), 'numpy.transpose', 'np.transpose', (['img', '[0, 2, 1, 3, 4]'], {}), '(img, [0, 2, 1, 3, 4])\n', (779, 801), True, 'import numpy as np\n'), ((1000, 1068), 'numpy.reshape', 'np.reshape', (['residual', '[1, residual.shape[0] * residual.shape[1], -1]'], {}), '(residual, [1, residual.shape[0] * residual.shape[1], -1])\n', (1010, 1068), True, 'import numpy as np\n'), ((1084, 1114), 'numpy.matmul', 'np.matmul', (['attention', 'residual'], {}), '(attention, residual)\n', (1093, 1114), True, 'import numpy as np\n'), ((1401, 1434), 'numpy.mean', 'np.mean', (['img_patches'], {'axis': '(2, 3)'}), '(img_patches, axis=(2, 3))\n', (1408, 1434), True, 'import numpy as np\n'), ((1677, 1780), 'cv2.resize', 'cv2.resize', (['raw_img', '(multiple * INPUT_SIZE, multiple * INPUT_SIZE)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(raw_img, (multiple * INPUT_SIZE, multiple * INPUT_SIZE),\n interpolation=cv2.INTER_LINEAR)\n', (1687, 1780), False, 'import cv2\n'), ((1798, 1903), 
'cv2.resize', 'cv2.resize', (['raw_mask', '(multiple * INPUT_SIZE, multiple * INPUT_SIZE)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(raw_mask, (multiple * INPUT_SIZE, multiple * INPUT_SIZE),\n interpolation=cv2.INTER_NEAREST)\n', (1808, 1903), False, 'import cv2\n'), ((2016, 2095), 'cv2.resize', 'cv2.resize', (['raw_mask', '(INPUT_SIZE, INPUT_SIZE)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(raw_mask, (INPUT_SIZE, INPUT_SIZE), interpolation=cv2.INTER_NEAREST)\n', (2026, 2095), False, 'import cv2\n'), ((2972, 3002), 'numpy.clip', 'np.clip', (['res_large', '(0.0)', '(255.0)'], {}), '(res_large, 0.0, 255.0)\n', (2979, 3002), True, 'import numpy as np\n'), ((3064, 3125), 'cv2.resize', 'cv2.resize', (['res_large', '(w, h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(res_large, (w, h), interpolation=cv2.INTER_LINEAR)\n', (3074, 3125), False, 'import cv2\n'), ((3287, 3315), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (3301, 3315), True, 'import numpy as np\n'), ((4182, 4217), 'glob.glob', 'glob.glob', (["(args.images + '/*.*[gG]')"], {}), "(args.images + '/*.*[gG]')\n", (4191, 4217), False, 'import glob\n'), ((4233, 4267), 'glob.glob', 'glob.glob', (["(args.masks + '/*.*[gG]')"], {}), "(args.masks + '/*.*[gG]')\n", (4242, 4267), False, 'import glob\n'), ((4971, 5002), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (4985, 5002), False, 'import os\n'), ((5008, 5036), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (5019, 5036), False, 'import os\n'), ((5132, 5145), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (5143, 5145), True, 'import tensorflow as tf\n'), ((5197, 5243), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['output_graph_def'], {'name': '""""""'}), "(output_graph_def, name='')\n", (5216, 5243), True, 'import tensorflow as tf\n'), ((5252, 5264), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5262, 5264), True, 'import 
tensorflow as tf\n'), ((5285, 5318), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5316, 5318), True, 'import tensorflow as tf\n'), ((5042, 5052), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5050, 5052), True, 'import tensorflow as tf\n'), ((5730, 5750), 'cv2.imread', 'cv2.imread', (['path_img'], {}), '(path_img)\n', (5740, 5750), False, 'import cv2\n'), ((5770, 5791), 'cv2.imread', 'cv2.imread', (['path_mask'], {}), '(path_mask)\n', (5780, 5791), False, 'import cv2\n'), ((5811, 5836), 'cv2.bitwise_not', 'cv2.bitwise_not', (['raw_mask'], {}), '(raw_mask)\n', (5826, 5836), False, 'import cv2\n'), ((6053, 6104), 'cv2.imwrite', 'cv2.imwrite', (["(filename + '_inpainted.jpg')", 'inpainted'], {}), "(filename + '_inpainted.jpg', inpainted)\n", (6064, 6104), False, 'import cv2\n'), ((6018, 6044), 'os.path.basename', 'os.path.basename', (['path_img'], {}), '(path_img)\n', (6034, 6044), False, 'import os\n')] |
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # <--- This is important for 3d plotting
from scipy.interpolate import griddata

PI = np.pi

### MESH PART
# Mesh input parameters: number of cells per column / per row.
fparms = 'parameters.inp'
nc_col, nc_row = np.loadtxt(fparms)[:]
nc_col = int(nc_col)
nc_row = int(nc_row)
ncx = nc_col
ncy = nc_row

# Load the mesh once (the original read the file twice); col 0 = x, col 1 = y.
mesh = np.loadtxt("mesh.txt")
X = mesh[:, 0]
Y = mesh[:, 1]

## CENTROIDS
# The mesh is stored row-major: the first nc_col x-values form one grid row,
# and every nc_col-th y-value walks down the grid rows.
cx = X[:nc_col].copy()
cy = Y[:nc_row * nc_col:nc_col].copy()

# interpolate on this grid
xi, yi = np.meshgrid(cx, cy)

# The Dual Pulse Laser :: SECOND PULSE
w0 = 200.0e-6    # beam waist
lam = 1064.0e-9  # wavelength
f = 300.0e-3     # focal length (kept for reference; unused below)
Rl_rn = 0.1 * PI * (w0 * w0) / lam  ## Scale Down the length in this direction
xmid = cx[int(nc_col / 2)]
ymid = cy[int(nc_row / 2)]
# normalized coordinates
x_nor_i = (xi - xmid) / Rl_rn
y_nor_i = (yi - ymid) / w0

## INTERPOLATION VALUES EXTRACT
# Input/output file naming.
pre1 = 'I_Pulse2_'
pre2 = 'Rho_e_'
ext = '.txt'
p3d = '.p3d'

## LOOP over the different time-step files
for i in range(8):
    index = i * 5
    str_i = str(index)
    fname1 = pre1 + str_i
    fname2 = pre2 + str_i
    fout = 'out_' + str_i + ext

    # Read each input file ONCE (the original re-read fname1 three times).
    data1 = np.loadtxt(fname1 + ext)
    Z_r = data1[0, 1:]   # axial coordinates (first row, corner entry skipped)
    R_r = data1[1:, 0]   # radial coordinates (first column)
    In = data1[1:, 1:]   # intensity values, already in matrix form
    Ne = np.loadtxt(fname2 + ext)[1:, 1:]  # electron number density

    # Mirror the half-plane data across the symmetry axis (r -> -r).
    In_stack = np.vstack((np.flipud(In), In))
    Ne_stack = np.vstack((np.flipud(Ne), Ne))
    In_stack = 1.0e2 * In_stack  ## Intensity value scaled up
    Ne_stack = 1.0e2 * Ne_stack  ## Ne value scaled up
    R_r_stack = np.hstack((np.flipud(-R_r), R_r))

    X_r, Y_r = np.meshgrid(Z_r, R_r_stack)
    V_r1 = In_stack
    V_r2 = Ne_stack

    points = np.vstack((X_r.ravel(), Y_r.ravel())).T
    val1 = V_r1.ravel()
    val2 = V_r2.ravel()
    points_i = np.vstack((x_nor_i.ravel(), y_nor_i.ravel())).T

    # Interpolate onto the normalized mesh; outside the data hull fall back
    # to the minimum value so the background stays at the floor level.
    V_i1 = griddata(points, val1, points_i, method='linear', fill_value=np.min(In_stack))
    V_i2 = griddata(points, val2, points_i, method='linear', fill_value=np.min(Ne_stack))

    # Save the values in a file to be used by Hydro2D:
    # first column intensity, second column number density.
    result = np.vstack((V_i1, V_i2))
    np.savetxt(fout, result.T)

# (The large blocks of commented-out 3D diagnostic plots that followed were
# removed as dead code.)
| [
"numpy.meshgrid",
"numpy.savetxt",
"numpy.zeros",
"numpy.flipud",
"numpy.min",
"numpy.loadtxt",
"numpy.vstack"
] | [((390, 406), 'numpy.zeros', 'np.zeros', (['nc_col'], {}), '(nc_col)\n', (398, 406), True, 'import numpy as np\n'), ((412, 428), 'numpy.zeros', 'np.zeros', (['nc_row'], {}), '(nc_row)\n', (420, 428), True, 'import numpy as np\n'), ((641, 660), 'numpy.meshgrid', 'np.meshgrid', (['cx', 'cy'], {}), '(cx, cy)\n', (652, 660), True, 'import numpy as np\n'), ((292, 310), 'numpy.loadtxt', 'np.loadtxt', (['fparms'], {}), '(fparms)\n', (302, 310), True, 'import numpy as np\n'), ((434, 456), 'numpy.loadtxt', 'np.loadtxt', (['"""mesh.txt"""'], {}), "('mesh.txt')\n", (444, 456), True, 'import numpy as np\n'), ((467, 489), 'numpy.loadtxt', 'np.loadtxt', (['"""mesh.txt"""'], {}), "('mesh.txt')\n", (477, 489), True, 'import numpy as np\n'), ((1795, 1822), 'numpy.meshgrid', 'np.meshgrid', (['Z_r', 'R_r_stack'], {}), '(Z_r, R_r_stack)\n', (1806, 1822), True, 'import numpy as np\n'), ((2393, 2416), 'numpy.vstack', 'np.vstack', (['(V_i1, V_i2)'], {}), '((V_i1, V_i2))\n', (2402, 2416), True, 'import numpy as np\n'), ((2421, 2447), 'numpy.savetxt', 'np.savetxt', (['fout', 'result.T'], {}), '(fout, result.T)\n', (2431, 2447), True, 'import numpy as np\n'), ((1300, 1324), 'numpy.loadtxt', 'np.loadtxt', (['(fname1 + ext)'], {}), '(fname1 + ext)\n', (1310, 1324), True, 'import numpy as np\n'), ((1339, 1363), 'numpy.loadtxt', 'np.loadtxt', (['(fname1 + ext)'], {}), '(fname1 + ext)\n', (1349, 1363), True, 'import numpy as np\n'), ((1377, 1401), 'numpy.loadtxt', 'np.loadtxt', (['(fname1 + ext)'], {}), '(fname1 + ext)\n', (1387, 1401), True, 'import numpy as np\n'), ((1452, 1476), 'numpy.loadtxt', 'np.loadtxt', (['(fname2 + ext)'], {}), '(fname2 + ext)\n', (1462, 1476), True, 'import numpy as np\n'), ((1509, 1522), 'numpy.flipud', 'np.flipud', (['In'], {}), '(In)\n', (1518, 1522), True, 'import numpy as np\n'), ((1554, 1567), 'numpy.flipud', 'np.flipud', (['Ne'], {}), '(Ne)\n', (1563, 1567), True, 'import numpy as np\n'), ((1757, 1772), 'numpy.flipud', 'np.flipud', (['(-R_r)'], {}), 
'(-R_r)\n', (1766, 1772), True, 'import numpy as np\n'), ((2170, 2186), 'numpy.min', 'np.min', (['In_stack'], {}), '(In_stack)\n', (2176, 2186), True, 'import numpy as np\n'), ((2256, 2272), 'numpy.min', 'np.min', (['Ne_stack'], {}), '(Ne_stack)\n', (2262, 2272), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Digital Elevation Model Data I/O
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Provide surface/terrain elevation information from SRTM data
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["download_srtm", "get_srtm"]
__doc__ = __doc__.format("\n ".join(__all__))
import os
import numpy as np
import requests
from osgeo import gdal
from wradlib import util
class HeaderRedirection(requests.Session):
    """Session that keeps Earthdata credentials only for trusted redirects.

    Overrides ``requests.Session.rebuild_auth`` so the ``Authorization``
    header is dropped whenever a redirect leaves both the original host and
    the Earthdata login host.
    """

    AUTH_HOST = "urs.earthdata.nasa.gov"

    def __init__(self, username, password):
        super().__init__()
        self.auth = (username, password)

    def rebuild_auth(self, request, response):
        """Strip credentials when redirected to an untrusted third-party host."""
        if "Authorization" not in request.headers:
            return
        src_host = requests.utils.urlparse(response.request.url).hostname
        dst_host = requests.utils.urlparse(request.url).hostname
        same_host = src_host == dst_host
        trusted = self.AUTH_HOST in (src_host, dst_host)
        if not (same_host or trusted):
            del request.headers["Authorization"]
        return
def download_srtm(filename, destination, resolution=3):
    """
    Download NASA SRTM elevation data

    Only available with login/password

    Parameters
    ----------
    filename : str
        srtm file to download
    destination : str
        output filename
    resolution : int
        resolution of SRTM data (1, 3 or 30)
    """
    # SRTMGL1/SRTMGL3 live under product version .003, SRTMGL30 under .002.
    subres = 2 if resolution == 30 else 3
    product = f"SRTMGL{resolution}.00{subres}"
    url = "/".join(
        ["https://e4ftl01.cr.usgs.gov/MEASURES", product, "2000.02.11", filename]
    )

    user = os.environ.get("WRADLIB_EARTHDATA_USER", None)
    pwd = os.environ.get("WRADLIB_EARTHDATA_PASS", None)
    if user is None or pwd is None:
        raise ValueError(
            "WRADLIB_EARTHDATA_USER and/or WRADLIB_EARTHDATA_PASS environment "
            "variable missing. Downloading SRTM data requires a NASA Earthdata "
            "Login username and password. To obtain a NASA Earthdata Login account, "
            "please visit https://urs.earthdata.nasa.gov/users/new/."
        )

    session = HeaderRedirection(user, pwd)
    try:
        response = session.get(url, stream=True)
        response.raise_for_status()
        target = filename if destination is None else destination
        with open(target, "wb") as fd:
            # NOTE(review): 1024 * 1014 looks like a typo for 1024 * 1024, but
            # the chunk size only affects buffering, not the downloaded bytes.
            for chunk in response.iter_content(chunk_size=1024 * 1014):
                fd.write(chunk)
    except requests.exceptions.HTTPError as err:
        # A 404 means the tile simply does not exist (e.g. open ocean);
        # every other HTTP error is re-raised.
        if err.response.status_code != 404:
            raise err
def get_srtm(extent, resolution=3, merge=True):
    """
    Get NASA SRTM elevation data

    Parameters
    ----------
    extent : list
        list containing lonmin, lonmax, latmin, latmax
    resolution : int
        resolution of SRTM data (1, 3 or 30)
    merge : bool
        True to merge the tiles in one dataset

    Returns
    -------
    dataset : :py:class:`gdal:osgeo.gdal.Dataset`
        gdal.Dataset Raster dataset containing elevation information
    """
    # Tile names are anchored at integer degrees, so floor each bound.
    lonmin, lonmax, latmin, latmax = [int(np.floor(v)) for v in extent]

    # Enumerate the 1x1-degree tile names covering the extent, southern
    # hemisphere first, then the northern hemisphere (same order as before).
    filelist = []
    for latitude in range(latmin, min(latmax, 0)):
        for longitude in range(lonmin, min(lonmax, 0)):
            filelist.append("S%02gW%03g" % (-latitude, -longitude))
        for longitude in range(max(lonmin, 0), lonmax + 1):
            filelist.append("S%02gE%03g" % (-latitude, longitude))
    for latitude in range(max(0, latmin), latmax + 1):
        for longitude in range(lonmin, min(lonmax, 0)):
            filelist.append("N%02gW%03g" % (latitude, -longitude))
        for longitude in range(max(lonmin, 0), lonmax + 1):
            filelist.append("N%02gE%03g" % (latitude, longitude))
    filelist = [f"{georef}.SRTMGL{resolution}.hgt.zip" for georef in filelist]

    # Cache tiles under the wradlib data path; download only what is missing.
    srtm_path = os.path.join(util.get_wradlib_data_path(), "geo")
    if not os.path.exists(srtm_path):
        os.makedirs(srtm_path)
    demlist = []
    for fname in filelist:
        path = os.path.join(srtm_path, fname)
        if not os.path.exists(path):
            download_srtm(fname, path, resolution)
        demlist.append(path)

    datasets = [gdal.Open(p) for p in demlist]
    if not merge:
        return datasets
    return gdal.Warp("", datasets, format="MEM")
| [
"requests.utils.urlparse",
"os.makedirs",
"osgeo.gdal.Warp",
"numpy.floor",
"os.path.exists",
"os.environ.get",
"wradlib.util.get_wradlib_data_path",
"osgeo.gdal.Open",
"os.path.join"
] | [((1849, 1895), 'os.environ.get', 'os.environ.get', (['"""WRADLIB_EARTHDATA_USER"""', 'None'], {}), "('WRADLIB_EARTHDATA_USER', None)\n", (1863, 1895), False, 'import os\n'), ((1906, 1952), 'os.environ.get', 'os.environ.get', (['"""WRADLIB_EARTHDATA_PASS"""', 'None'], {}), "('WRADLIB_EARTHDATA_PASS', None)\n", (1920, 1952), False, 'import os\n'), ((4222, 4250), 'wradlib.util.get_wradlib_data_path', 'util.get_wradlib_data_path', ([], {}), '()\n', (4248, 4250), False, 'from wradlib import util\n'), ((4267, 4301), 'os.path.join', 'os.path.join', (['wrl_data_path', '"""geo"""'], {}), "(wrl_data_path, 'geo')\n", (4279, 4301), False, 'import os\n'), ((4685, 4721), 'osgeo.gdal.Warp', 'gdal.Warp', (['""""""', 'demlist'], {'format': '"""MEM"""'}), "('', demlist, format='MEM')\n", (4694, 4721), False, 'from osgeo import gdal\n'), ((4313, 4338), 'os.path.exists', 'os.path.exists', (['srtm_path'], {}), '(srtm_path)\n', (4327, 4338), False, 'import os\n'), ((4348, 4370), 'os.makedirs', 'os.makedirs', (['srtm_path'], {}), '(srtm_path)\n', (4359, 4370), False, 'import os\n'), ((4433, 4466), 'os.path.join', 'os.path.join', (['srtm_path', 'filename'], {}), '(srtm_path, filename)\n', (4445, 4466), False, 'import os\n'), ((4603, 4615), 'osgeo.gdal.Open', 'gdal.Open', (['d'], {}), '(d)\n', (4612, 4615), False, 'from osgeo import gdal\n'), ((3321, 3332), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (3329, 3332), True, 'import numpy as np\n'), ((4482, 4502), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4496, 4502), False, 'import os\n'), ((895, 940), 'requests.utils.urlparse', 'requests.utils.urlparse', (['response.request.url'], {}), '(response.request.url)\n', (918, 940), False, 'import requests\n'), ((973, 1001), 'requests.utils.urlparse', 'requests.utils.urlparse', (['url'], {}), '(url)\n', (996, 1001), False, 'import requests\n')] |
import numpy as np
#
# Your previous Plain Text content is preserved below:
#
#
# N x M Matrix
# Point located at x;y coordinate
#
# After K moves, What is the probability the point is dead
#
# x=1
# y=0
#
# 2x3 example grid used by the smoke-check below.
initial_matrix = np.arange(1, 7).reshape(2, 3)
#
# K=1
# Initial Matrix
#
# M = [[0.50, 0.75, 0.50]
# [0.75, 1.00, 0.75]
# [0.50, 0.75, 0.50]]
def probability_point_is_dead(point):
    """Stub: intended to return the probability that *point* dies within K moves.

    Only the coordinate unpacking is implemented so far; the function
    currently returns ``None``.
    """
    x, y = point
def compute_probability_matrix(initial_matrix):
    """Return, per cell, the probability of surviving one uniform random move.

    A point on cell (i, j) moves up/down/left/right with probability 1/4
    each; stepping off the N x M grid kills it.  The survival probability is
    therefore the fraction of the four moves that stay inside the grid:
    0.5 in a corner, 0.75 on an edge, 1.0 in the interior -- matching the
    K=1 matrix documented in the notes above.

    Parameters
    ----------
    initial_matrix : numpy.ndarray
        2-D array whose shape defines the N x M grid (values are unused).

    Returns
    -------
    numpy.ndarray
        Float array of the same shape with per-cell survival probabilities.

    Raises
    ------
    ValueError
        If ``initial_matrix`` is empty (resolves the original TODO about an
        IndexError on empty input).
    """
    if initial_matrix.size == 0:
        raise ValueError("initial_matrix must be non-empty")
    n_rows, n_cols = initial_matrix.shape
    result = np.zeros((n_rows, n_cols))
    for i in range(n_rows):
        for j in range(n_cols):
            # Count the von Neumann neighbours that stay on the grid.
            neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
            in_bounds = sum(
                1 for r, c in neighbors if 0 <= r < n_rows and 0 <= c < n_cols
            )
            result[i, j] = in_bounds / 4.0
    return result
# Smoke-check: print the survival-probability matrix for the example grid.
print(compute_probability_matrix(initial_matrix))
def compute_transition_matrix(probability_matrix):
    """Stub: intended to build the move transition matrix T (see notes below).

    Not implemented yet -- currently returns ``None``.
    """
    pass
# K=2
# 0.25 0.375
# [[0.50*.25*(.5+.75+.75) , 0.75*.25(.5+1+.5), 0.50]
# [0.75, 1.00, 0.75]
# [0.50, 0.75, 0.50]]
#
# K=3
# [[0.50*.25*(.5+.75+.75)*.25*(.5+.75+.75) , 0.75*.25(.5+1+.5), 0.50]
# [0.75, 1.00, 0.75]
# [0.50, 0.75, 0.50]]
#
# K=4
# [[0.50*.25*(.5+.75+.75)*.25*(.5+.75+.75) *(.5+.75+.75) , 0.75*.25(.5+1+.5), 0.50]
# [0.75, 1.00, 0.75]
# [0.50, 0.75, 0.50]]
#
# Transition Matrix
# T = [[]]
#
# p = probability of moving to that cell = 1/4
#
# Final Matrix = M .* (p .* T)^k
#
#
# probability of dying in the next step:
#
#
#
#
#
# for each cell:
# take the initial probability
# multiply by 1/4
# multiply by the sum of all the neighboring probabilities
#
#
| [
"numpy.array",
"numpy.ones"
] | [((232, 264), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (240, 264), True, 'import numpy as np\n'), ((596, 625), 'numpy.ones', 'np.ones', (['initial_matrix.shape'], {}), '(initial_matrix.shape)\n', (603, 625), True, 'import numpy as np\n')] |
import pprint
# import json
import time
from gurobipy import *
from tqdm import tqdm
import numpy as np
"""
This code is for the most part written by <NAME> and is taken and
adapted from the repository https://github.com/chenhongge/RobustTrees.
It is an implementation of the MILP attack from:
Kantchelian, <NAME>. <NAME>, and <NAME>. "Evasion and hardening of
tree ensemble classifiers." International Conference on Machine Learning.
PMLR, 2016.
Feasibility idea from:
Andriushchenko, Maksym, and <NAME>. "Provably robust boosted decision
stumps and trees against adversarial attacks." arXiv preprint arXiv:1906.03526
(2019).
The changes made were related to:
- Default guard_val, round_digits values
- Removing dependency on xgboost
- Taking only a JSON file as input
- Solving a feasibility encoding for fixed epsilon
- Removing print statements
- Removing excessive model updates
- Only keeping binary classification attacks
"""
# Numerical guard added/subtracted around split thresholds so MILP solutions
# land strictly on the intended side of a decision boundary.
GUARD_VAL = 5e-6
# Number of digits thresholds are rounded to (XGBoost dumps ~8 digits).
ROUND_DIGITS = 6
# NOTE(review): these tolerances are defined here, but the visible solver
# setup below hard-codes Gurobi's IntFeasTol/FeasibilityTol to 1e-9 instead --
# confirm which value is intended.
FEASIBILITY_TOLERANCE = 1e-4
INT_FEASIBILITY_TOLERANCE = FEASIBILITY_TOLERANCE
class AttackWrapper:
    """Common interface for adversarial attacks on tree ensembles.

    Subclasses implement :meth:`adversarial_examples`; the distance and
    feasibility helpers are derived from it.
    """

    def attack_feasibility(self, X, y, order=np.inf, epsilon=0.0, options=None):
        """Return a boolean mask: True where an attack closer than *epsilon* exists."""
        return self.attack_distance(X, y, order, options) < epsilon

    def attack_distance(self, X, y, order=np.inf, options=None):
        """Return the per-sample norm of the adversarial perturbations."""
        adversarial = self.adversarial_examples(X, y, order, options)
        return np.linalg.norm(X - adversarial, ord=order, axis=1)

    def adversarial_examples(self, X, y, order=np.inf, options=None):
        """Produce adversarial examples; must be provided by subclasses."""
        raise NotImplementedError
class node_wrapper(object):
    """Groups every tree node that shares one (attribute, threshold) split.

    Identical decisions across trees are merged into a single wrapper: each
    occurrence contributes its (treeid, nodeid) position and the lists of
    leaf indices reachable on the left/right branch.  Root-level occurrences
    are tagged with the string ``"root"`` as a third tuple element.
    """

    def __init__(
        self,
        treeid,
        nodeid,
        attribute,
        threshold,
        left_leaves,
        right_leaves,
        root=False,
    ):
        # left_leaves / right_leaves index into the global leaf-value list.
        self.attribute = attribute
        self.threshold = threshold
        self.node_pos = []
        self.leaves_lists = []
        self.add_leaves(treeid, nodeid, left_leaves, right_leaves, root)

    def print(self):
        print(
            "node_pos{}, attr:{}, th:{}, leaves:{}".format(
                self.node_pos, self.attribute, self.threshold, self.leaves_lists
            )
        )

    def add_leaves(self, treeid, nodeid, left_leaves, right_leaves, root=False):
        """Record one more occurrence of this decision node."""
        self.node_pos.append({"treeid": treeid, "nodeid": nodeid})
        entry = (
            (left_leaves, right_leaves, "root")
            if root
            else (left_leaves, right_leaves)
        )
        self.leaves_lists.append(entry)

    def add_grb_var(self, node_grb_var, leaf_grb_var_list):
        """Attach the Gurobi variables for this node and its reachable leaves."""
        self.p_grb_var = node_grb_var
        self.l_grb_var_list = []
        for entry in self.leaves_lists:
            left_vars = [leaf_grb_var_list[i] for i in entry[0]]
            right_vars = [leaf_grb_var_list[i] for i in entry[1]]
            if len(entry) == 3:
                self.l_grb_var_list.append((left_vars, right_vars, "root"))
            else:
                self.l_grb_var_list.append((left_vars, right_vars))
class KantchelianAttack(object):
    """MILP-based exact adversarial attack on tree ensembles (Kantchelian et al., 2016).

    The ensemble is encoded as a Gurobi mixed-integer program: one binary
    variable per unique (attribute, threshold) split and one continuous
    [0, 1] variable per leaf.  ``attack_feasible`` solves a feasibility
    version for a fixed epsilon; ``optimal_adversarial_example`` minimizes
    the perturbation norm.
    """

    def __init__(
        self,
        json_model,
        epsilon=None,
        order=np.inf,
        guard_val=GUARD_VAL,
        round_digits=ROUND_DIGITS,
        pos_json_input=None,
        neg_json_input=None,
        pred_threshold=0.0,
        verbose=False,
        n_threads=1,
    ):
        """Build the MILP for *json_model* (an XGBoost-style JSON tree dump).

        When ``pos_json_input``/``neg_json_input`` are both given, the attack
        runs in one-vs-one mode on the two sub-ensembles; otherwise it is a
        binary-classification attack on ``json_model``.  ``epsilon`` switches
        to the feasibility encoding and is only valid with ``order=np.inf``.
        """
        assert epsilon is None or order == np.inf, "feasibility epsilon can only be used with order inf"
        self.pred_threshold = pred_threshold
        self.epsilon = epsilon
        # Binary mode unless BOTH positive and negative ensembles were given.
        self.binary = (pos_json_input == None) or (neg_json_input == None)
        self.pos_json_input = pos_json_input
        self.neg_json_input = neg_json_input
        self.guard_val = guard_val
        self.round_digits = round_digits
        self.json_model = json_model
        self.order = order
        self.verbose = verbose
        self.n_threads = n_threads
        # two nodes with identical decision are merged in this list, their left and right leaves and in the list, third element of the tuple
        self.node_list = []
        self.leaf_v_list = []  # list of all leaf values
        self.leaf_pos_list = []  # list of leaves' position in xgboost model
        self.leaf_count = [0]  # total number of leaves in the first i trees
        node_check = (
            {}
        )  # track identical decision nodes. {(attr, th):<index in node_list>}

        # Depth-first walk of one JSON tree: records leaf values (negated for
        # the "neg" ensemble), merges identical splits via node_check, and
        # returns the leaf indices reachable from the current subtree.
        def dfs(tree, treeid, root=False, neg=False):
            if "leaf" in tree.keys():
                if neg:
                    self.leaf_v_list.append(-tree["leaf"])
                else:
                    self.leaf_v_list.append(tree["leaf"])
                self.leaf_pos_list.append({"treeid": treeid, "nodeid": tree["nodeid"]})
                return [len(self.leaf_v_list) - 1]
            else:
                attribute, threshold, nodeid = (
                    tree["split"],
                    tree["split_condition"],
                    tree["nodeid"],
                )
                if type(attribute) == str:
                    attribute = int(attribute[1:])
                threshold = round(threshold, self.round_digits)
                # XGBoost can only offer precision up to 8 digits, however, minimum difference between two splits can be smaller than 1e-8
                # here rounding may be an option, but its hard to choose guard value after rounding
                # for example, if round to 1e-6, then guard value should be 5e-7, or otherwise may cause mistake
                # xgboost prediction has a precision of 1e-8, so when min_diff<1e-8, there is a precision problem
                # if we do not round, xgboost.predict may give wrong results due to precision, but manual predict on json file should always work
                left_subtree = None
                right_subtree = None
                for subtree in tree["children"]:
                    if subtree["nodeid"] == tree["yes"]:
                        left_subtree = subtree
                    if subtree["nodeid"] == tree["no"]:
                        right_subtree = subtree
                if left_subtree == None or right_subtree == None:
                    pprint.pprint(tree)
                    raise ValueError("should be a tree but one child is missing")
                left_leaves = dfs(left_subtree, treeid, False, neg)
                right_leaves = dfs(right_subtree, treeid, False, neg)
                if (attribute, threshold) not in node_check:
                    self.node_list.append(
                        node_wrapper(
                            treeid,
                            nodeid,
                            attribute,
                            threshold,
                            left_leaves,
                            right_leaves,
                            root,
                        )
                    )
                    node_check[(attribute, threshold)] = len(self.node_list) - 1
                else:
                    node_index = node_check[(attribute, threshold)]
                    self.node_list[node_index].add_leaves(
                        treeid, nodeid, left_leaves, right_leaves, root
                    )
                return left_leaves + right_leaves

        # Walk every tree and sanity-check that leaf_count has one entry per
        # tree plus the leading zero.
        if self.binary:
            for i, tree in enumerate(self.json_model):
                dfs(tree, i, root=True)
                self.leaf_count.append(len(self.leaf_v_list))
            if len(self.json_model) + 1 != len(self.leaf_count):
                print("self.leaf_count:", self.leaf_count)
                raise ValueError("leaf count error")
        else:
            for i, tree in enumerate(self.pos_json_input):
                dfs(tree, i, root=True)
                self.leaf_count.append(len(self.leaf_v_list))
            for i, tree in enumerate(self.neg_json_input):
                dfs(tree, i + len(self.pos_json_input), root=True, neg=True)
                self.leaf_count.append(len(self.leaf_v_list))
            if len(self.pos_json_input) + len(self.neg_json_input) + 1 != len(
                self.leaf_count
            ):
                print("self.leaf_count:", self.leaf_count)
                raise ValueError("leaf count error")

        self.m = Model("attack")
        if not self.verbose:
            self.m.setParam(
                "OutputFlag", 0
            )  # suppress Gurobi output, gives a small speed-up and prevents huge logs
        self.m.setParam("Threads", self.n_threads)
        # Most datasets require a very low tolerance
        self.m.setParam("IntFeasTol", 1e-9)
        self.m.setParam("FeasibilityTol", 1e-9)
        # P: one binary variable per merged decision node; L: one continuous
        # [0, 1] variable per leaf.
        self.P = self.m.addVars(len(self.node_list), vtype=GRB.BINARY, name="p")
        self.L = self.m.addVars(len(self.leaf_v_list), lb=0, ub=1, name="l")
        if epsilon:
            # Feasibility mode: bound the l-inf distance strictly below epsilon.
            self.B = self.m.addVar(
                name="b", lb=0.0, ub=self.epsilon - 0.0001
            )
        elif self.order == np.inf:
            self.B = self.m.addVar(name="b")
        self.llist = [self.L[key] for key in range(len(self.L))]
        self.plist = [self.P[key] for key in range(len(self.P))]

        # p dictionary by attributes, {attr1:[(threshold1, gurobiVar1),(threshold2, gurobiVar2),...],attr2:[...]}
        self.pdict = {}
        for i, node in enumerate(self.node_list):
            node.add_grb_var(self.plist[i], self.llist)
            if node.attribute not in self.pdict:
                self.pdict[node.attribute] = [(node.threshold, self.plist[i])]
            else:
                self.pdict[node.attribute].append((node.threshold, self.plist[i]))

        # sort each feature list
        # add p constraints (consistency: crossing a lower threshold implies
        # crossing every higher one; shrink guard_val if thresholds are closer
        # than 2*guard_val)
        for key in self.pdict.keys():
            min_diff = 1000
            if len(self.pdict[key]) > 1:
                self.pdict[key].sort(key=lambda tup: tup[0])
                for i in range(len(self.pdict[key]) - 1):
                    self.m.addConstr(
                        self.pdict[key][i][1] <= self.pdict[key][i + 1][1],
                        name="p_consis_attr{}_{}th".format(key, i),
                    )
                    min_diff = min(
                        min_diff, self.pdict[key][i + 1][0] - self.pdict[key][i][0]
                    )
                if min_diff < 2 * self.guard_val:
                    self.guard_val = min_diff / 3
                    print(
                        "guard value too large, change to min_diff/3:", self.guard_val
                    )

        # all leaves sum up to 1
        for i in range(len(self.leaf_count) - 1):
            leaf_vars = [
                self.llist[j] for j in range(self.leaf_count[i], self.leaf_count[i + 1])
            ]
            self.m.addConstr(
                LinExpr([1] * (self.leaf_count[i + 1] - self.leaf_count[i]), leaf_vars)
                == 1,
                name="leaf_sum_one_for_tree{}".format(i),
            )

        # node leaves constraints: tie each split variable to the leaves
        # reachable on its left/right branch (equalities at the tree root,
        # inequalities elsewhere).
        for j in range(len(self.node_list)):
            p = self.plist[j]
            for k in range(len(self.node_list[j].leaves_lists)):
                left_l = [self.llist[i] for i in self.node_list[j].leaves_lists[k][0]]
                right_l = [self.llist[i] for i in self.node_list[j].leaves_lists[k][1]]
                if len(self.node_list[j].leaves_lists[k]) == 3:
                    self.m.addConstr(
                        LinExpr([1] * len(left_l), left_l) - p == 0,
                        name="p{}_root_left_{}".format(j, k),
                    )
                    self.m.addConstr(
                        LinExpr([1] * len(right_l), right_l) + p == 1,
                        name="p_{}_root_right_{}".format(j, k),
                    )
                else:
                    self.m.addConstr(
                        LinExpr([1] * len(left_l), left_l) - p <= 0,
                        name="p{}_left_{}".format(j, k),
                    )
                    self.m.addConstr(
                        LinExpr([1] * len(right_l), right_l) + p <= 1,
                        name="p{}_right_{}".format(j, k),
                    )
        self.m.update()

    def attack_feasible(self, sample, label):
        """Return True iff an adversarial example exists within the fixed bound.

        Also returns True immediately when the model already misclassifies
        *sample* (no attack needed).  Solves the feasibility MILP (objective 0)
        and reports True unless Gurobi declares the model infeasible.
        """
        if self.binary:
            pred = 1 if self.check(sample, self.json_model) >= self.pred_threshold else 0
        else:
            pred = 1 if self.check(sample, self.pos_json_input) >= self.check(sample, self.neg_json_input) else 0
        x = np.copy(sample)
        if pred != label:
            # Wrong prediction, no attack needed
            return True

        # model mislabel: replace any previous "mislabel" constraint with one
        # that forces the ensemble score across the decision threshold.
        try:
            c = self.m.getConstrByName("mislabel")
            self.m.remove(c)
        except Exception:
            pass
        if (not self.binary) or label == 1:
            self.m.addConstr(
                LinExpr(self.leaf_v_list, self.llist) <= self.pred_threshold - self.guard_val,
                name="mislabel",
            )
        else:
            self.m.addConstr(
                LinExpr(self.leaf_v_list, self.llist)
                >= self.pred_threshold + self.guard_val,
                name="mislabel",
            )

        # Generate constraints for self.B, the l-infinity distance.
        for key in self.pdict.keys():
            if len(self.pdict[key]) == 0:
                raise ValueError("self.pdict list empty")
            # Per-feature cost w[i]: distance needed to move x[key] into the
            # i-th threshold interval along this attribute's axis.
            axis = [-np.inf] + [item[0] for item in self.pdict[key]] + [np.inf]
            w = [0] * (len(self.pdict[key]) + 1)
            for i in range(len(axis) - 1, 0, -1):
                if x[key] < axis[i] and x[key] >= axis[i - 1]:
                    w[i - 1] = 0
                elif x[key] < axis[i] and x[key] < axis[i - 1]:
                    w[i - 1] = np.abs(x[key] - axis[i - 1])
                elif x[key] >= axis[i] and x[key] >= axis[i - 1]:
                    w[i - 1] = np.abs(x[key] - axis[i] + self.guard_val)
                else:
                    print("x[key]:", x[key])
                    print("axis:", axis)
                    print("axis[i]:{}, axis[i-1]:{}".format(axis[i], axis[i - 1]))
                    raise ValueError("wrong axis ordering")
            for i in range(len(w) - 1):
                w[i] -= w[i + 1]
            # NOTE(review): this is a for-else -- since the loop above never
            # breaks, the else-block always runs; it reads as a plain sequel.
            else:
                try:
                    c = self.m.getConstrByName("linf_constr_attr{}".format(key))
                    self.m.remove(c)
                except Exception:
                    pass
                self.m.addConstr(
                    LinExpr(w[:-1], [item[1] for item in self.pdict[key]]) + w[-1]
                    <= self.B,
                    name="linf_constr_attr{}".format(key),
                )
        self.m.setObjective(0, GRB.MINIMIZE)
        self.m.update()
        self.m.optimize()

        return not self.m.status == 3  # 3 -> infeasible -> no adv example -> False

    def optimal_adversarial_example(self, sample, label):
        """Return the minimal-perturbation adversarial example for *sample*.

        Returns *sample* unchanged when the model already misclassifies it,
        or ``None`` when the MILP is infeasible.  The returned point is
        snapped to guard_val beyond each active threshold and re-checked via
        :meth:`check`.
        """
        if self.binary:
            pred = 1 if self.check(sample, self.json_model) >= self.pred_threshold else 0
        else:
            pred = 1 if self.check(sample, self.pos_json_input) >= self.check(sample, self.neg_json_input) else 0
        x = np.copy(sample)
        if pred != label:
            # Wrong prediction, no attack needed
            return x

        # model mislabel
        # this is for binary
        try:
            c = self.m.getConstrByName("mislabel")
            self.m.remove(c)
        except Exception:
            pass
        if (not self.binary) or label == 1:
            self.m.addConstr(
                LinExpr(self.leaf_v_list, self.llist) <= self.pred_threshold - self.guard_val,
                name="mislabel",
            )
        else:
            self.m.addConstr(
                LinExpr(self.leaf_v_list, self.llist)
                >= self.pred_threshold + self.guard_val,
                name="mislabel",
            )

        # rho is the exponent of the per-feature cost terms (1 for l-inf).
        if self.order == np.inf:
            rho = 1
        else:
            rho = self.order
        if self.order != np.inf:
            self.obj_coeff_list = []
            self.obj_var_list = []
            self.obj_c = 0
        # model objective
        for key in self.pdict.keys():
            if len(self.pdict[key]) == 0:
                raise ValueError("self.pdict list empty")
            axis = [-np.inf] + [item[0] for item in self.pdict[key]] + [np.inf]
            w = [0] * (len(self.pdict[key]) + 1)
            for i in range(len(axis) - 1, 0, -1):
                if x[key] < axis[i] and x[key] >= axis[i - 1]:
                    w[i - 1] = 0
                elif x[key] < axis[i] and x[key] < axis[i - 1]:
                    w[i - 1] = np.abs(x[key] - axis[i - 1]) ** rho
                elif x[key] >= axis[i] and x[key] >= axis[i - 1]:
                    w[i - 1] = np.abs(x[key] - axis[i] + self.guard_val) ** rho
                else:
                    print("x[key]:", x[key])
                    print("axis:", axis)
                    print("axis[i]:{}, axis[i-1]:{}".format(axis[i], axis[i - 1]))
                    raise ValueError("wrong axis ordering")
            for i in range(len(w) - 1):
                w[i] -= w[i + 1]
            # For finite p-norms accumulate a linear objective; for l-inf add
            # a constraint bounding the shared distance variable B instead.
            if self.order != np.inf:
                self.obj_c += w[-1]
                self.obj_coeff_list += w[:-1]
                self.obj_var_list += [item[1] for item in self.pdict[key]]
            else:
                try:
                    c = self.m.getConstrByName("linf_constr_attr{}".format(key))
                    self.m.remove(c)
                except Exception:
                    pass
                self.m.addConstr(
                    LinExpr(w[:-1], [item[1] for item in self.pdict[key]]) + w[-1]
                    <= self.B,
                    name="linf_constr_attr{}".format(key),
                )

        if self.order != np.inf:
            self.m.setObjective(
                LinExpr(self.obj_coeff_list, self.obj_var_list) + self.obj_c,
                GRB.MINIMIZE,
            )
        else:
            self.m.setObjective(self.B, GRB.MINIMIZE)
        self.m.update()
        self.m.optimize()

        # If infeasible
        if self.m.status == 3:
            return None

        # Assert that the adversarial example causes a misclassification:
        # snap x to guard_val beyond each threshold according to the solved
        # split variables.
        for key in self.pdict.keys():
            for node in self.pdict[key]:
                if node[1].x > 0.5 and x[key] >= node[0]:
                    x[key] = node[0] - self.guard_val
                if node[1].x <= 0.5 and x[key] < node[0]:
                    x[key] = node[0] + self.guard_val
        if self.binary:
            pred = 1 if self.check(x, self.json_model) >= self.pred_threshold else 0
        else:
            pos_value = self.check(x, self.pos_json_input)
            neg_value = self.check(x, self.neg_json_input)
            pred = 1 if pos_value >= neg_value else 0
        if pred == label and self.verbose:
            print("!" * 50)
            print("MILP result did not cause a misclassification!")
            print("!" * 50)

        return x

    def check(self, x, json_file):
        """Manually evaluate the ensemble: sum of leaf values reached by *x*."""
        # Due to XGBoost precision issues, some attacks may not succeed if tested using model.predict.
        # We manually run the tree on the json file here to make sure those attacks are actually successful.
        leaf_values = []
        for item in json_file:
            tree = item.copy()
            while "leaf" not in tree.keys():
                attribute, threshold, nodeid = (
                    tree["split"],
                    tree["split_condition"],
                    tree["nodeid"],
                )
                if type(attribute) == str:
                    attribute = int(attribute[1:])
                if x[attribute] < threshold:
                    if tree["children"][0]["nodeid"] == tree["yes"]:
                        tree = tree["children"][0].copy()
                    elif tree["children"][1]["nodeid"] == tree["yes"]:
                        tree = tree["children"][1].copy()
                    else:
                        pprint.pprint(tree)
                        print("x[attribute]:", x[attribute])
                        raise ValueError("child not found")
                else:
                    if tree["children"][0]["nodeid"] == tree["no"]:
                        tree = tree["children"][0].copy()
                    elif tree["children"][1]["nodeid"] == tree["no"]:
                        tree = tree["children"][1].copy()
                    else:
                        pprint.pprint(tree)
                        print("x[attribute]:", x[attribute])
                        raise ValueError("child not found")
            leaf_values.append(tree["leaf"])
        manual_res = np.sum(leaf_values)
        return manual_res
class KantchelianAttackMultiClass(object):
    """One-vs-all extension of the Kantchelian MILP attack for models with
    more than two classes.

    The trees of `json_model` are assumed to be interleaved over classes
    (tree i belongs to class i % n_classes). An adversarial example is found
    by running a binary attack for every (label, other_label) pair and
    keeping the closest successful result.
    """

    def __init__(
        self,
        json_model,
        n_classes,
        order=np.inf,
        epsilon=None,
        guard_val=GUARD_VAL,
        round_digits=ROUND_DIGITS,
        pred_threshold=0.0,
        low_memory=False,
        verbose=False,
        n_threads=1
    ):
        if n_classes <= 2:
            raise ValueError('multiclass attack must be used when number of class > 2')
        assert epsilon is None or order == np.inf, "feasibility epsilon can only be used with order inf"
        self.n_classes = n_classes
        self.order = order
        self.epsilon = epsilon
        self.guard_val = guard_val
        self.round_digits = round_digits
        self.pred_threshold = pred_threshold
        self.low_memory = low_memory
        self.verbose = verbose
        self.n_threads = n_threads
        # Split the ensemble into one model per class: tree i votes for
        # class i % n_classes.
        self.one_vs_all_models = [[] for _ in range(self.n_classes)]
        for i, json_tree in enumerate(json_model):
            self.one_vs_all_models[i % n_classes].append(json_tree)
        # Create all attacker models up front. This takes quadratic space in
        # terms of n_classes, but speeds up attacks for many samples.
        if not low_memory:
            self.__create_cached_attackers()

    def __create_cached_attackers(self):
        """Build one binary attacker per ordered (class, other-class) pair.

        binary_attackers[i][j] attacks a sample of class i towards class j;
        the diagonal entries are None.
        """
        self.binary_attackers = []
        for class_label in range(self.n_classes):
            attackers = []
            for other_label in range(self.n_classes):
                if class_label == other_label:
                    # BUG FIX: without this `continue`, a degenerate
                    # self-vs-self attacker was also created and appended
                    # right after the None placeholder, shifting every later
                    # entry by one so binary_attackers[i][j] returned the
                    # attacker for the wrong target class.
                    attackers.append(None)
                    continue
                attacker = KantchelianAttack(
                    None,
                    epsilon=self.epsilon,
                    order=self.order,
                    guard_val=self.guard_val,
                    round_digits=self.round_digits,
                    pred_threshold=self.pred_threshold,
                    verbose=self.verbose,
                    n_threads=self.n_threads,
                    pos_json_input=self.one_vs_all_models[class_label],
                    neg_json_input=self.one_vs_all_models[other_label],
                )
                attackers.append(attacker)
            self.binary_attackers.append(attackers)
        return self.binary_attackers

    def optimal_adversarial_example(self, sample, label):
        """Return the minimal-distance adversarial example for `sample`.

        Raises when no target class admits an adversarial example.
        """
        best_distance = float("inf")
        best_adv_example = None
        for other_label in range(self.n_classes):
            if other_label == label:
                continue
            # Create a new attacker (low-memory mode) or use a cached one
            if self.low_memory:
                attacker = KantchelianAttack(
                    None,
                    epsilon=self.epsilon,
                    order=self.order,
                    guard_val=self.guard_val,
                    round_digits=self.round_digits,
                    pred_threshold=self.pred_threshold,
                    verbose=self.verbose,
                    n_threads=self.n_threads,
                    pos_json_input=self.one_vs_all_models[label],
                    neg_json_input=self.one_vs_all_models[other_label],
                )
            else:
                attacker = self.binary_attackers[label][other_label]
            # Generate adversarial example on this binary attacker
            adv_example = attacker.optimal_adversarial_example(sample, 1)
            # Keep the closest adversarial example over all target classes
            if adv_example is not None:
                distance = np.linalg.norm(sample - adv_example, ord=self.order)
                if distance < best_distance:
                    best_adv_example = adv_example
                    best_distance = distance
        if best_adv_example is None:
            raise Exception("No adversarial example found, does your model predict a constant value?")
        return best_adv_example

    def attack_feasible(self, sample, label):
        """Return True when some target class admits a feasible attack."""
        for other_label in range(self.n_classes):
            if other_label == label:
                continue
            # Create a new attacker (low-memory mode) or use a cached one
            if self.low_memory:
                attacker = KantchelianAttack(
                    None,
                    epsilon=self.epsilon,
                    order=self.order,
                    guard_val=self.guard_val,
                    round_digits=self.round_digits,
                    pred_threshold=self.pred_threshold,
                    verbose=self.verbose,
                    n_threads=self.n_threads,
                    pos_json_input=self.one_vs_all_models[label],
                    neg_json_input=self.one_vs_all_models[other_label],
                )
            else:
                attacker = self.binary_attackers[label][other_label]
            # Check if the binary attacker can create an adversarial example
            if attacker.attack_feasible(sample, 1):
                return True
        return False
# Default options consumed by the attack wrapper below; callers may override
# any subset through the `options` argument of its methods.
DEFAULT_OPTIONS = {
    "epsilon": None,  # feasibility threshold (only valid with order=inf)
    "guard_val": GUARD_VAL,  # numerical guard used when nudging values across split thresholds
    "round_digits": ROUND_DIGITS,  # rounding precision passed to KantchelianAttack
    "pred_threshold": 0.0,  # decision threshold on the summed leaf values
    "order": np.inf,  # norm used to measure perturbation size
    "low_memory": False,  # multiclass only: build binary attackers lazily per call
    "verbose": False,
    "n_threads": 1,  # solver threads passed to KantchelianAttack
}
class KantchelianAttackWrapper(AttackWrapper):
    """AttackWrapper front-end for the Kantchelian MILP attacks, dispatching
    to the binary or multiclass implementation based on `n_classes`."""

    def __init__(self, json_model, n_classes):
        # json_model: list of decision trees in XGBoost JSON format
        # n_classes: number of output classes of the model
        self.json_model = json_model
        self.n_classes = n_classes

    def __get_attacker(self, order, options):
        """Instantiate the MILP attacker for norm `order` from merged options."""
        if self.n_classes == 2:
            attack = KantchelianAttack(
                self.json_model,
                order=order,
                epsilon=options["epsilon"],
                guard_val=options["guard_val"],
                round_digits=options["round_digits"],
                pred_threshold=options["pred_threshold"],
                verbose=options["verbose"],
                n_threads=options["n_threads"],
            )
        else:
            attack = KantchelianAttackMultiClass(
                self.json_model,
                self.n_classes,
                order=order,
                epsilon=options["epsilon"],
                guard_val=options["guard_val"],
                round_digits=options["round_digits"],
                pred_threshold=options["pred_threshold"],
                low_memory=options["low_memory"],
                verbose=options["verbose"],
                n_threads=options["n_threads"],
            )
        return attack

    def attack_feasibility(self, X, y, order=np.inf, epsilon=0.0, options=None):
        """Return a boolean array telling, per sample, whether an attack
        within `epsilon` (under norm `order`) is feasible."""
        opts = DEFAULT_OPTIONS.copy()
        if options is not None:
            opts.update(options)
        opts["epsilon"] = epsilon
        attack = self.__get_attacker(order, opts)
        attack_feasible = []
        start_time = time.time()
        for sample, label in tqdm(zip(X, y), total=X.shape[0]):
            attack_feasible.append(attack.attack_feasible(sample, label))
        total_time = time.time() - start_time
        if opts["verbose"]:
            print("Total time:", total_time)
            print("Avg time per instance:", total_time / len(X))
        return np.array(attack_feasible)

    def adversarial_examples(self, X, y, order=np.inf, options=None):
        """Return an array of minimal-distance adversarial examples, one per
        (sample, label) pair."""
        opts = DEFAULT_OPTIONS.copy()
        if options is not None:
            opts.update(options)
        attack = self.__get_attacker(order, opts)
        start_time = time.time()
        X_adv = []
        for sample, label in tqdm(zip(X, y), total=X.shape[0]):
            optimal_example = attack.optimal_adversarial_example(sample, label)
            X_adv.append(optimal_example)
        total_time = time.time() - start_time
        # BUG FIX: this previously read options["verbose"], which raised a
        # TypeError when options was None (the default) and a KeyError when
        # the key was absent; use the merged opts dict, as attack_feasibility
        # already does.
        if opts["verbose"]:
            print("Total time:", total_time)
            print("Avg time per instance:", total_time / len(X))
        return np.array(X_adv)
| [
"numpy.sum",
"numpy.abs",
"numpy.copy",
"time.time",
"numpy.array",
"numpy.linalg.norm",
"pprint.pprint"
] | [((1408, 1452), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - X_adv)'], {'ord': 'order', 'axis': '(1)'}), '(X - X_adv, ord=order, axis=1)\n', (1422, 1452), True, 'import numpy as np\n'), ((12637, 12652), 'numpy.copy', 'np.copy', (['sample'], {}), '(sample)\n', (12644, 12652), True, 'import numpy as np\n'), ((15338, 15353), 'numpy.copy', 'np.copy', (['sample'], {}), '(sample)\n', (15345, 15353), True, 'import numpy as np\n'), ((20894, 20913), 'numpy.sum', 'np.sum', (['leaf_values'], {}), '(leaf_values)\n', (20900, 20913), True, 'import numpy as np\n'), ((27687, 27698), 'time.time', 'time.time', ([], {}), '()\n', (27696, 27698), False, 'import time\n'), ((28040, 28065), 'numpy.array', 'np.array', (['attack_feasible'], {}), '(attack_feasible)\n', (28048, 28065), True, 'import numpy as np\n'), ((28313, 28324), 'time.time', 'time.time', ([], {}), '()\n', (28322, 28324), False, 'import time\n'), ((28736, 28751), 'numpy.array', 'np.array', (['X_adv'], {}), '(X_adv)\n', (28744, 28751), True, 'import numpy as np\n'), ((27860, 27871), 'time.time', 'time.time', ([], {}), '()\n', (27869, 27871), False, 'import time\n'), ((28553, 28564), 'time.time', 'time.time', ([], {}), '()\n', (28562, 28564), False, 'import time\n'), ((24508, 24560), 'numpy.linalg.norm', 'np.linalg.norm', (['(sample - adv_example)'], {'ord': 'self.order'}), '(sample - adv_example, ord=self.order)\n', (24522, 24560), True, 'import numpy as np\n'), ((6392, 6411), 'pprint.pprint', 'pprint.pprint', (['tree'], {}), '(tree)\n', (6405, 6411), False, 'import pprint\n'), ((13910, 13938), 'numpy.abs', 'np.abs', (['(x[key] - axis[i - 1])'], {}), '(x[key] - axis[i - 1])\n', (13916, 13938), True, 'import numpy as np\n'), ((14036, 14077), 'numpy.abs', 'np.abs', (['(x[key] - axis[i] + self.guard_val)'], {}), '(x[key] - axis[i] + self.guard_val)\n', (14042, 14077), True, 'import numpy as np\n'), ((16824, 16852), 'numpy.abs', 'np.abs', (['(x[key] - axis[i - 1])'], {}), '(x[key] - axis[i - 1])\n', (16830, 16852), True, 
'import numpy as np\n'), ((20220, 20239), 'pprint.pprint', 'pprint.pprint', (['tree'], {}), '(tree)\n', (20233, 20239), False, 'import pprint\n'), ((20687, 20706), 'pprint.pprint', 'pprint.pprint', (['tree'], {}), '(tree)\n', (20700, 20706), False, 'import pprint\n'), ((16957, 16998), 'numpy.abs', 'np.abs', (['(x[key] - axis[i] + self.guard_val)'], {}), '(x[key] - axis[i] + self.guard_val)\n', (16963, 16998), True, 'import numpy as np\n')] |
# --- For cmd.py
from __future__ import division, print_function
import os
import subprocess
import multiprocessing
import collections
import glob
import pandas as pd
import numpy as np
import distutils.dir_util
import shutil
import stat
import re
# --- External library for io
try:
import weio
except:
try:
import welib.weio as weio
print('Using `weio` from `welib`')
except:
raise Exception('Fastlib needs the package `weio` to be installed from https://github.com/ebranlard/weio/`')
FAST_EXE='openfast'  # default FAST/OpenFAST executable used when none is given
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
def createStepWind(filename,WSstep=1,WSmin=3,WSmax=25,tstep=100,dt=0.5,tmin=0,tmax=999):
    """Write a FAST step-wind file to `filename`: the wind speed increases
    from WSmin to WSmax by WSstep every tstep seconds, with each transition
    taking dt seconds (two table rows per plateau: end of the previous step
    at t-dt and start of the next one at t).
    """
    f = weio.FASTWndFile()
    Steps= np.arange(WSmin,WSmax+WSstep,WSstep)
    print(Steps)
    nCol = len(f.colNames)
    # Two rows per wind-speed plateau
    nRow = len(Steps)*2
    M = np.zeros((nRow,nCol));
    M[0,0] = tmin
    M[0,1] = WSmin
    for i,s in enumerate(Steps[:-1]):
        M[2*i+1,0] = tmin + (i+1)*tstep-dt
        M[2*i+2,0] = tmin + (i+1)*tstep
        M[2*i+1,1] = Steps[i]
        # NOTE(review): i only reaches len(Steps)-2 here (loop is over
        # Steps[:-1]), so this condition is always true and the else branch
        # is dead code — confirm intent.
        if i<len(Steps)-1:
            M[2*i+2,1] = Steps[i+1]
        else:
            M[2*i+2,1] = Steps[-1]
    M[-1,0]= max(tmax, (len(Steps)+1)*tstep)
    M[-1,1]= WSmax
    f.data=pd.DataFrame(data=M,columns=f.colNames)
    #
    print(f.data)
    f.write(filename)
    #plt.plot(M[:,0],M[:,1])
    #plt.show()
    #print(f.toDataFrame())
    #pass
#print(f.toDataFrame())
#pass
#createStepWind('test.wnd',tstep=200,WSmax=28)
# createStepWind('test.wnd',tstep=200,WSmin=5,WSmax=7,WSstep=2)
# --------------------------------------------------------------------------------}
# --- Tools for executing FAST
# --------------------------------------------------------------------------------{
# --- START cmd.py
def run_cmds(inputfiles, exe, parallel=True, ShowOutputs=True, nCores=None, ShowCommand=True):
    """ Run a set of simple commands of the form `exe input_file`
    By default, the commands are run in "parallel" (though the method needs to be improved)
    The stdout and stderr may be displayed on screen (`ShowOutputs`) or hidden.
    A better handling is yet required.

    Returns True when every command exited with code 0, False otherwise.
    """
    Failed=[]
    def _report(p):
        # Print a one-line status for a finished process and collect failures
        if p.returncode==0:
            print('[ OK ] Input : ',p.input_file)
        else:
            Failed.append(p)
            print('[FAIL] Input : ',p.input_file)
            print(' Directory: '+os.getcwd())
            print(' Command : '+p.cmd)
            print(' Use `ShowOutputs=True` to debug, or run the command above.')
        #out, err = p.communicate()
        #print('StdOut:\n'+out)
        #print('StdErr:\n'+err)
    ps=[]
    iProcess=0
    if nCores is None:
        nCores=multiprocessing.cpu_count()
    if nCores<0:
        nCores=len(inputfiles)+1
    for i,f in enumerate(inputfiles):
        #print('Process {}/{}: {}'.format(i+1,len(inputfiles),f))
        ps.append(run_cmd(f, exe, wait=(not parallel), ShowOutputs=ShowOutputs, ShowCommand=ShowCommand))
        iProcess += 1
        # waiting once we've filled the number of cores
        # TODO: smarter method with proper queue, here processes are run by chunks
        if parallel:
            if iProcess==nCores:
                for p in ps:
                    p.wait()
                for p in ps:
                    _report(p)
                ps=[]
                iProcess=0
    # Extra process if not multiptle of nCores (TODO, smarter method)
    for p in ps:
        p.wait()
    for p in ps:
        _report(p)
    # --- Giving a summary
    if len(Failed)==0:
        print('[ OK ] All simulations run successfully.')
        return True
    else:
        print('[FAIL] {}/{} simulations failed:'.format(len(Failed),len(inputfiles)))
        for p in Failed:
            print(' ',p.input_file)
        return False
def run_cmd(input_file, exe, wait=True, ShowOutputs=False, ShowCommand=True):
    """ Run a simple command of the form `exe input_file`

    Returns the `subprocess.Popen` object with extra attributes attached
    (cmd, args, input_file, input_file_abs, exe). When `wait` is True the
    process has already finished and `returncode` is set.

    Raises Exception when `exe` does not exist.
    """
    # TODO Better capture STDOUT
    if not os.path.isabs(input_file):
        input_file_abs=os.path.abspath(input_file)
    else:
        input_file_abs=input_file
    if not os.path.exists(exe):
        raise Exception('Executable not found: {}'.format(exe))
    args= [exe,input_file]
    #args = 'cd '+workdir+' && '+ exe +' '+basename
    shell=False
    # BUG FIX: previously `open(os.devnull, 'w')` was used and never closed,
    # leaking a file handle per call; subprocess.DEVNULL needs no cleanup.
    if ShowOutputs:
        STDOut= None
    else:
        STDOut= subprocess.DEVNULL
    if ShowCommand:
        print('Running: '+' '.join(args))
    # BUG FIX: the old wait=True path used subprocess.call, which returns an
    # int, so the attribute assignments below raised AttributeError. Always
    # use Popen and wait on it when requested.
    p=subprocess.Popen(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
    if wait:
        p.wait()
    # Storing some info into the process
    p.cmd            = ' '.join(args)
    p.args           = args
    p.input_file     = input_file
    p.input_file_abs = input_file_abs
    p.exe            = exe
    return p
# --- END cmd.py
def run_fastfiles(fastfiles, fastExe=None, parallel=True, ShowOutputs=True, nCores=None, ShowCommand=True, ReRun=True):
    """Run a list of FAST input files through `fastExe`.

    When ReRun is False, simulations whose .out/.outb result already exists
    are skipped. Returns the summary boolean from `run_cmds`.
    """
    if fastExe is None:
        fastExe = FAST_EXE
    if not ReRun:
        # Keep only simulations without existing output files
        remaining = []
        for f in fastfiles:
            root = os.path.splitext(f)[0]
            if os.path.exists(root + '.outb') or os.path.exists(root + '.out'):
                print('>>> Skipping existing simulation for: ', f)
            else:
                remaining.append(f)
        fastfiles = remaining
    return run_cmds(fastfiles, fastExe, parallel=parallel, ShowOutputs=ShowOutputs, nCores=nCores, ShowCommand=ShowCommand)
def run_fast(input_file, fastExe=None, wait=True, ShowOutputs=False, ShowCommand=True):
    """Run a single FAST simulation; thin wrapper around `run_cmd`."""
    exe = FAST_EXE if fastExe is None else fastExe
    return run_cmd(input_file, exe, wait=wait, ShowOutputs=ShowOutputs, ShowCommand=ShowCommand)
def writeBatch(batchfile, fastfiles, fastExe=None):
    """Write `batchfile` with one `exe basename(file)` line per FAST input file."""
    if fastExe is None:
        fastExe = FAST_EXE
    with open(batchfile, 'w') as fh:
        for fastfile in fastfiles:
            fh.write(fastExe + ' ' + os.path.basename(fastfile) + '\n')
def removeFASTOuputs(workdir):
    """Delete FAST output files (*.out, *.outb, *.ech, *.sum) from `workdir`."""
    for ext in ['out', 'outb', 'ech', 'sum']:
        for f in glob.glob(os.path.join(workdir, '*.' + ext)):
            os.remove(f)
# --------------------------------------------------------------------------------}
# --- Tools for IO
# --------------------------------------------------------------------------------{
def ED_BldStations(ED):
    """Return ElastoDyn blade station positions.

    ED: filename of an ElastoDyn input file, or an already-read
        weio.FASTInFile instance.
    Returns (bld_fract, r_nodes): fraction of blade length where stations are
    defined, and spanwise position of the stations from the rotor apex.
    """
    if not isinstance(ED, weio.FASTInFile):
        ED = weio.FASTInFile(ED)
    n = ED['BldNodes']
    # Stations sit at the midpoints of n equal-length elements
    step = 1. / n
    bld_fract = np.arange(step / 2., 1, step)
    hub_rad = ED['HubRad']
    tip_rad = ED['TipRad']
    r_nodes = bld_fract * (tip_rad - hub_rad) + hub_rad
    return bld_fract, r_nodes
def ED_TwrStations(ED):
    """Return ElastoDyn tower station positions.

    ED: filename of an ElastoDyn input file, or an already-read
        weio.FASTInFile instance.
    Returns (twr_fract, h_nodes): fraction of tower length where stations are
    defined, and station heights from the *ground* (not the tower base).
    """
    if not isinstance(ED, weio.FASTInFile):
        ED = weio.FASTInFile(ED)
    n = ED['TwrNodes']
    # Stations sit at the midpoints of n equal-length elements
    step = 1. / n
    twr_fract = np.arange(step / 2., 1, step)
    base_ht = ED['TowerBsHt']
    tower_ht = ED['TowerHt']
    h_nodes = twr_fract * (tower_ht - base_ht) + base_ht
    return twr_fract, h_nodes
def ED_BldGag(ED):
    """Radial positions (from the rotor apex) of the ElastoDyn blade gages.

    ED: filename of an ElastoDyn input file, or an already-read
        weio.FASTInFile instance.
    Returns an empty array when no blade gages are defined.
    """
    if not isinstance(ED, weio.FASTInFile):
        ED = weio.FASTInFile(ED)
    _, r_nodes = ED_BldStations(ED)
    nOuts = ED['NBlGages']
    if nOuts <= 0:
        return np.array([])
    gag_nd = ED['BldGagNd']
    # BldGagNd may be a scalar when a single gage is requested
    if type(gag_nd) is list:
        Inodes = np.asarray(gag_nd)
    else:
        Inodes = np.array([gag_nd])
    # Node numbers are 1-based in the FAST input file
    return r_nodes[Inodes[:nOuts] - 1]
def ED_TwrGag(ED):
    """Heights (from the ground) of the ElastoDyn tower gages.

    ED: filename of an ElastoDyn input file, or an already-read
        weio.FASTInFile instance.
    Returns an empty array when no tower gages are defined.
    """
    if not isinstance(ED, weio.FASTInFile):
        ED = weio.FASTInFile(ED)
    _, h_nodes = ED_TwrStations(ED)
    nOuts = ED['NTwGages']
    if nOuts <= 0:
        return np.array([])
    gag_nd = ED['TwrGagNd']
    # TwrGagNd may be a scalar when a single gage is requested
    if type(gag_nd) is list:
        Inodes = np.asarray(gag_nd)
    else:
        Inodes = np.array([gag_nd])
    # Node numbers are 1-based in the FAST input file
    return h_nodes[Inodes[:nOuts] - 1]
def AD14_BldGag(AD):
    """Radial positions (from the blade root) of AeroDyn 14 output nodes,
    i.e. nodes whose 6th column starts with 'p' (print flag).

    AD: filename of an AeroDyn input file, or an already-read
        weio.FASTInFile instance.
    Returns (r_gag, IR): radial positions and matching 1-based node indices.
    """
    if not isinstance(AD, weio.FASTInFile):
        AD = weio.FASTInFile(AD)
    Nodes = AD['BldAeroNodes']
    if Nodes.shape[1] == 6:
        # Column 6 carries the per-node print flag
        doPrint = np.array([s.lower().startswith('p') for s in Nodes[:, 5]])
    else:
        # No print column present: keep every node
        doPrint = np.array([True for _ in Nodes[:, 0]])
    IR = np.arange(1, len(Nodes) + 1)[doPrint]
    r_gag = Nodes[doPrint, 0].astype(float)
    return r_gag, IR
def AD_BldGag(AD, AD_bld, chordOut=False):
    """Radial positions (from the blade root) of AeroDyn 15 blade gages.

    AD:     filename of an AeroDyn input file, or a weio.FASTInFile instance.
    AD_bld: filename of an AeroDyn blade input file, or a weio.FASTInFile.
    When chordOut is True, also return the chord at the gage locations.
    Returns an empty array when no blade output nodes are defined.
    """
    if not isinstance(AD, weio.FASTInFile):
        AD = weio.FASTInFile(AD)
    if not isinstance(AD_bld, weio.FASTInFile):
        AD_bld = weio.FASTInFile(AD_bld)
    #print(AD_bld.keys())
    nOuts = AD['NBlOuts']
    if nOuts <= 0:
        return np.array([])
    # BlOutNd node numbers are 1-based
    INodes = np.array(AD['BlOutNd'][:nOuts])
    nodes = AD_bld['BldAeroNodes']
    r_gag = nodes[INodes - 1, 0]
    if not chordOut:
        return r_gag
    chord_gag = nodes[INodes - 1, 5]
    return r_gag, chord_gag
#
#
# 1, 7, 14, 21, 30, 36, 43, 52, 58 BldGagNd List of blade nodes that have strain gages [1 to BldNodes] (-) [unused if NBlGages=0]
def spanwise(tsAvg,vr_bar,R,postprofile=None):
    """Gather ElastoDyn spanwise (radial) load channels from one averaged
    time-series record `tsAvg` into a DataFrame (one row per station).

    vr_bar: normalized radial positions r/R; R: rotor radius;
    postprofile: optional csv output path.
    Returns None when no spanwise ElastoDyn channels are present.
    """
    nr=len(vr_bar)
    Columns = [('r/R_[-]', vr_bar)]
    # NOTE(review): the MLyb1 channels are labelled MLxb1 below and vice
    # versa — this looks like a deliberate coordinate remap, but confirm it
    # is not a typo.
    Columns.append(extractSpanTS(tsAvg,nr,'Spn{:d}FLxb1_[kN]' ,'FLxb1_[kN]'))
    Columns.append(extractSpanTS(tsAvg,nr,'Spn{:d}MLyb1_[kN-m]' ,'MLxb1_[kN-m]' ))
    Columns.append(extractSpanTS(tsAvg,nr,'Spn{:d}MLxb1_[kN-m]' ,'MLyb1_[kN-m]' ))
    Columns.append(extractSpanTS(tsAvg,nr,'Spn{:d}MLzb1_[kN-m]' ,'MLzb1_[kN-m]' ))
    Columns.append(('r_[m]', vr_bar*R))
    data = np.column_stack([c for _,c in Columns if c is not None])
    ColNames = [n for n,_ in Columns if n is not None]
    # --- Export to dataframe and csv
    if len(ColNames)<=2:
        print('[WARN] No elastodyn spanwise data found.')
        return None
    else:
        dfRad = pd.DataFrame(data= data, columns = ColNames)
        if postprofile is not None:
            dfRad.to_csv(postprofile,sep='\t',index=False)
        return dfRad
def spanwiseAD(tsAvg,vr_bar=None,rho=None,R=None,nB=None,chord=None,postprofile=None,IR=None):
    """Gather AeroDyn spanwise (per-blade-node) channels from one averaged
    time-series record `tsAvg` into a DataFrame (one row per radial station).

    vr_bar: optional normalized radial positions r/R (an index-based axis is
            used when omitted); rho/R/nB: air density, rotor radius and blade
            count (used for the Ct columns); chord: chord at the stations;
    postprofile: optional csv output path; IR: AD14 node indices.
    Returns None when no spanwise aero channels are present.
    """
    # --- Extract radial data
    Columns=[]
    for sB in ['B1','B2','B3']:
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Alpha_\[deg\]',sB+'Alpha_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AOA_\[deg\]' ,sB+'Alpha_[deg]')) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AxInd_\[-\]' ,sB+'AxInd_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)TnInd_\[-\]' ,sB+'TnInd_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)AIn_\[-\]' ,sB+'AxInd_[-]' )) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)ApI_\[-\]' ,sB+'TnInd_[-]' )) # DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cl_\[-\]' ,sB+'Cl_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cd_\[-\]' ,sB+'Cd_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cm_\[-\]' ,sB+'Cm_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cx_\[-\]' ,sB+'Cx_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cy_\[-\]' ,sB+'Cy_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Cn_\[-\]' ,sB+'Cn_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Ct_\[-\]' ,sB+'Ct_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Re_\[-\]' ,sB+'Re_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vrel_\[m/s\]' ,sB+'Vrel_[m/s]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Theta_\[deg\]',sB+'Theta_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Phi_\[deg\]' ,sB+'Phi_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Twst_\[deg\]' ,sB+'Twst_[deg]')) #DBGOuts
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Curve_\[deg\]',sB+'Curve_[deg]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vindx_\[m/s\]',sB+'Vindx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vindy_\[m/s\]',sB+'Vindy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fx_\[N/m\]' ,sB+'Fx_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fy_\[N/m\]' ,sB+'Fy_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fl_\[N/m\]' ,sB+'Fl_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fd_\[N/m\]' ,sB+'Fd_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Fn_\[N/m\]' ,sB+'Fn_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Ft_\[N/m\]' ,sB+'Ft_[N/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndx_\[m/s\]',sB+'VUndx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndy_\[m/s\]',sB+'VUndy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VUndz_\[m/s\]',sB+'VUndz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisx_\[m/s\]',sB+'VDisx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisy_\[m/s\]',sB+'VDisy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)VDisz_\[m/s\]',sB+'VDisz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vx_\[m/s\]' ,sB+'Vx_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vy_\[m/s\]' ,sB+'Vy_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Vz_\[m/s\]' ,sB+'Vz_[m/s]'))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)DynP_\[Pa\]' ,sB+'DynP_[Pa]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)M_\[-\]' ,sB+'M_[-]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Mm_\[N-m/m\]' ,sB+'Mm_[N-m/m]' ))
        Columns.append(extractSpanTSReg(tsAvg,'^'+sB+'N(\d*)Gam_\[' ,sB+'Gam_[m^2/s]')) #DBGOuts
    # --- AD 14
    # NOTE(review): the 'Pmomt_[N-N]' label below looks like a typo for
    # '[N-m]' but is kept since downstream code may rely on the column name.
    Columns.append(extractSpanTSReg(tsAvg,'^Alpha(\d*)_\[deg\]' ,'Alpha_[deg]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^DynPres(\d*)_\[Pa\]' ,'DynPres_[Pa]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CLift(\d*)_\[-\]' ,'CLift_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CDrag(\d*)_\[-\]' ,'CDrag_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CNorm(\d*)_\[-\]' ,'CNorm_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CTang(\d*)_\[-\]' ,'CTang_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^CMomt(\d*)_\[-\]' ,'CMomt_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Pitch(\d*)_\[deg\]' ,'Pitch_[deg]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^AxInd(\d*)_\[-\]' ,'AxInd_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^TanInd(\d*)_\[-\]' ,'TanInd_[-]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ForcN(\d*)_\[N\]' ,'ForcN_[N]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ForcT(\d*)_\[N\]' ,'ForcT_[N]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Pmomt(\d*)_\[N-m\]' ,'Pmomt_[N-N]' , IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^ReNum(\d*)_\[x10^6\]' ,'ReNum_[x10^6]', IR=IR))
    Columns.append(extractSpanTSReg(tsAvg,'^Gamma(\d*)_\[m^2/s\]' ,'Gamma_[m^2/s]', IR=IR))
    # --- Data present
    data = [c for _,c in Columns if c is not None]
    ColNames = [n for n,_ in Columns if n is not None]
    Lengths = [len(d) for d in data]
    if len(data)<=0:
        print('[WARN] No spanwise aero data')
        return None
    # --- Harmonize data so that they all have the same length
    nrMax = np.max(Lengths)
    ids=np.arange(nrMax)
    if vr_bar is None:
        bFakeVr=True
        vr_bar = ids/(nrMax-1)
    else:
        bFakeVr=False
        if (nrMax)<len(vr_bar):
            vr_bar=vr_bar[1:nrMax]
            if chord is not None:
                chord =chord[1:nrMax]
        elif (nrMax)>len(vr_bar):
            raise Exception('Inconsitent length between radial stations and max index present in output chanels')
    # Pad shorter channels with NaN so all columns have nrMax rows
    for i in np.arange(len(data)):
        d=data[i]
        if len(d)<nrMax:
            Values = np.zeros((nrMax,1))
            Values[:] = np.nan
            # NOTE(review): the slice 1:len(d) holds len(d)-1 rows while d
            # has len(d) entries — check for an off-by-one here.
            Values[1:len(d)] = d
            data[i] = Values
            print('>> nr',len(d), nrMax,len(Values))
    # --- Combine data and remove
    dataStack = np.column_stack([d for d in data])
    ValidRow = np.logical_not([np.isnan(dataStack).all(axis=1)])
    dataStack = dataStack[ValidRow[0],:]
    ids = ids [ValidRow[0]]
    vr_bar = vr_bar [ValidRow[0]]
    if chord is not None:
        chord = chord [ValidRow[0]]
    # --- Create a dataframe
    dfRad = pd.DataFrame(data= dataStack, columns = ColNames)
    if bFakeVr:
        dfRad.insert(0, 'i/n_[-]', vr_bar)
    else:
        dfRad.insert(0, 'r/R_[-]', vr_bar)
    if R is not None:
        r = vr_bar*R
        # --- Compute additional values (AD15 only)
        for sB in ['B1','B2','B3']:
            try: # for python 2
                Fx = dfRad[sB+'Fx_[N/m]']
                U0 = tsAvg['Wind1VelX_[m/s]']
                Ct=nB*Fx/(0.5 * rho * 2 * U0**2 * np.pi * r)
                Ct[vr_bar<0.01] = 0
                dfRad[sB+'Ct_[-]'] = Ct
                CT=2*np.trapz(vr_bar*Ct,vr_bar)
                dfRad[sB+'CtAvg_[-]']= CT*np.ones(r.shape)
            except:
                pass
            try:
                dfRad[sB+'Gamma_[m^2/s]'] = 1/2 * chord* dfRad[sB+'Vrel_[m/s]'] * dfRad[sB+'Cl_[-]']
            except:
                pass
    # NOTE(review): `r` is only defined when R is not None, so the branch
    # below raises NameError when vr_bar is given but R is None — confirm.
    dfRad['id_[#]']=ids+1
    if not bFakeVr:
        dfRad['r_[m]'] = r
    # --- Export to dataframe and csv
    if postprofile is not None:
        dfRad.to_csv(postprofile,sep='\t',index=False)
    return dfRad
def spanwisePostPro(FST_In=None,avgMethod='constantwindow',avgParam=5,out_ext='.outb',postprofile=None,df=None):
    """
    Postprocess FAST radial data
    INPUTS:
        - FST_IN: Fast .fst input file
        - avgMethod='periods', avgParam=2: average over 2 last periods, Needs Azimuth sensors!!!
        - avgMethod='constantwindow', avgParam=5: average over 5s of simulation
        - postprofile: outputfile to write radial data
        - df: optional, already-read output DataFrame (skips reading the
          FST_In output file)
    OUTPUTS:
        - (dfStructRad, dfAeroRad): ElastoDyn and AeroDyn radial DataFrames
    """
    # --- Opens Fast output and performs averaging
    if df is None:
        df = weio.read(FST_In.replace('.fst',out_ext)).toDataFrame()
    else:
        pass
    # NOTE: spanwise script doest not support duplicate columns
    df = df.loc[:,~df.columns.duplicated()]
    dfAvg = averageDF(df,avgMethod=avgMethod ,avgParam=avgParam) # NOTE: average 5 last seconds
    # --- Extract info (e.g. radial positions) from Fast input file
    if FST_In is None:
        r_FST_struct = None
        rho = 1.225
        r_bar_FST_aero=None
        R=None
        chord=None
        IR = None
    else:
        fst = weio.FASTInputDeck(FST_In)
        chord=None
        if fst.version == 'F7':
            # --- FAST7
            if not hasattr(fst,'AD'):
                raise Exception('The AeroDyn file couldn''t be found or read, from main file: '+FST_In)
            r_FST_aero,IR = AD14_BldGag(fst.AD)
            R = fst.fst['TipRad']
            # Density key differs between AeroDyn file versions
            try:
                rho = fst.AD['Rho']
            except:
                rho = fst.AD['AirDens']
            r_FST_struct = None
        else:
            # --- OpenFAST 2
            if not hasattr(fst,'ED'):
                raise Exception('The Elastodyn file couldn''t be found or read, from main file: '+FST_In)
            if not hasattr(fst,'AD'):
                raise Exception('The AeroDyn file couldn''t be found or read, from main file: '+FST_In)
            if fst.ADversion == 'AD15':
                if not hasattr(fst.AD,'Bld1'):
                    raise Exception('The AeroDyn blade file couldn''t be found or read, from main file: '+FST_In)
                rho = fst.AD['AirDens']
                r_FST_aero_gag,_ = AD_BldGag(fst.AD,fst.AD.Bld1, chordOut = True) # Only at Gages locations
                r_FST_aero = fst.AD.Bld1['BldAeroNodes'][:,0] # Full span
                chord = fst.AD.Bld1['BldAeroNodes'][:,5] # Full span
                # AD15 blade nodes are given from the root; shift to rotor apex
                r_FST_aero+= fst.ED['HubRad']
                IR = None
            elif fst.ADversion == 'AD14':
                try:
                    rho = fst.AD['Rho']
                except:
                    rho = fst.AD['AirDens']
                r_FST_aero,IR = AD14_BldGag(fst.AD)
            else:
                raise Exception('AeroDyn version unknown')
            R = fst.ED ['TipRad']
            r_FST_struct = ED_BldGag(fst.ED)
            #print('r struct:',r_FST_struct)
            #print('r aero :',r_FST_aero)
            #print('IR :',IR)
        r_bar_FST_aero = r_FST_aero/R
    # --- Extract radial data and export to csv if needed
    dfAeroRad = spanwiseAD(dfAvg.iloc[0], r_bar_FST_aero, rho , R, nB=3, chord=chord, postprofile=postprofile, IR=IR)
    if r_FST_struct is None:
        dfStructRad=None
    else:
        dfStructRad = spanwise(dfAvg.iloc[0] , r_FST_struct/R, R=R, postprofile=postprofile)
    return dfStructRad , dfAeroRad
# --------------------------------------------------------------------------------}
# --- Template replace
# --------------------------------------------------------------------------------{
def handleRemoveReadonlyWin(func, path, exc_info):
    """
    Error handler for ``shutil.rmtree``.
    If the error is due to an access error (read only file)
    it attempts to add write permission and then retries.
    Usage : ``shutil.rmtree(path, onerror=onerror)``
    """
    if os.access(path, os.W_OK):
        # Not a permission problem: re-raise the original exception
        raise
    # Read-only entry: grant write permission and retry the failed operation
    os.chmod(path, stat.S_IWUSR)
    func(path)
def copyTree(src, dst):
    """
    Copy a directory to another one, overwritting files if necessary.
    copy_tree from distutils and copytree from shutil fail on Windows (in particular on git files)
    """
    def _force_copy_file(sfile, dfile):
        # Make an existing read-only destination writable before overwriting
        if os.path.isfile(dfile) and not os.access(dfile, os.W_OK):
            os.chmod(dfile, stat.S_IWUSR)
        #print(sfile, ' > ', dfile)
        shutil.copy2(sfile, dfile)

    def _merge_flat_dir(srcDir, dstDir):
        # Copy every entry of a directory containing only files
        if not os.path.exists(dstDir):
            os.makedirs(dstDir)
        for entry in os.listdir(srcDir):
            _force_copy_file(os.path.join(srcDir, entry), os.path.join(dstDir, entry))

    def _is_flat_dir(sDir):
        # True when sDir contains no subdirectories
        for entry in os.listdir(sDir):
            if os.path.isdir(os.path.join(sDir, entry)):
                return False
        return True

    for entry in os.listdir(src):
        s = os.path.join(src, entry)
        d = os.path.join(dst, entry)
        if os.path.isfile(s):
            if not os.path.exists(dst):
                os.makedirs(dst)
            _force_copy_file(s, d)
        if os.path.isdir(s):
            if _is_flat_dir(s):
                _merge_flat_dir(s, d)
            else:
                # Nested directories: recurse
                copyTree(s, d)
def templateReplaceGeneral(PARAMS, template_dir=None, output_dir=None, main_file=None, name_function=None, RemoveAllowed=False):
    """ Generate inputs files by replacing different parameters from a template file.
    The generated files are placed in the output directory `output_dir`
    The files are read and written using the library `weio`.
    The template file is read and its content can be changed like a dictionary.
    Each item of `PARAMS` correspond to a set of parameters that will be replaced
    in the template file to generate one input file.
    For "FAST" input files, parameters can be changed recursively.
    INPUTS:
       PARAMS: list of dictionaries. Each key of the dictionary should be a key present in the
               template file when read with `weio` (see: weio.read(main_file).keys() )
               PARAMS[0]={'FAST|DT':0.1, 'EDFile|GBRatio':1, 'ServoFile|GenEff':0.8}
       template_dir: if provided, this directory and its content will be copied to `output_dir`
                     before doing the parametric substitution
       output_dir  : directory where files will be generated.
       main_file   : entry-point ("master") input file; with `template_dir` it is resolved
                     inside `output_dir` after the copy
       name_function: callable p->str giving the per-case file prefix; if None, each
                      parameter dict must carry a '__name__' key
       RemoveAllowed: if True, an existing `output_dir` is deleted before generation
    RETURNS: list of generated master-file paths (one per entry of PARAMS)
    """
    # `fileID` maps a parameter key like 'EDFile|GBRatio' to the sub-file it lives in;
    # keys without a pipe belong to the master ('ROOT') file.
    def fileID(s):
        if s.find('|')<=0:
            return 'ROOT'
        else:
            return s.split('|')[0]
    def basename(s):
        # filename without directory or extension
        return os.path.splitext(os.path.basename(s))[0]
    def rebase(s,sid):
        # place `s` (basename only) into output_dir with suffix `sid` before the extension
        split = os.path.splitext(os.path.basename(s))
        return os.path.join(output_dir,split[0]+sid+split[1])
    def rebase_rel(s,sid):
        # same as `rebase` but keeps the relative sub-path of `s`
        split = os.path.splitext(s)
        return os.path.join(output_dir,split[0]+sid+split[1])
    # --- Safety checks
    if template_dir is None and output_dir is None:
        raise Exception('Provide at least a template directory OR an output directory')
    if template_dir is not None:
        if not os.path.exists(template_dir):
            raise Exception('Template directory does not exist: '+template_dir)
        # Default value of output_dir if not provided (strip trailing separator first)
        if template_dir[-1]=='/' or template_dir[-1]=='\\' :
            template_dir=template_dir[0:-1]
        if output_dir is None:
            output_dir=template_dir+'_Parametric'
    # --- Creating output_dir - Copying template folder to output_dir if necessary
    if os.path.exists(output_dir) and RemoveAllowed:
        # onerror handler clears the read-only bit so rmtree works on Windows
        shutil.rmtree(output_dir, ignore_errors=False, onerror=handleRemoveReadonlyWin)
    if template_dir is not None:
        copyTree(template_dir, output_dir)
    else:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    # --- Main file use as "master"
    if template_dir is not None:
        # master file is the copy that now lives inside output_dir
        main_file=os.path.join(output_dir, os.path.basename(main_file))
    else:
        main_file=main_file
    # Params need to be a list
    if not isinstance(PARAMS,list):
        PARAMS=[PARAMS]
    files=[]
    # TODO: Recursive loop splitting at the pipes '|', for now only 1 level supported...
    for ip,p in enumerate(PARAMS):
        if '__index__' not in p.keys():
            p['__index__']=ip
        # Determine the per-case identifier used to name the generated files
        if name_function is None:
            if '__name__' in p.keys():
                strID=p['__name__']
            else:
                raise Exception('When calling `templateReplace`, either provide a naming function or profile the key `__name_` in the parameter dictionaries')
        else:
            strID =name_function(p)
        # Sub-files touched by this parameter set (e.g. 'EDFile', 'ServoFile')
        FileTypes = set([fileID(k) for k in list(p.keys()) if (k!='__index__' and k!='__name__')])
        FileTypes = set(list(FileTypes)+['ROOT']) # Enforcing ROOT in list, so the main file is written
        # ---Copying main file and reading it
        ext = os.path.splitext(main_file)[-1]
        fst_full = os.path.join(output_dir,strID+ext)
        shutil.copyfile(main_file, fst_full )
        Files=dict()
        Files['ROOT']=weio.FASTInFile(fst_full)
        # --- Looping through required files and opening them
        for t in FileTypes:
            # Doing a naive if
            # The reason is that we want to account for more complex file types in the future
            if t=='ROOT':
                continue
            # Duplicate the referenced sub-file under a per-case name and repoint
            # the master file to it (references are quoted in FAST input files)
            org_filename      = Files['ROOT'][t].strip('"')
            org_filename_full = os.path.join(output_dir,org_filename)
            new_filename_full = rebase_rel(org_filename,'_'+strID)
            new_filename      = os.path.relpath(new_filename_full,output_dir)
            shutil.copyfile(org_filename_full, new_filename_full)
            Files['ROOT'][t] = '"'+new_filename+'"'
            # Reading files
            Files[t]=weio.FASTInFile(new_filename_full)
        # --- Replacing in files
        for k,v in p.items():
            if k =='__index__' or k=='__name__':
                continue
            sp= k.split('|')
            kk=sp[0]
            if len(sp)==1:
                Files['ROOT'][kk]=v       # key without pipe: master file
            elif len(sp)==2:
                Files[sp[0]][sp[1]]=v     # 'SubFile|Key'
            else:
                raise Exception('Multi-level not supported')
        # --- Rewritting all files
        for t in FileTypes:
            Files[t].write()
        files.append(fst_full)
    # --- Remove extra files at the end (the un-suffixed master copy)
    if template_dir is not None:
        os.remove(main_file)
    return files
def templateReplace(PARAMS, template_dir, workdir=None, main_file=None, name_function=None, RemoveAllowed=False, RemoveRefSubFiles=False, oneSimPerDir=False):
    """ Replace parameters in a fast folder using a list of dictionaries where the keys are for instance:
        'FAST|DT', 'EDFile|GBRatio', 'ServoFile|GenEff'
    INPUTS:
       PARAMS           : dict or list of dicts, keys of the form 'SubFile|Key'
       template_dir     : folder containing the reference FAST input deck (copied to workdir)
       workdir          : destination folder; defaults to template_dir+'_Parametric'
       main_file        : .fst master file; auto-detected when the template has exactly one
       name_function    : callable p->str naming each case; else p['__name__'] is required
       RemoveAllowed    : delete pre-existing workdir content / FAST outputs first
       RemoveRefSubFiles: delete the un-suffixed reference sub-files after generation
       oneSimPerDir     : generate each case in its own sub-directory of workdir
    RETURNS: list of generated .fst paths
    """
    def fileID(s):
        # sub-file key, e.g. 'EDFile' from 'EDFile|GBRatio'
        return s.split('|')[0]
    def basename(s):
        # filename without directory or extension
        return os.path.splitext(os.path.basename(s))[0]
    def rebase(wd,s,sid):
        # place `s` (basename only) into `wd` with suffix `sid` before the extension
        split = os.path.splitext(os.path.basename(s))
        return os.path.join(wd,split[0]+sid+split[1])
    def rebase_rel(wd,s,sid):
        # same as `rebase` but keeps the relative sub-path of `s`
        split = os.path.splitext(s)
        return os.path.join(wd,split[0]+sid+split[1])
    def get_strID(p) :
        # per-case identifier: either from name_function or from p['__name__']
        if name_function is None:
            if '__name__' in p.keys():
                strID=p['__name__']
            else:
                raise Exception('When calling `templateReplace`, either provide a naming function or profile the key `__name_` in the parameter dictionaries')
        else:
            strID =name_function(p)
        return strID
    # --- Safety checks
    if not os.path.exists(template_dir):
        raise Exception('Template directory does not exist: '+template_dir)
    # Default value of workdir if not provided (strip trailing separator first)
    if template_dir[-1]=='/' or template_dir[-1]=='\\' :
        template_dir=template_dir[0:-1]
    if workdir is None:
        workdir=template_dir+'_Parametric'
    # Params need to be a list
    if not isinstance(PARAMS,list):
        PARAMS=[PARAMS]
    # One working directory per simulation, or a single shared one
    if oneSimPerDir:
        WORKDIRS=[os.path.join(workdir,get_strID(p)) for p in PARAMS]
    else:
        WORKDIRS=[workdir]*len(PARAMS)
    # Copying template folder to workdir (each unique directory once)
    for wd in list(set(WORKDIRS)):
        if RemoveAllowed:
            removeFASTOuputs(wd)
        if os.path.exists(wd) and RemoveAllowed:
            shutil.rmtree(wd, ignore_errors=False, onerror=handleRemoveReadonlyWin)
        copyTree(template_dir, wd)
        if RemoveAllowed:
            removeFASTOuputs(wd)
    # --- Fast main file use as "master"
    if main_file is None:
        # Auto-detect: exactly one .fst/.FST file must exist in the template
        FstFiles=set(glob.glob(os.path.join(template_dir,'*.fst'))+glob.glob(os.path.join(template_dir,'*.FST')))
        if len(FstFiles)>1:
            print(FstFiles)
            raise Exception('More than one fst file found in template folder, provide `main_file` or ensure there is only one `.fst` file')
        main_file=FstFiles.pop()
    # if the user provided a full path to the main file, we scrap the directory. TODO, should be cleaner
    if len(os.path.dirname(main_file))>0:
        main_file=os.path.basename(main_file)
    fastfiles=[]
    # TODO: Recursive loop splitting at the pipes '|', for now only 1 level supported...
    for ip,(wd,p) in enumerate(zip(WORKDIRS,PARAMS)):
        main_file_new=os.path.join(wd, os.path.basename(main_file))
        if '__index__' not in p.keys():
            p['__index__']=ip
        # Determine the per-case identifier used to name the generated files
        if name_function is None:
            if '__name__' in p.keys():
                strID=p['__name__']
            else:
                raise Exception('When calling `templateReplace`, either provide a naming function or profile the key `__name_` in the parameter dictionaries')
        else:
            strID =name_function(p)
        # Sub-files touched by this parameter set (e.g. 'EDFile', 'ServoFile')
        FileTypes = set([fileID(k) for k in list(p.keys()) if (k!='__index__' and k!='__name__')])
        FileTypes = set(list(FileTypes)+['FAST']) # Enforcing FAST in list, so the main fst file is written
        # ---Copying main file and reading it
        fst_full = os.path.join(wd,strID+'.fst')
        shutil.copyfile(main_file_new, fst_full )
        Files=dict()
        Files['FAST']=weio.FASTInFile(fst_full)
        # --- Looping through required files and opening them
        for t in FileTypes:
            # Doing a naive if
            # The reason is that we want to account for more complex file types in the future
            if t=='FAST':
                continue
            # Duplicate the referenced sub-file under a per-case name and repoint
            # the master .fst to it (forward slashes for FAST portability)
            org_filename = Files['FAST'][t].strip('"')
            org_filename_full =os.path.join(wd, org_filename)
            new_filename_full = rebase_rel(wd, org_filename,'_'+strID)
            new_filename = os.path.relpath(new_filename_full,wd).replace('\\','/')
            shutil.copyfile(org_filename_full, new_filename_full)
            Files['FAST'][t] = '"'+new_filename+'"'
            # Reading files
            Files[t]=weio.FASTInFile(new_filename_full)
        # --- Replacing in files
        for k,v in p.items():
            if k =='__index__' or k=='__name__':
                continue
            t,kk=k.split('|')
            Files[t][kk]=v
        # --- Rewritting all files
        for t in FileTypes:
            Files[t].write()
        fastfiles.append(fst_full)
    # --- Remove extra files at the end
    # NOTE: `FileTypes` below is the value left over from the LAST loop iteration;
    # this assumes all parameter sets touch the same sub-files -- TODO confirm.
    if RemoveRefSubFiles:
        for wd in np.unique(WORKDIRS):
            main_file_new=os.path.join(wd, os.path.basename(main_file))
            FST = weio.FASTInFile(main_file_new)
            for t in FileTypes:
                if t=='FAST':
                    continue
                filename = FST[t].strip('"')
                fullname = os.path.join(wd,filename)
                os.remove(fullname)
    # Always remove the un-suffixed master copy in each working directory
    for wd in np.unique(WORKDIRS):
        main_file_new=os.path.join(wd, os.path.basename(main_file))
        os.remove(main_file_new)
    return fastfiles
# --------------------------------------------------------------------------------}
# --- Tools for template replacement
# --------------------------------------------------------------------------------{
def paramsSteadyAero(p=None):
    """Set the AeroDyn parameters that turn off unsteady aerodynamic effects.

    Parameters
    ----------
    p : dict, optional
        Parameter dictionary (keys 'SubFile|Key') updated in place. A fresh
        dict is created when omitted. (The previous `p=dict()` default was a
        single shared dict mutated across calls.)

    Returns
    -------
    dict : the updated dictionary (same object as `p` when provided).
    """
    if p is None:
        p = {}
    p['AeroFile|AFAeroMod'] = 1  # steady airfoil aero (remove dynamic effects)
    p['AeroFile|WakeMod']   = 1  # remove dynamic inflow
    p['AeroFile|TwrPotent'] = 0  # remove tower shadow
    return p
def paramsNoGen(p=None):
    """Disable the ElastoDyn generator DOF (rotor speed held fixed).

    `p` is updated in place and returned; a fresh dict is created when omitted
    (avoids the shared mutable-default pitfall of `p=dict()`).
    """
    if p is None:
        p = {}
    p['EDFile|GenDOF'] = 'False'
    return p
def paramsGen(p=None):
    """Enable the ElastoDyn generator DOF.

    `p` is updated in place and returned; a fresh dict is created when omitted
    (avoids the shared mutable-default pitfall of `p=dict()`).
    """
    if p is None:
        p = {}
    p['EDFile|GenDOF'] = 'True'
    return p
def paramsNoController(p=None):
    """Disable all ServoDyn control modes (pitch, torque and yaw control off).

    `p` is updated in place and returned; a fresh dict is created when omitted
    (avoids the shared mutable-default pitfall of `p=dict()`).
    """
    if p is None:
        p = {}
    p['ServoFile|PCMode']   = 0  # no pitch control
    p['ServoFile|VSContrl'] = 0  # no variable-speed torque control
    p['ServoFile|YCMode']   = 0  # no yaw control
    return p
def paramsControllerDLL(p=None):
    """Route pitch/torque/yaw control to the Bladed-style controller DLL
    (mode 5) and enable the generator DOF so the controller can act.

    `p` is updated in place and returned; a fresh dict is created when omitted
    (avoids the shared mutable-default pitfall of `p=dict()`).
    """
    if p is None:
        p = {}
    p['ServoFile|PCMode']   = 5  # pitch control from DLL
    p['ServoFile|VSContrl'] = 5  # torque control from DLL
    p['ServoFile|YCMode']   = 5  # yaw control from DLL
    p['EDFile|GenDOF']      = 'True'
    return p
def paramsStiff(p=None):
    """Disable every ElastoDyn structural degree of freedom (fully rigid
    blades, drivetrain, tower and platform).

    `p` is updated in place and returned; a fresh dict is created when omitted
    (avoids the shared mutable-default pitfall of `p=dict()`).
    """
    if p is None:
        p = {}
    # All ElastoDyn DOF switches set to 'False' (FAST expects the string form)
    dofs = [
        'FlapDOF1', 'FlapDOF2', 'EdgeDOF', 'TeetDOF', 'DrTrDOF', 'YawDOF',
        'TwFADOF1', 'TwFADOF2', 'TwSSDOF1', 'TwSSDOF2',
        'PtfmSgDOF', 'PtfmSwDOF', 'PtfmHvDOF', 'PtfmRDOF', 'PtfmPDOF', 'PtfmYDOF',
    ]
    for dof in dofs:
        p['EDFile|' + dof] = 'False'
    return p
def paramsWS_RPM_Pitch(WS, RPM, Pitch, BaseDict=None, FlatInputs=False):
    """Build the parameter dictionaries of a (wind speed, rotor speed, pitch)
    parametric study, plus a naming function for the generated cases.

    Parameters
    ----------
    WS, RPM, Pitch : scalar or iterable
        Operating conditions. Scalars are promoted to one-element lists.
    BaseDict : dict, optional
        Common parameters copied into every case.
    FlatInputs : bool
        If False (default), the full outer product of the three vectors is
        generated; if True, WS/RPM/Pitch are iterated in lockstep.

    Returns
    -------
    (PARAMS, default_naming) : list of dicts and the naming callable.

    NOTE: the scalar check previously used `collections.Iterable`, which was
    removed in Python 3.10; `collections.abc.Iterable` is used instead.
    """
    from collections.abc import Iterable

    # --- Naming function appropriate for such parametric study
    def default_naming(p):  # TODO TODO CHANGE ME
        return '{:03d}_ws{:04.1f}_pt{:04.2f}_om{:04.2f}'.format(p['__index__'], p['InflowFile|HWindSpeed'], p['EDFile|BlPitch(1)'], p['EDFile|RotSpeed'])

    # --- Ensuring everything is an iterable
    def iterify(x):
        if not isinstance(x, Iterable):
            x = [x]
        return x

    WS    = iterify(WS)
    RPM   = iterify(RPM)
    Pitch = iterify(Pitch)
    # --- If inputs are not flat but different vectors to loop through, we flatten them
    if not FlatInputs:
        WS_flat    = []
        Pitch_flat = []
        RPM_flat   = []
        for pitch in Pitch:
            for rpm in RPM:
                for ws in WS:
                    WS_flat.append(ws)
                    RPM_flat.append(rpm)
                    Pitch_flat.append(pitch)
    else:
        WS_flat, Pitch_flat, RPM_flat = WS, Pitch, RPM
    # --- Defining the parametric study
    PARAMS = []
    for i, (ws, rpm, pitch) in enumerate(zip(WS_flat, RPM_flat, Pitch_flat)):
        p = dict() if BaseDict is None else BaseDict.copy()
        p['EDFile|RotSpeed']       = rpm
        p['InflowFile|HWindSpeed'] = ws
        p['InflowFile|WindType']   = 1  # steady wind
        p['EDFile|BlPitch(1)']     = pitch
        p['EDFile|BlPitch(2)']     = pitch
        p['EDFile|BlPitch(3)']     = pitch
        p['__index__']             = i
        p['__name__']              = default_naming(p)
        PARAMS.append(p)
    return PARAMS, default_naming
# --------------------------------------------------------------------------------}
# --- Tools for PostProcessing one or several simulations
# --------------------------------------------------------------------------------{
def _zero_crossings(y,x=None,direction=None):
"""
Find zero-crossing points in a discrete vector, using linear interpolation.
direction: 'up' or 'down', to select only up-crossings or down-crossings
Returns:
x values xzc such that y(yzc)==0
indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)
if direction is not provided, also returns:
sign, equal to 1 for up crossing
"""
y=np.asarray(y)
if x is None:
x=np.arange(len(y))
if np.any((x[1:] - x[0:-1]) <= 0.0):
raise Exception('x values need to be in ascending order')
# Indices before zero-crossing
iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]
# Find the zero crossing by linear interpolation
xzc = x[iBef] - y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])
# Selecting points that are exactly 0 and where neighbor change sign
iZero = np.where(y == 0.0)[0]
iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]
iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]
# Concatenate
xzc = np.concatenate((xzc, x[iZero]))
iBef = np.concatenate((iBef, iZero))
# Sort
iSort = np.argsort(xzc)
xzc, iBef = xzc[iSort], iBef[iSort]
# Return up-crossing, down crossing or both
sign = np.sign(y[iBef+1]-y[iBef])
if direction == 'up':
I= np.where(sign==1)[0]
return xzc[I],iBef[I]
elif direction == 'down':
I= np.where(sign==-1)[0]
return xzc[I],iBef[I]
elif direction is not None:
raise Exception('Direction should be either `up` or `down`')
return xzc, iBef, sign
def find_matching_pattern(List, pattern):
    """Return the elements of a list of strings that match `pattern`,
    together with the first captured group of each match.
    """
    regex = re.compile(pattern)
    elements = []
    captures = []
    for item in List:
        m = regex.search(item)
        if m is None:
            continue
        elements.append(item)
        captures.append(m.groups(1)[0])
    return elements, captures
def extractSpanTSReg(ts, col_pattern, colname, IR=None):
    """Gather spanwise channels matched by a regex (e.g. pattern 'B1N(\d*)Cl_\[-\]'
    collecting B1N1Cl, B1N2Cl, ...) into one column named `colname`.
    The captured group is the 1-based node index. Returns (colname, values)
    or (None, None) when nothing usable is found.
    """
    # Channels whose name matches the pattern, with their captured node index
    matched, captured = find_matching_pattern(ts.keys(), col_pattern)
    if len(matched) == 0:
        return (None, None)
    # Sort channels by node index
    node_ids = np.array([int(s) for s in captured])
    order    = np.argsort(node_ids)
    node_ids = node_ids[order]
    matched  = np.asarray(matched)[order]
    n_nodes  = np.max(node_ids)
    # NaN-filled column; node i goes to row i-1
    values = np.full((n_nodes, 1), np.nan)
    for node, col in zip(node_ids, matched):
        values[node - 1] = ts[col]
    n_missing = np.sum(np.isnan(values))
    if n_missing == n_nodes:
        return (None, None)
    if len(matched) < n_nodes:
        print('[WARN] Not all values found for {}, missing {}/{}'.format(colname, n_missing, n_nodes))
    if len(matched) > n_nodes:
        print('[WARN] More values found for {}, found {}/{}'.format(colname, len(matched), n_nodes))
    return (colname, values)
def extractSpanTS(ts, nr, col_pattern, colname, IR=None):
    """Gather `nr` spanwise channels into one column named `colname`.
    Channel names are built from a format pattern, e.g.
        col_pattern: 'B1N{:d}Cl_[-]'   ->  B1N1Cl_[-], B1N2Cl_[-], ...
        colname    : 'B1Cl_[-]'

    Parameters
    ----------
    ts          : mapping of channel name -> value (dict-like with .keys())
    nr          : number of spanwise nodes expected
    col_pattern : format string with one integer placeholder
    IR          : optional explicit node indices; default is 1..nr
    Returns
    -------
    (colname, values) with NaN for missing channels, or (None, None) when no
    channel is found (or all found values are NaN).

    Fixes: removed a dead `Values = np.zeros((nr,1))` initialisation that was
    immediately overwritten, and a leftover debug `print(Values)`.
    """
    if IR is None:
        cols = [col_pattern.format(ir + 1) for ir in range(nr)]
    else:
        cols = [col_pattern.format(ir) for ir in IR]
    colsExist = [c for c in cols if c in ts.keys()]
    if len(colsExist) == 0:
        return (None, None)
    # NaN placeholder for channels absent from ts
    Values = [ts[c] if c in ts.keys() else np.nan for c in cols]
    nMissing = np.sum(np.isnan(Values))
    if nMissing == nr:
        return (None, None)
    if len(colsExist) < nr:
        print('[WARN] Not all values found for {}, missing {}/{}'.format(colname, nMissing, nr))
    if len(colsExist) > nr:
        print('[WARN] More values found for {}, found {}/{}'.format(colname, len(cols), nr))
    return (colname, Values)
def averageDF(df, avgMethod='periods', avgParam=None, ColMap=None, ColKeep=None, ColSort=None, stats=['mean']):
    """
    Average the signals of one FAST output dataframe over a trailing window.
    See `averagePostPro` for the documentation of the arguments (same
    interface, applied to a single dataframe). Requires a 'Time_[s]' column.

    Returns a one-row DataFrame of window means.

    Fixes: in the 'periods_omega' branch, `tEnd` was read before ever being
    assigned (NameError); it is now initialised to the last valid time.
    """
    def renameCol(x):
        # reverse lookup: original channel name -> user alias from ColMap
        for k, v in ColMap.items():
            if x == v:
                return k
        return x
    # Before doing the column map we store the time
    time = df['Time_[s]'].values
    timenoNA = time[~np.isnan(time)]
    # Column mapping
    if ColMap is not None:
        ColMapMiss = [v for _, v in ColMap.items() if v not in df.columns.values]
        if len(ColMapMiss) > 0:
            print('[WARN] Signals missing and omitted for ColMap:\n '+'\n '.join(ColMapMiss))
        df.rename(columns=renameCol, inplace=True)
    ## Defining a window for stats (start time and end time)
    if avgMethod.lower() == 'constantwindow':
        # Window = last `avgParam` seconds (or the whole record)
        tEnd = timenoNA[-1]
        if avgParam is None:
            tStart = timenoNA[0]
        else:
            tStart = tEnd-avgParam
    elif avgMethod.lower() == 'periods':
        # --- Using azimuth to find periods
        if 'Azimuth_[deg]' not in df.columns:
            raise Exception('The sensor `Azimuth_[deg]` does not appear to be in the output file. You cannot use the averaging method by `periods`, use `constantwindow` instead.')
        # NOTE: potentially we could average over each period and then average
        psi = df['Azimuth_[deg]'].values
        # Up-crossings of the azimuth through its value 10 samples before the end
        _, iBef = _zero_crossings(psi-psi[-10], direction='up')
        if len(iBef) == 0:
            _, iBef = _zero_crossings(psi-180, direction='up')
        if len(iBef) == 0:
            print('[WARN] Not able to find a zero crossing!')
            tEnd = time[-1]
            iBef = [0]
        else:
            tEnd = time[iBef[-1]]
        if avgParam is None:
            tStart = time[iBef[0]]
        else:
            avgParam = int(avgParam)
            if len(iBef)-1 < avgParam:
                print('[WARN] Not enough periods found ({}) compared to number requested to average ({})!'.format(len(iBef)-1, avgParam))
                avgParam = len(iBef)-1
            if avgParam == 0:
                tStart = time[0]
                tEnd   = time[-1]
            else:
                tStart = time[iBef[-1-avgParam]]
    elif avgMethod.lower() == 'periods_omega':
        # --- Using average omega to find periods
        if 'RotSpeed_[rpm]' not in df.columns:
            raise Exception('The sensor `RotSpeed_[rpm]` does not appear to be in the output file. You cannot use the averaging method by `periods_omega`, use `periods` or `constantwindow` instead.')
        # BUGFIX: tEnd was previously undefined in this branch
        tEnd   = timenoNA[-1]
        Omega  = df['RotSpeed_[rpm]'].mean()/60*2*np.pi  # mean rotor speed [rad/s]
        Period = 2*np.pi/Omega                           # mean rotation period [s]
        if avgParam is None:
            nRotations = np.floor(tEnd/Period)
        else:
            nRotations = avgParam
        tStart = tEnd-Period*nRotations
    else:
        raise Exception('Unknown averaging method {}'.format(avgMethod))
    # Narrowing number of columns here (azimuth needed above)
    if ColKeep is not None:
        ColKeepSafe = [c for c in ColKeep if c in df.columns.values]
        ColKeepMiss = [c for c in ColKeep if c not in df.columns.values]
        if len(ColKeepMiss) > 0:
            print('[WARN] Signals missing and omitted for ColKeep:\n '+'\n '.join(ColKeepMiss))
        df = df[ColKeepSafe]
    if tStart < time[0]:
        print('[WARN] Simulation time ({}) too short compared to required averaging window ({})!'.format(tEnd-time[0], tStart-tEnd))
    IWindow = np.where((time >= tStart) & (time <= tEnd) & (~np.isnan(time)))[0]
    iEnd    = IWindow[-1]
    iStart  = IWindow[0]
    ## Absolute and relative differences at window extremities
    # (currently diagnostic only; kept for future steadiness checks)
    DeltaValuesAbs = (df.iloc[iEnd]-df.iloc[iStart]).abs()
    DeltaValuesRel = (df.iloc[IWindow].max()-df.iloc[IWindow].min())/df.iloc[IWindow].mean()
    ## Stats values during window
    if 'mean' in stats:
        MeanValues = pd.DataFrame(df.iloc[IWindow].mean()).transpose()
    else:
        raise NotImplementedError()
    return MeanValues
def averagePostPro(outFiles, avgMethod='periods', avgParam=None, ColMap=None, ColKeep=None, ColSort=None, stats=['mean']):
    """ Opens a list of FAST output files, perform average of its signals and return a panda dataframe
    For now, the scripts only computes the mean within a time window which may be a constant or a time that is a function of the rotational speed (see `avgMethod`).
    The script only computes the mean for now. Other stats will be added

    `ColMap` : dictionary where the key is the new column name, and v the old column name.
                Default: None, output is not sorted
                NOTE: the mapping is done before sorting and `ColKeep` is applied
                ColMap = {'WS':Wind1VelX_[m/s], 'RPM': 'RotSpeed_[rpm]'}
    `ColKeep` : List of strings corresponding to the signals to analyse.
                Default: None, all columns are analysed
                Example: ColKeep=['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]']
                     or: ColKeep=list(ColMap.keys())
    `avgMethod` : string defining the method used to determine the extent of the averaging window:
                - 'periods': use a number of periods(`avgParam`), determined by the azimuth.
                - 'periods_omega': use a number of periods(`avgParam`), determined by the mean RPM
                - 'constantwindow': the averaging window is constant (defined by `avgParam`).
    `avgParam`: based on `avgMethod` it is either
                - for 'periods_*': the number of revolutions for the window.
                   Default: None, as many period as possible are used
                - for 'constantwindow': the number of seconds for the window
                   Default: None, full simulation length is used

    Returns one row per file, in the order of `outFiles` (or sorted by
    `ColSort` when provided).
    """
    result = None
    for i, f in enumerate(outFiles):
        df = weio.read(f).toDataFrame()
        # One-row DataFrame of window averages for this output file
        MeanValues = averageDF(df, avgMethod=avgMethod, avgParam=avgParam, ColMap=ColMap, ColKeep=ColKeep, ColSort=ColSort, stats=stats)
        if result is None:
            result = MeanValues.copy()
        else:
            # DataFrame.append was deprecated and removed in pandas >= 2.0
            result = pd.concat((result, MeanValues), ignore_index=True)
    if ColSort is not None:
        # Sorting
        result.sort_values([ColSort], inplace=True, ascending=True)
        result.reset_index(drop=True, inplace=True)
    return result
# --------------------------------------------------------------------------------}
# --- Tools for typical wind turbine study
# --------------------------------------------------------------------------------{
def CPCT_LambdaPitch(refdir,main_fastfile,Lambda=None,Pitch=np.linspace(-10,40,5),WS=None,Omega=None, # operating conditions
        TMax=20,bStiff=True,bNoGen=True,bSteadyAero=True, # simulation options
        ReRun=True,
        fastExe=None,ShowOutputs=True,nCores=4): # execution options
    """ Computes CP and CT as function of tip speed ratio (lambda) and pitch.
    There are two main ways to define the inputs:
      - Option 1: provide Lambda and Pitch (deg)
      - Option 2: provide WS (m/s), Omega (in rpm) and Pitch (deg), in which case len(WS)==len(Omega)
    Generates one FAST run per operating point in `refdir+'_CPLambdaPitch'`,
    runs them, then averages RtAeroCp/RtAeroCt over the last period.
    Returns (MCP, MCT, Lambda, Pitch, MaxVal, result).
    NOTE(review): the `Pitch` default `np.linspace(-10,40,5)` is evaluated once
    at import and shared across calls (read-only here, so harmless).
    NOTE(review): the final `reshape((len(Lambda),len(Pitch)))` requires Lambda
    to be provided (Option 1); Option 2 would fail there — confirm intent.
    """
    WS_default=5 # If user does not provide a wind speed vector, wind speed used
    # if the user provided a full path to the main file, we scrap the directory. TODO, should be cleaner
    if len(os.path.dirname(main_fastfile))>0:
        main_fastfile=os.path.basename(main_fastfile)
    # --- Reading main fast file to get rotor radius
    fst = weio.FASTInFile(os.path.join(refdir,main_fastfile))
    ed  = weio.FASTInFile(os.path.join(refdir,fst['EDFile'].replace('"','')))
    R = ed['TipRad']
    # --- Making sure we have consistent WS/Omega vectors
    if (Omega is not None):
        if (Lambda is not None):
            WS = np.ones(Omega.shape)*WS_default
        elif (WS is not None):
            if len(WS)!=len(Omega):
                raise Exception('When providing Omega and WS, both vectors should have the same dimension')
        else:
            WS = np.ones(Omega.shape)*WS_default
    else:
        # Derive Omega [rpm] from the tip speed ratios at the default wind speed
        Omega = WS_default * Lambda/R*60/(2*np.pi) # TODO, use more realistic combinations of WS and Omega
        WS    = np.ones(Omega.shape)*WS_default
    # --- Defining flat vectors of operating conditions (outer product with Pitch)
    WS_flat    = []
    RPM_flat   = []
    Pitch_flat = []
    for pitch in Pitch:
        for (rpm,ws) in zip(Omega,WS):
            WS_flat.append(ws)
            RPM_flat.append(rpm)
            Pitch_flat.append(pitch)
    # --- Setting up default options
    BaseDict={'FAST|TMax': TMax, 'FAST|DT': 0.01, 'FAST|DT_Out': 0.1} # NOTE: Tmax should be at least 2pi/Omega
    BaseDict = paramsNoController(BaseDict)
    if bStiff:
        BaseDict = paramsStiff(BaseDict)
    if bNoGen:
        BaseDict = paramsNoGen(BaseDict)
    if bSteadyAero:
        BaseDict = paramsSteadyAero(BaseDict)
    # --- Creating set of parameters to be changed
    # TODO: verify that RtAeroCp and RtAeroCt are present in AeroDyn outlist
    PARAMS,naming = paramsWS_RPM_Pitch(WS_flat,RPM_flat,Pitch_flat,BaseDict=BaseDict, FlatInputs=True)
    # --- Generating all files in a workdir
    workdir = refdir.strip('/').strip('\\')+'_CPLambdaPitch'
    print('>>> Generating inputs files in {}'.format(workdir))
    RemoveAllowed=ReRun # If the user want to rerun, we can remove, otherwise we keep existing simulations
    fastFiles=templateReplace(PARAMS, refdir, workdir=workdir,name_function=naming,RemoveRefSubFiles=True,RemoveAllowed=RemoveAllowed,main_file=main_fastfile)
    # --- Running fast simulations
    print('>>> Running {} simulations...'.format(len(fastFiles)))
    run_fastfiles(fastFiles, ShowOutputs=ShowOutputs, fastExe=fastExe, nCores=nCores, ReRun=ReRun)
    # --- Postpro - Computing averages at the end of the simluation
    print('>>> Postprocessing...')
    outFiles = [os.path.splitext(f)[0]+'.outb' for f in fastFiles]
    ColKeepStats = ['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]','Wind1VelX_[m/s]']
    result = averagePostPro(outFiles,avgMethod='periods',avgParam=1,ColKeep=ColKeepStats,ColSort='RotSpeed_[rpm]')
    # --- Adding lambda, sorting and keeping only few columns
    # lambda = Omega*R/WS with Omega converted from rpm to rad/s
    result['lambda_[-]'] = result['RotSpeed_[rpm]']*R*2*np.pi/60/result['Wind1VelX_[m/s]']
    result.sort_values(['lambda_[-]','BldPitch1_[deg]'],ascending=[True,True],inplace=True)
    ColKeepFinal=['lambda_[-]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]']
    result=result[ColKeepFinal]
    print('>>> Done')
    # --- Converting to matrices (rows: Lambda, columns: Pitch)
    CP = result['RtAeroCp_[-]'].values
    CT = result['RtAeroCt_[-]'].values
    MCP =CP.reshape((len(Lambda),len(Pitch)))
    MCT =CT.reshape((len(Lambda),len(Pitch)))
    LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
    # --- CP max: locate optimum over the (lambda, pitch) grid
    i,j = np.unravel_index(MCP.argmax(), MCP.shape)
    MaxVal={'CP_max':MCP[i,j],'lambda_opt':LAMBDA[j,i],'pitch_opt':PITCH[j,i]}
    return MCP,MCT,Lambda,Pitch,MaxVal,result
# def detectFastFiles(workdir):
# FstFiles=glob.glob(os.path.join(workdir,'*.fst'))+glob.glob(os.path.join(workdir,'*.FST'))
# DatFiles=glob.glob(os.path.join(workdir,'*.dat'))+glob.glob(os.path.join(workdir,'*.DAT'))
# Files=dict()
# Files['Main'] = FstFiles
# Files['Inflow'] = None
# Files['Aero'] = None
# Files['Tower'] = None
# Files['Blade'] = None
# Files['AeroBlade'] = None
# Files['ServoDyn'] = None
# for f in DatFiles:
# b = os.path.basename(f).lower()
# if b.find('inflow'):
# Files['Inflow'] = f
# windfile_ref = 'InflowWind.dat';
# fastfile_ref = 'Turbine.fst';
# elasfile_ref = 'ElastoDyn.dat';
# remove
if __name__=='__main__':
    pass
    # --- Test of templateReplace
    # Example of a single-case parametric substitution; each key targets
    # 'SubFile|Key' in the FAST input deck.
    # NOTE(review): `ref_dir` is not defined anywhere in this file, so running
    # this module as a script raises NameError at the call below — set it to a
    # valid template directory before use.
    def naming(p):
        # case name derived from the hub-height wind speed
        return '_ws_'+str(p['InflowFile|HWindSpeed'])
    PARAMS                          = {}
    PARAMS['FAST|TMax']             = 10
    PARAMS['FAST|DT']               = 0.01
    PARAMS['FAST|DT_Out']           = 0.1
    PARAMS['EDFile|RotSpeed']       = 100
    PARAMS['EDFile|BlPitch(1)']     = 1
    PARAMS['EDFile|GBoxEff']        = 0.92
    PARAMS['ServoFile|VS_Rgn2K']    = 0.00038245
    PARAMS['ServoFile|GenEff']      = 0.95
    PARAMS['InflowFile|HWindSpeed'] = 8
    templateReplace(PARAMS,ref_dir,name_function=naming,RemoveRefSubFiles=True)
| [
"os.remove",
"welib.weio.read",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"numpy.argsort",
"os.path.isfile",
"numpy.arange",
"shutil.rmtree",
"os.path.join",
"numpy.unique",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"os.path.abspath",
"numpy.meshgrid",
"os.path.dirname",
"os... | [((827, 845), 'welib.weio.FASTWndFile', 'weio.FASTWndFile', ([], {}), '()\n', (843, 845), True, 'import welib.weio as weio\n'), ((857, 897), 'numpy.arange', 'np.arange', (['WSmin', '(WSmax + WSstep)', 'WSstep'], {}), '(WSmin, WSmax + WSstep, WSstep)\n', (866, 897), True, 'import numpy as np\n'), ((970, 992), 'numpy.zeros', 'np.zeros', (['(nRow, nCol)'], {}), '((nRow, nCol))\n', (978, 992), True, 'import numpy as np\n'), ((1369, 1409), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'M', 'columns': 'f.colNames'}), '(data=M, columns=f.colNames)\n', (1381, 1409), True, 'import pandas as pd\n'), ((7340, 7392), 'numpy.arange', 'np.arange', (['(1.0 / nBldNodes / 2.0)', '(1)', '(1.0 / nBldNodes)'], {}), '(1.0 / nBldNodes / 2.0, 1, 1.0 / nBldNodes)\n', (7349, 7392), True, 'import numpy as np\n'), ((8090, 8142), 'numpy.arange', 'np.arange', (['(1.0 / nTwrNodes / 2.0)', '(1)', '(1.0 / nTwrNodes)'], {}), '(1.0 / nTwrNodes / 2.0, 1, 1.0 / nTwrNodes)\n', (8099, 8142), True, 'import numpy as np\n'), ((11334, 11365), 'numpy.array', 'np.array', (["AD['BlOutNd'][:nOuts]"], {}), "(AD['BlOutNd'][:nOuts])\n", (11342, 11365), True, 'import numpy as np\n'), ((12181, 12238), 'numpy.column_stack', 'np.column_stack', (['[c for _, c in Columns if c is not None]'], {}), '([c for _, c in Columns if c is not None])\n', (12196, 12238), True, 'import numpy as np\n'), ((18327, 18342), 'numpy.max', 'np.max', (['Lengths'], {}), '(Lengths)\n', (18333, 18342), True, 'import numpy as np\n'), ((18351, 18367), 'numpy.arange', 'np.arange', (['nrMax'], {}), '(nrMax)\n', (18360, 18367), True, 'import numpy as np\n'), ((19080, 19114), 'numpy.column_stack', 'np.column_stack', (['[d for d in data]'], {}), '([d for d in data])\n', (19095, 19114), True, 'import numpy as np\n'), ((19408, 19454), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dataStack', 'columns': 'ColNames'}), '(data=dataStack, columns=ColNames)\n', (19420, 19454), True, 'import pandas as pd\n'), ((25446, 25461), 'os.listdir', 
'os.listdir', (['src'], {}), '(src)\n', (25456, 25461), False, 'import os\n'), ((37237, 37256), 'numpy.unique', 'np.unique', (['WORKDIRS'], {}), '(WORKDIRS)\n', (37246, 37256), True, 'import numpy as np\n'), ((41306, 41319), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (41316, 41319), True, 'import numpy as np\n'), ((41374, 41404), 'numpy.any', 'np.any', (['(x[1:] - x[0:-1] <= 0.0)'], {}), '(x[1:] - x[0:-1] <= 0.0)\n', (41380, 41404), True, 'import numpy as np\n'), ((41950, 41981), 'numpy.concatenate', 'np.concatenate', (['(xzc, x[iZero])'], {}), '((xzc, x[iZero]))\n', (41964, 41981), True, 'import numpy as np\n'), ((41993, 42022), 'numpy.concatenate', 'np.concatenate', (['(iBef, iZero)'], {}), '((iBef, iZero))\n', (42007, 42022), True, 'import numpy as np\n'), ((42047, 42062), 'numpy.argsort', 'np.argsort', (['xzc'], {}), '(xzc)\n', (42057, 42062), True, 'import numpy as np\n'), ((42163, 42193), 'numpy.sign', 'np.sign', (['(y[iBef + 1] - y[iBef])'], {}), '(y[iBef + 1] - y[iBef])\n', (42170, 42193), True, 'import numpy as np\n'), ((42676, 42695), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (42686, 42695), False, 'import re\n'), ((43374, 43390), 'numpy.asarray', 'np.asarray', (['cols'], {}), '(cols)\n', (43384, 43390), True, 'import numpy as np\n'), ((43447, 43462), 'numpy.argsort', 'np.argsort', (['Idx'], {}), '(Idx)\n', (43457, 43462), True, 'import numpy as np\n'), ((43522, 43533), 'numpy.max', 'np.max', (['Idx'], {}), '(Idx)\n', (43528, 43533), True, 'import numpy as np\n'), ((43547, 43567), 'numpy.zeros', 'np.zeros', (['(nrMax, 1)'], {}), '((nrMax, 1))\n', (43555, 43567), True, 'import numpy as np\n'), ((44430, 44447), 'numpy.zeros', 'np.zeros', (['(nr, 1)'], {}), '((nr, 1))\n', (44438, 44447), True, 'import numpy as np\n'), ((52247, 52270), 'numpy.linspace', 'np.linspace', (['(-10)', '(40)', '(5)'], {}), '(-10, 40, 5)\n', (52258, 52270), True, 'import numpy as np\n'), ((56429, 56455), 'numpy.meshgrid', 'np.meshgrid', (['Lambda', 
'Pitch'], {}), '(Lambda, Pitch)\n', (56440, 56455), True, 'import numpy as np\n'), ((2823, 2850), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2848, 2850), False, 'import multiprocessing\n'), ((4130, 4155), 'os.path.isabs', 'os.path.isabs', (['input_file'], {}), '(input_file)\n', (4143, 4155), False, 'import os\n'), ((4180, 4207), 'os.path.abspath', 'os.path.abspath', (['input_file'], {}), '(input_file)\n', (4195, 4207), False, 'import os\n'), ((4263, 4282), 'os.path.exists', 'os.path.exists', (['exe'], {}), '(exe)\n', (4277, 4282), False, 'import os\n'), ((4618, 4693), 'subprocess.call', 'subprocess.call', (['args'], {'stdout': 'STDOut', 'stderr': 'subprocess.STDOUT', 'shell': 'shell'}), '(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)\n', (4633, 4693), False, 'import subprocess\n'), ((4715, 4791), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'STDOut', 'stderr': 'subprocess.STDOUT', 'shell': 'shell'}), '(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)\n', (4731, 4791), False, 'import subprocess\n'), ((6272, 6302), 'os.path.join', 'os.path.join', (['workdir', '"""*.out"""'], {}), "(workdir, '*.out')\n", (6284, 6302), False, 'import os\n'), ((6312, 6324), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (6321, 6324), False, 'import os\n'), ((6348, 6379), 'os.path.join', 'os.path.join', (['workdir', '"""*.outb"""'], {}), "(workdir, '*.outb')\n", (6360, 6379), False, 'import os\n'), ((6389, 6401), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (6398, 6401), False, 'import os\n'), ((6425, 6455), 'os.path.join', 'os.path.join', (['workdir', '"""*.ech"""'], {}), "(workdir, '*.ech')\n", (6437, 6455), False, 'import os\n'), ((6465, 6477), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (6474, 6477), False, 'import os\n'), ((6501, 6531), 'os.path.join', 'os.path.join', (['workdir', '"""*.sum"""'], {}), "(workdir, '*.sum')\n", (6513, 6531), False, 'import os\n'), ((6541, 6553), 'os.remove', 
'os.remove', (['f'], {}), '(f)\n', (6550, 6553), False, 'import os\n'), ((7269, 7288), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['ED'], {}), '(ED)\n', (7284, 7288), True, 'import welib.weio as weio\n'), ((8019, 8038), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['ED'], {}), '(ED)\n', (8034, 8038), True, 'import welib.weio as weio\n'), ((8661, 8680), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['ED'], {}), '(ED)\n', (8676, 8680), True, 'import welib.weio as weio\n'), ((8774, 8786), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8782, 8786), True, 'import numpy as np\n'), ((8841, 8867), 'numpy.asarray', 'np.asarray', (["ED['BldGagNd']"], {}), "(ED['BldGagNd'])\n", (8851, 8867), True, 'import numpy as np\n'), ((8895, 8921), 'numpy.array', 'np.array', (["[ED['BldGagNd']]"], {}), "([ED['BldGagNd']])\n", (8903, 8921), True, 'import numpy as np\n'), ((9406, 9425), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['ED'], {}), '(ED)\n', (9421, 9425), True, 'import welib.weio as weio\n'), ((9519, 9531), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9527, 9531), True, 'import numpy as np\n'), ((9586, 9612), 'numpy.asarray', 'np.asarray', (["ED['TwrGagNd']"], {}), "(ED['TwrGagNd'])\n", (9596, 9612), True, 'import numpy as np\n'), ((9640, 9666), 'numpy.array', 'np.array', (["[ED['TwrGagNd']]"], {}), "([ED['TwrGagNd']])\n", (9648, 9666), True, 'import numpy as np\n'), ((10172, 10191), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['AD'], {}), '(AD)\n', (10187, 10191), True, 'import welib.weio as weio\n'), ((10348, 10387), 'numpy.array', 'np.array', (['[(True) for n in Nodes[:, 0]]'], {}), '([(True) for n in Nodes[:, 0]])\n', (10356, 10387), True, 'import numpy as np\n'), ((11117, 11136), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['AD'], {}), '(AD)\n', (11132, 11136), True, 'import welib.weio as weio\n'), ((11201, 11224), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['AD_bld'], {}), '(AD_bld)\n', (11216, 11224), True, 'import welib.weio as weio\n'), ((11308, 
11320), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11316, 11320), True, 'import numpy as np\n'), ((12461, 12502), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'ColNames'}), '(data=data, columns=ColNames)\n', (12473, 12502), True, 'import pandas as pd\n'), ((21497, 21523), 'welib.weio.FASTInputDeck', 'weio.FASTInputDeck', (['FST_In'], {}), '(FST_In)\n', (21515, 21523), True, 'import welib.weio as weio\n'), ((24295, 24319), 'os.access', 'os.access', (['path', 'os.W_OK'], {}), '(path, os.W_OK)\n', (24304, 24319), False, 'import os\n'), ((24370, 24398), 'os.chmod', 'os.chmod', (['path', 'stat.S_IWUSR'], {}), '(path, stat.S_IWUSR)\n', (24378, 24398), False, 'import os\n'), ((24788, 24806), 'os.listdir', 'os.listdir', (['srcDir'], {}), '(srcDir)\n', (24798, 24806), False, 'import os\n'), ((25047, 25068), 'os.path.isfile', 'os.path.isfile', (['dfile'], {}), '(dfile)\n', (25061, 25068), False, 'import os\n'), ((25206, 25232), 'shutil.copy2', 'shutil.copy2', (['sfile', 'dfile'], {}), '(sfile, dfile)\n', (25218, 25232), False, 'import shutil\n'), ((25280, 25296), 'os.listdir', 'os.listdir', (['sDir'], {}), '(sDir)\n', (25290, 25296), False, 'import os\n'), ((25475, 25498), 'os.path.join', 'os.path.join', (['src', 'item'], {}), '(src, item)\n', (25487, 25498), False, 'import os\n'), ((25511, 25534), 'os.path.join', 'os.path.join', (['dst', 'item'], {}), '(dst, item)\n', (25523, 25534), False, 'import os\n'), ((25546, 25563), 'os.path.isfile', 'os.path.isfile', (['s'], {}), '(s)\n', (25560, 25563), False, 'import os\n'), ((25680, 25696), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (25693, 25696), False, 'import os\n'), ((27300, 27351), 'os.path.join', 'os.path.join', (['output_dir', '(split[0] + sid + split[1])'], {}), '(output_dir, split[0] + sid + split[1])\n', (27312, 27351), False, 'import os\n'), ((27390, 27409), 'os.path.splitext', 'os.path.splitext', (['s'], {}), '(s)\n', (27406, 27409), False, 'import os\n'), ((27425, 
27476), 'os.path.join', 'os.path.join', (['output_dir', '(split[0] + sid + split[1])'], {}), '(output_dir, split[0] + sid + split[1])\n', (27437, 27476), False, 'import os\n'), ((28129, 28155), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (28143, 28155), False, 'import os\n'), ((28183, 28262), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {'ignore_errors': '(False)', 'onerror': 'handleRemoveReadonlyWin'}), '(output_dir, ignore_errors=False, onerror=handleRemoveReadonlyWin)\n', (28196, 28262), False, 'import shutil\n'), ((29603, 29640), 'os.path.join', 'os.path.join', (['output_dir', '(strID + ext)'], {}), '(output_dir, strID + ext)\n', (29615, 29640), False, 'import os\n'), ((29646, 29682), 'shutil.copyfile', 'shutil.copyfile', (['main_file', 'fst_full'], {}), '(main_file, fst_full)\n', (29661, 29682), False, 'import shutil\n'), ((29727, 29752), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['fst_full'], {}), '(fst_full)\n', (29742, 29752), True, 'import welib.weio as weio\n'), ((31305, 31325), 'os.remove', 'os.remove', (['main_file'], {}), '(main_file)\n', (31314, 31325), False, 'import os\n'), ((31896, 31939), 'os.path.join', 'os.path.join', (['wd', '(split[0] + sid + split[1])'], {}), '(wd, split[0] + sid + split[1])\n', (31908, 31939), False, 'import os\n'), ((31981, 32000), 'os.path.splitext', 'os.path.splitext', (['s'], {}), '(s)\n', (31997, 32000), False, 'import os\n'), ((32016, 32059), 'os.path.join', 'os.path.join', (['wd', '(split[0] + sid + split[1])'], {}), '(wd, split[0] + sid + split[1])\n', (32028, 32059), False, 'import os\n'), ((32471, 32499), 'os.path.exists', 'os.path.exists', (['template_dir'], {}), '(template_dir)\n', (32485, 32499), False, 'import os\n'), ((33966, 33993), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (33982, 33993), False, 'import os\n'), ((34916, 34948), 'os.path.join', 'os.path.join', (['wd', "(strID + '.fst')"], {}), "(wd, strID + '.fst')\n", (34928, 
34948), False, 'import os\n'), ((34954, 34994), 'shutil.copyfile', 'shutil.copyfile', (['main_file_new', 'fst_full'], {}), '(main_file_new, fst_full)\n', (34969, 34994), False, 'import shutil\n'), ((35039, 35064), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['fst_full'], {}), '(fst_full)\n', (35054, 35064), True, 'import welib.weio as weio\n'), ((36801, 36820), 'numpy.unique', 'np.unique', (['WORKDIRS'], {}), '(WORKDIRS)\n', (36810, 36820), True, 'import numpy as np\n'), ((37334, 37358), 'os.remove', 'os.remove', (['main_file_new'], {}), '(main_file_new)\n', (37343, 37358), False, 'import os\n'), ((41521, 41552), 'numpy.where', 'np.where', (['(y[1:] * y[0:-1] < 0.0)'], {}), '(y[1:] * y[0:-1] < 0.0)\n', (41529, 41552), True, 'import numpy as np\n'), ((41778, 41796), 'numpy.where', 'np.where', (['(y == 0.0)'], {}), '(y == 0.0)\n', (41786, 41796), True, 'import numpy as np\n'), ((41818, 41862), 'numpy.where', 'np.where', (['((iZero > 0) & (iZero < x.size - 1))'], {}), '((iZero > 0) & (iZero < x.size - 1))\n', (41826, 41862), True, 'import numpy as np\n'), ((41880, 41923), 'numpy.where', 'np.where', (['(y[iZero - 1] * y[iZero + 1] < 0.0)'], {}), '(y[iZero - 1] * y[iZero + 1] < 0.0)\n', (41888, 41923), True, 'import numpy as np\n'), ((43832, 43848), 'numpy.isnan', 'np.isnan', (['Values'], {}), '(Values)\n', (43840, 43848), True, 'import numpy as np\n'), ((44792, 44808), 'numpy.isnan', 'np.isnan', (['Values'], {}), '(Values)\n', (44800, 44808), True, 'import numpy as np\n'), ((53030, 53061), 'os.path.basename', 'os.path.basename', (['main_fastfile'], {}), '(main_fastfile)\n', (53046, 53061), False, 'import os\n'), ((53143, 53178), 'os.path.join', 'os.path.join', (['refdir', 'main_fastfile'], {}), '(refdir, main_fastfile)\n', (53155, 53178), False, 'import os\n'), ((18862, 18882), 'numpy.zeros', 'np.zeros', (['(nrMax, 1)'], {}), '((nrMax, 1))\n', (18870, 18882), True, 'import numpy as np\n'), ((24712, 24734), 'os.path.exists', 'os.path.exists', (['dstDir'], {}), 
'(dstDir)\n', (24726, 24734), False, 'import os\n'), ((24748, 24767), 'os.makedirs', 'os.makedirs', (['dstDir'], {}), '(dstDir)\n', (24759, 24767), False, 'import os\n'), ((24830, 24856), 'os.path.join', 'os.path.join', (['srcDir', 'item'], {}), '(srcDir, item)\n', (24842, 24856), False, 'import os\n'), ((24879, 24905), 'os.path.join', 'os.path.join', (['dstDir', 'item'], {}), '(dstDir, item)\n', (24891, 24905), False, 'import os\n'), ((25318, 25342), 'os.path.join', 'os.path.join', (['sDir', 'item'], {}), '(sDir, item)\n', (25330, 25342), False, 'import os\n'), ((25358, 25378), 'os.path.isdir', 'os.path.isdir', (['sItem'], {}), '(sItem)\n', (25371, 25378), False, 'import os\n'), ((27264, 27283), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (27280, 27283), False, 'import os\n'), ((27686, 27714), 'os.path.exists', 'os.path.exists', (['template_dir'], {}), '(template_dir)\n', (27700, 27714), False, 'import os\n'), ((28364, 28390), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (28378, 28390), False, 'import os\n'), ((28404, 28427), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (28415, 28427), False, 'import os\n'), ((28541, 28568), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (28557, 28568), False, 'import os\n'), ((29552, 29579), 'os.path.splitext', 'os.path.splitext', (['main_file'], {}), '(main_file)\n', (29568, 29579), False, 'import os\n'), ((30112, 30150), 'os.path.join', 'os.path.join', (['output_dir', 'org_filename'], {}), '(output_dir, org_filename)\n', (30124, 30150), False, 'import os\n'), ((30249, 30295), 'os.path.relpath', 'os.path.relpath', (['new_filename_full', 'output_dir'], {}), '(new_filename_full, output_dir)\n', (30264, 30295), False, 'import os\n'), ((30513, 30566), 'shutil.copyfile', 'shutil.copyfile', (['org_filename_full', 'new_filename_full'], {}), '(org_filename_full, new_filename_full)\n', (30528, 30566), False, 'import shutil\n'), 
((30668, 30702), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['new_filename_full'], {}), '(new_filename_full)\n', (30683, 30702), True, 'import welib.weio as weio\n'), ((31860, 31879), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (31876, 31879), False, 'import os\n'), ((33173, 33191), 'os.path.exists', 'os.path.exists', (['wd'], {}), '(wd)\n', (33187, 33191), False, 'import os\n'), ((33223, 33294), 'shutil.rmtree', 'shutil.rmtree', (['wd'], {'ignore_errors': '(False)', 'onerror': 'handleRemoveReadonlyWin'}), '(wd, ignore_errors=False, onerror=handleRemoveReadonlyWin)\n', (33236, 33294), False, 'import shutil\n'), ((33917, 33943), 'os.path.dirname', 'os.path.dirname', (['main_file'], {}), '(main_file)\n', (33932, 33943), False, 'import os\n'), ((34207, 34234), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (34223, 34234), False, 'import os\n'), ((35762, 35792), 'os.path.join', 'os.path.join', (['wd', 'org_filename'], {}), '(wd, org_filename)\n', (35774, 35792), False, 'import os\n'), ((36170, 36223), 'shutil.copyfile', 'shutil.copyfile', (['org_filename_full', 'new_filename_full'], {}), '(org_filename_full, new_filename_full)\n', (36185, 36223), False, 'import shutil\n'), ((36325, 36359), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['new_filename_full'], {}), '(new_filename_full)\n', (36340, 36359), True, 'import welib.weio as weio\n'), ((36912, 36942), 'welib.weio.FASTInFile', 'weio.FASTInFile', (['main_file_new'], {}), '(main_file_new)\n', (36927, 36942), True, 'import welib.weio as weio\n'), ((37297, 37324), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (37313, 37324), False, 'import os\n'), ((42227, 42246), 'numpy.where', 'np.where', (['(sign == 1)'], {}), '(sign == 1)\n', (42235, 42246), True, 'import numpy as np\n'), ((45633, 45647), 'numpy.isnan', 'np.isnan', (['time'], {}), '(time)\n', (45641, 45647), True, 'import numpy as np\n'), ((52973, 53003), 'os.path.dirname', 
'os.path.dirname', (['main_fastfile'], {}), '(main_fastfile)\n', (52988, 53003), False, 'import os\n'), ((53791, 53811), 'numpy.ones', 'np.ones', (['Omega.shape'], {}), '(Omega.shape)\n', (53798, 53811), True, 'import numpy as np\n'), ((5320, 5339), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (5336, 5339), False, 'import os\n'), ((5358, 5388), 'os.path.exists', 'os.path.exists', (["(base + '.outb')"], {}), "(base + '.outb')\n", (5372, 5388), False, 'import os\n'), ((5390, 5419), 'os.path.exists', 'os.path.exists', (["(base + '.out')"], {}), "(base + '.out')\n", (5404, 5419), False, 'import os\n'), ((6121, 6140), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (6137, 6140), False, 'import os\n'), ((19953, 19982), 'numpy.trapz', 'np.trapz', (['(vr_bar * Ct)', 'vr_bar'], {}), '(vr_bar * Ct, vr_bar)\n', (19961, 19982), True, 'import numpy as np\n'), ((20018, 20034), 'numpy.ones', 'np.ones', (['r.shape'], {}), '(r.shape)\n', (20025, 20034), True, 'import numpy as np\n'), ((25089, 25114), 'os.access', 'os.access', (['dfile', 'os.W_OK'], {}), '(dfile, os.W_OK)\n', (25098, 25114), False, 'import os\n'), ((25132, 25161), 'os.chmod', 'os.chmod', (['dfile', 'stat.S_IWUSR'], {}), '(dfile, stat.S_IWUSR)\n', (25140, 25161), False, 'import os\n'), ((25584, 25603), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (25598, 25603), False, 'import os\n'), ((25621, 25637), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (25632, 25637), False, 'import os\n'), ((27184, 27203), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (27200, 27203), False, 'import os\n'), ((31777, 31796), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (31793, 31796), False, 'import os\n'), ((36865, 36892), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (36881, 36892), False, 'import os\n'), ((37160, 37186), 'os.path.join', 'os.path.join', (['wd', 'filename'], {}), '(wd, filename)\n', (37172, 37186), 
False, 'import os\n'), ((37202, 37221), 'os.remove', 'os.remove', (['fullname'], {}), '(fullname)\n', (37211, 37221), False, 'import os\n'), ((42319, 42339), 'numpy.where', 'np.where', (['(sign == -1)'], {}), '(sign == -1)\n', (42327, 42339), True, 'import numpy as np\n'), ((51465, 51477), 'welib.weio.read', 'weio.read', (['f'], {}), '(f)\n', (51474, 51477), True, 'import welib.weio as weio\n'), ((53388, 53408), 'numpy.ones', 'np.ones', (['Omega.shape'], {}), '(Omega.shape)\n', (53395, 53408), True, 'import numpy as np\n'), ((55464, 55483), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (55480, 55483), False, 'import os\n'), ((2502, 2513), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2511, 2513), False, 'import os\n'), ((19146, 19165), 'numpy.isnan', 'np.isnan', (['dataStack'], {}), '(dataStack)\n', (19154, 19165), True, 'import numpy as np\n'), ((33488, 33523), 'os.path.join', 'os.path.join', (['template_dir', '"""*.fst"""'], {}), "(template_dir, '*.fst')\n", (33500, 33523), False, 'import os\n'), ((33534, 33569), 'os.path.join', 'os.path.join', (['template_dir', '"""*.FST"""'], {}), "(template_dir, '*.FST')\n", (33546, 33569), False, 'import os\n'), ((35896, 35934), 'os.path.relpath', 'os.path.relpath', (['new_filename_full', 'wd'], {}), '(new_filename_full, wd)\n', (35911, 35934), False, 'import os\n'), ((47926, 47949), 'numpy.floor', 'np.floor', (['(tEnd / Period)'], {}), '(tEnd / Period)\n', (47934, 47949), True, 'import numpy as np\n'), ((48729, 48743), 'numpy.isnan', 'np.isnan', (['time'], {}), '(time)\n', (48737, 48743), True, 'import numpy as np\n'), ((53626, 53646), 'numpy.ones', 'np.ones', (['Omega.shape'], {}), '(Omega.shape)\n', (53633, 53646), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import glob
import hashlib
import numpy as np
import h5py
# Make the shared testing harness (one directory up) importable.
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
# Print every float in fixed 8-digit scientific notation so the result
# strings built from numpy arrays compare deterministically across runs.
np.set_printoptions(formatter={'float_kind': '{:.8e}'.format})
class MGXSTestHarness(PyAPITestHarness):
    """Regression harness that exercises every MGXS/MDGXS tally type."""

    def __init__(self, *args, **kwargs):
        """Build the model inputs and attach a full MGXS library to them."""
        # Let the parent class generate the standard inputs first.
        super().__init__(*args, **kwargs)

        # Two-group structure with a thermal/fast cut at 0.625 eV.
        groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])

        # Build an MGXS library covering every available cross-section type,
        # including the delayed (MDGXS) ones, on material domains.
        lib = openmc.mgxs.Library(self._model.geometry)
        lib.by_nuclide = False
        lib.mgxs_types = openmc.mgxs.MGXS_TYPES + openmc.mgxs.MDGXS_TYPES
        lib.energy_groups = groups
        lib.num_delayed_groups = 6
        lib.legendre_order = 3
        lib.domain_type = 'material'
        lib.build_library()
        self.mgxs_lib = lib

        # Register the library's tallies with the model (no merging, so each
        # tally stays individually identifiable in the statepoint).
        self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)

    def _get_results(self, hash_output=False):
        """Digest info in the statepoint and return as a string."""
        # Locate and load the statepoint file produced by the run.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)

        # Reconstruct the MGXS library from the statepoint tallies and dump
        # it to an HDF5 store in the current directory.
        self.mgxs_lib.load_from_statepoint(sp)
        self.mgxs_lib.build_hdf5_store(directory='.')

        # Serialize every (domain, mgxs_type) average/std-dev dataset pair
        # into one big string.
        pieces = []
        with h5py.File('mgxs.h5', 'r') as f:
            for domain in self.mgxs_lib.domains:
                for mgxs_type in self.mgxs_lib.mgxs_types:
                    pieces.append('domain={0} type={1}\n'.format(domain.id, mgxs_type))
                    avg_key = 'material/{0}/{1}/average'.format(domain.id, mgxs_type)
                    std_key = 'material/{0}/{1}/std. dev.'.format(domain.id, mgxs_type)
                    pieces.append('{}\n{}\n'.format(f[avg_key][...], f[std_key][...]))
        outstr = ''.join(pieces)

        # Optionally collapse the (potentially huge) output to a digest.
        if hash_output:
            sha512 = hashlib.sha512()
            sha512.update(outstr.encode('utf-8'))
            outstr = sha512.hexdigest()

        return outstr

    def _cleanup(self):
        """Remove the generated MGXS HDF5 store on top of the usual cleanup."""
        super()._cleanup()
        store = os.path.join(os.getcwd(), 'mgxs.h5')
        if os.path.exists(store):
            os.remove(store)
if __name__ == '__main__':
    # Build the PWR pin-cell example model and run the regression harness
    # against the statepoint written after 10 batches.
    model = pwr_pin_cell()
    harness = MGXSTestHarness('statepoint.10.h5', model)
    harness.main()
| [
"openmc.examples.pwr_pin_cell",
"openmc.StatePoint",
"h5py.File",
"numpy.set_printoptions",
"os.remove",
"os.getcwd",
"os.path.exists",
"sys.path.insert",
"hashlib.sha512",
"openmc.mgxs.EnergyGroups",
"openmc.mgxs.Library"
] | [((104, 133), 'sys.path.insert', 'sys.path.insert', (['(0)', 'os.pardir'], {}), '(0, os.pardir)\n', (119, 133), False, 'import sys\n'), ((255, 317), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float_kind': '{:.8e}'.format}"}), "(formatter={'float_kind': '{:.8e}'.format})\n", (274, 317), True, 'import numpy as np\n'), ((2815, 2829), 'openmc.examples.pwr_pin_cell', 'pwr_pin_cell', ([], {}), '()\n', (2827, 2829), False, 'from openmc.examples import pwr_pin_cell\n'), ((586, 646), 'openmc.mgxs.EnergyGroups', 'openmc.mgxs.EnergyGroups', ([], {'group_edges': '[0, 0.625, 20000000.0]'}), '(group_edges=[0, 0.625, 20000000.0])\n', (610, 646), False, 'import openmc\n'), ((731, 772), 'openmc.mgxs.Library', 'openmc.mgxs.Library', (['self._model.geometry'], {}), '(self._model.geometry)\n', (750, 772), False, 'import openmc\n'), ((1530, 1559), 'openmc.StatePoint', 'openmc.StatePoint', (['statepoint'], {}), '(statepoint)\n', (1547, 1559), False, 'import openmc\n'), ((2730, 2747), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (2744, 2747), False, 'import os\n'), ((1813, 1838), 'h5py.File', 'h5py.File', (['"""mgxs.h5"""', '"""r"""'], {}), "('mgxs.h5', 'r')\n", (1822, 1838), False, 'import h5py\n'), ((2467, 2483), 'hashlib.sha512', 'hashlib.sha512', ([], {}), '()\n', (2481, 2483), False, 'import hashlib\n'), ((2695, 2706), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2704, 2706), False, 'import os\n'), ((2761, 2773), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (2770, 2773), False, 'import os\n'), ((1485, 1496), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1494, 1496), False, 'import os\n')] |
from math import sin, pi
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from common_math.math import safe_log10
from example_setups.setup import setup
class rfController:
    """Synthesizes fixed-point sinusoid sample buffers.

    Frequencies are snapped so that a whole number of cycles fits into one
    buffer period, which keeps the generated tones spectrally clean when
    the buffer is replayed cyclically.
    """

    def __init__(self, Fs, length=2 ** 14, resolution=2 ** 16):
        # Sample rate in Hz (coerced to int) and its period.
        self.Fs = int(Fs)
        self.length = length
        # Peak code magnitude for a symmetric signed range of `resolution` codes.
        self.resolution = int((resolution - 1) / 2)
        self.Ts = 1 / self.Fs

    def _tot_time(self):
        """Duration of one full buffer, in seconds."""
        return self.Ts * self.length

    def get_max_mag(self):
        """Peak sample magnitude (full scale)."""
        return self.resolution

    def get_samples(self, stup: list):
        """Sum the tones described by *stup* into a clipped int32 buffer.

        Each entry of *stup* must expose ``amplitude`` (in dBFS, <= 0) and
        ``frequency`` (in Hz) attributes.
        """
        buf = np.zeros(self.length, np.int32)
        peak = self.get_max_mag()
        for tone in stup:
            # dBFS -> linear amplitude in DAC codes.
            amp = self.resolution * 10 ** (tone.amplitude / 20)
            freq = self.get_real_frequency(tone.frequency)
            for n in range(self.length):
                buf[n] += int(round(amp * sin(2 * pi * freq * n * self.Ts)))
                # Hard-clip the running sum to the symmetric full-scale range.
                if buf[n] > peak:
                    buf[n] = peak
                elif buf[n] < -peak:
                    buf[n] = 0 - peak
        return buf

    def get_real_frequency(self, frequency) -> float:
        """Snap *frequency* to the nearest whole-cycle bin of the buffer."""
        if frequency == 0:
            return 0.0
        period = 1 / frequency
        # Round the number of cycles per buffer to an integer, then convert
        # back to Hz.
        cycles = round(self._tot_time() / period)
        return cycles / self._tot_time()

    def get_xaxis(self):
        """Time axis (seconds) covering one buffer."""
        axis = np.arange(self.length, dtype=float)
        axis *= self.Ts
        return axis
def plot_spectrum(y: list, p: rfController, fn: Path = None):
    """Plot the dBFS magnitude spectrum of sample buffer *y*.

    Parameters
    ----------
    y : list
        Sample buffer, e.g. as produced by ``rfController.get_samples``.
    p : rfController
        Controller that produced *y*; supplies full scale and sample period.
    fn : Path, optional
        When given, the figure is written to this path and closed.
        Otherwise the figure is left open (e.g. for interactive use).
    """
    fig, axs = plt.subplots(1, 1, constrained_layout=True)
    # Normalize to full scale before the FFT so 0 dBFS is a full-scale tone.
    Y = np.fft.fft(y / p.get_max_mag())
    # Rectangular window (all ones); kept explicit so a different window can
    # be dropped in without changing the normalization below.
    win = np.ones(len(Y), dtype=int)
    # Magnitude spectrum with compensation for real spectrum and windowing.
    s_mag = np.abs(Y) * 2 / np.sum(win)
    s_dbfs = 20 * safe_log10(s_mag)
    freq = np.fft.fftfreq(len(Y), d=p.Ts)
    axs.plot(freq, s_dbfs)
    axs.set_title("Spectral Plot")
    axs.set_xlabel("Frequency (Hz)")
    axs.set_ylabel("Power (dBFS)")
    axs.axis([np.min(freq), np.max(freq), -100, 0])
    axs.grid(True)
    # BUG FIX: was `fn != None`, which invokes __eq__ (overridable by
    # path-like objects); identity comparison is the intended test.
    if fn is not None:
        plt.savefig(fn)
        plt.close(fig)
| [
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.close",
"numpy.zeros",
"math.sin",
"numpy.min",
"numpy.max",
"numpy.arange",
"common_math.math.safe_log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((1564, 1607), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (1576, 1607), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2172), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2167, 2172), True, 'import matplotlib.pyplot as plt\n'), ((578, 609), 'numpy.zeros', 'np.zeros', (['self.length', 'np.int32'], {}), '(self.length, np.int32)\n', (586, 609), True, 'import numpy as np\n'), ((1407, 1442), 'numpy.arange', 'np.arange', (['self.length'], {'dtype': 'float'}), '(self.length, dtype=float)\n', (1416, 1442), True, 'import numpy as np\n'), ((1814, 1825), 'numpy.sum', 'np.sum', (['win'], {}), '(win)\n', (1820, 1825), True, 'import numpy as np\n'), ((1844, 1861), 'common_math.math.safe_log10', 'safe_log10', (['s_mag'], {}), '(s_mag)\n', (1854, 1861), False, 'from common_math.math import safe_log10\n'), ((2138, 2153), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {}), '(fn)\n', (2149, 2153), True, 'import matplotlib.pyplot as plt\n'), ((1798, 1807), 'numpy.abs', 'np.abs', (['Y'], {}), '(Y)\n', (1804, 1807), True, 'import numpy as np\n'), ((2053, 2065), 'numpy.min', 'np.min', (['freq'], {}), '(freq)\n', (2059, 2065), True, 'import numpy as np\n'), ((2067, 2079), 'numpy.max', 'np.max', (['freq'], {}), '(freq)\n', (2073, 2079), True, 'import numpy as np\n'), ((888, 920), 'math.sin', 'sin', (['(2 * pi * freq * i * self.Ts)'], {}), '(2 * pi * freq * i * self.Ts)\n', (891, 920), False, 'from math import sin, pi\n')] |
"""Functions for building the training loop"""
import numpy as np
import pandas as pd
import torch
from .embeddings import DynamicBernoulliEmbeddingModel
from .preprocessing import Data
def train_model(
    dataset,
    dictionary,
    validation=None,
    notebook=True,
    m=300,
    num_epochs=10,
    lr=2e-3,
    validate_after=100,
    **kwargs,
):
    """Trains the dynamic Bernoulli embedding model.

    Parameters
    ----------
    dataset : `pd.DataFrame`
    dictionary : dict
        Maps a word to an index. If a word in the dataset is not present in this
        dictionary, it will be removed/ignored.
    validation : float
        If None, no held out validation set. Otherwise, this is the proportion of the
        dataset to use as a validation set.
    notebook : bool
        Indicates whether the function is being run in a notebook to allow for nicer
        progress bars.
    m : int
        The number of mini batches to use.
    num_epochs : int
        Number of epochs to train for, excluding the first initialization epoch.
    lr : float
        Learning rate.
    validate_after : int
        Compute the validation metric after this many minibatches.
    **kwargs
        Forwarded to init of `DynamicBernoulliEmbeddingModel`.

    Returns
    -------
    tuple
        ``(model, loss_history)`` where ``loss_history`` is a DataFrame with
        columns ``loss``, ``l_pos``, ``l_neg``, ``l_prior``, ``l_pos_val``
        (one row per minibatch, initialization epoch excluded).
    """
    # Use nicer tqdm progress bar if in a notebook.
    if notebook:
        from tqdm.notebook import tqdm
    else:
        from tqdm import tqdm
    # Check for gpu.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Create a validation set: a boolean mask over the rows of `dataset`
    # (all False when no validation split is requested).
    validation_mask = np.repeat(False, dataset.shape[0])
    if validation is not None:
        assert 0 < validation < 1
        validation_mask = np.random.random(dataset.shape[0]) < validation
    data = Data(dataset[~validation_mask], dictionary, device)
    data_val = Data(dataset[validation_mask], dictionary, device)
    # Build model.
    model = DynamicBernoulliEmbeddingModel(
        len(data.dictionary),
        data.T,
        data.m_t,
        dictionary,
        data.unigram_logits,
        **kwargs,
    )
    model = model.to(device)
    # Training loop. Epoch 0 is a static "burn in" pass; dynamics start at epoch 1.
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    loss_history = []
    for i in range(num_epochs + 1):
        # Initialize weights from the epoch 0 "burn in" period and reset the optimizer:
        # the first V embedding rows are tiled across all T time slices.
        if i == 1:
            with torch.no_grad():
                model.rho.weight = torch.nn.Parameter(
                    model.rho.weight[: model.V].repeat((model.T, 1))
                )
            # Rebuild the optimizer because the parameter object was replaced.
            optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
        pbar = tqdm(enumerate(data.epoch(m)), total=m)
        pbar.set_description(f"Epoch {i}")
        for j, (targets, contexts, times) in pbar:
            model.train()
            model.zero_grad()
            # The first epoch ignores time for initializing weights.
            if i == 0:
                times = torch.zeros_like(times)
            loss, L_pos, L_neg, L_prior = model(targets, times, contexts, dynamic=i > 0)
            loss.backward()
            optimizer.step()
            # Validation: every `validate_after` minibatches, accumulate the
            # positive-sample objective over 10 validation minibatches.
            L_pos_val = None
            if validation is not None and i > 0 and j % validate_after == 0:
                L_pos_val = 0
                model.eval()
                for val_targets, val_contexts, val_times in data_val.epoch(10):
                    _, L_pos_val_batch, _, _ = model(
                        val_targets, val_times, val_contexts, validate=True
                    )
                    L_pos_val += L_pos_val_batch.item()
            # Collect loss history. Ignore the initialization epoch 0.
            if i > 0:
                # NOTE(review): `if L_prior` is a truthiness test — a zero-valued
                # prior tensor would also be recorded as None; confirm intended.
                batch_loss = (
                    loss.item(),
                    L_pos.item(),
                    L_neg.item(),
                    L_prior.item() if L_prior else None,
                    L_pos_val,
                )
                loss_history.append(batch_loss)
    loss_history = pd.DataFrame(
        loss_history, columns=["loss", "l_pos", "l_neg", "l_prior", "l_pos_val"]
    )
    return model, loss_history
| [
"pandas.DataFrame",
"torch.zeros_like",
"numpy.random.random",
"torch.cuda.is_available",
"torch.no_grad",
"numpy.repeat"
] | [((1546, 1580), 'numpy.repeat', 'np.repeat', (['(False)', 'dataset.shape[0]'], {}), '(False, dataset.shape[0])\n', (1555, 1580), True, 'import numpy as np\n'), ((3949, 4039), 'pandas.DataFrame', 'pd.DataFrame', (['loss_history'], {'columns': "['loss', 'l_pos', 'l_neg', 'l_prior', 'l_pos_val']"}), "(loss_history, columns=['loss', 'l_pos', 'l_neg', 'l_prior',\n 'l_pos_val'])\n", (3961, 4039), True, 'import pandas as pd\n'), ((1454, 1479), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1477, 1479), False, 'import torch\n'), ((1672, 1706), 'numpy.random.random', 'np.random.random', (['dataset.shape[0]'], {}), '(dataset.shape[0])\n', (1688, 1706), True, 'import numpy as np\n'), ((2345, 2360), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2358, 2360), False, 'import torch\n'), ((2899, 2922), 'torch.zeros_like', 'torch.zeros_like', (['times'], {}), '(times)\n', (2915, 2922), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pandas as pd
import numpy as np
from functools import reduce
import pdb
def split(tensor, window=5, step=1, offset=0, keep_tail=True):
    """Cut *tensor* into sliding windows along its first dimension.

    :param tensor: sequence-like data (numpy array, torch tensor, list, ...)
    :param window: int, size of each window, default=5
    :param step: int, stride between consecutive windows, default=1
    :param offset: int, number of leading elements to skip, default=0
    :param keep_tail: bool; True keeps leftover elements as a leading
        partial chunk, False keeps them as a trailing partial chunk
    :return: list of slices of *tensor*

    Examples::

        >>> data = np.array([1,2,3,4,5,6,7,8,9,10])
        >>> split(data, window=4, step=5, offset=0, keep_tail=True)
        [array([1]), array([2, 3, 4, 5]), array([ 7,  8,  9, 10])]
        >>> split(data, window=4, step=5, offset=0, keep_tail=False)
        [array([1, 2, 3, 4]), array([6, 7, 8, 9]), array([10])]
    """
    window, step, offset = int(window), int(step), int(offset)
    # Number of full windows that fit after the offset.
    n_windows = int((len(tensor) - window - offset) / step) + 1
    # Elements that belong to no full window.
    remain = int(len(tensor) - window - offset - (n_windows - 1) * step)
    chunks = []
    if keep_tail:
        # Leftovers (if any) come first so the final window ends flush with
        # the end of the data.
        first = remain + offset
        if remain > 0:
            chunks.append(tensor[offset:offset + remain])
        for k in range(n_windows):
            lo = first + k * step
            chunks.append(tensor[lo:lo + window])
    else:
        # Windows start flush at the offset; leftovers trail at the end.
        for k in range(n_windows):
            lo = offset + k * step
            chunks.append(tensor[lo:lo + window])
        if remain > 0:
            chunks.append(tensor[-remain:])
    return chunks
def split3d(tensor, window=5, step=1, offset=0, keep_tail=True, dim=1):
    """Cut *tensor* into sliding windows along axis *dim*.

    BUG FIX: previously *dim* was only used to read the length — the
    slicing itself was hard-coded to axis 1 (``tensor[:, lo:hi]``), so any
    ``dim != 1`` silently windowed the wrong axis. The slices now honor
    *dim*; behavior for the default ``dim=1`` is unchanged.

    :param tensor: numpy array or torch tensor with at least ``dim + 1`` axes
    :param window: int, size of each window, default=5
    :param step: int, stride between consecutive windows, default=1
    :param offset: int, number of leading elements to skip, default=0
    :param keep_tail: bool; True keeps leftover elements as a leading
        partial chunk, False keeps them as a trailing partial chunk
    :param dim: int, axis along which to slice, default=1
    :return: list of sub-tensors; all axes other than *dim* are untouched

    Examples::

        >>> data = np.arange(20).reshape(2, 10)
        >>> parts = split3d(data, window=4, step=4, offset=0, keep_tail=False)
        >>> [p.shape[1] for p in parts]
        [4, 4, 2]
    """
    window, step, offset = int(window), int(step), int(offset)
    length = tensor.shape[dim]
    # Number of full windows that fit after the offset, and the leftovers.
    index = int((length - window - offset) / step) + 1
    remain = int(length - window - offset - (index - 1) * step)

    def _slab(lo, hi):
        # Slice [lo:hi) along axis *dim*, leaving every other axis intact.
        sl = [slice(None)] * len(tensor.shape)
        sl[dim] = slice(lo, hi)
        return tensor[tuple(sl)]

    sample_list = []
    if keep_tail:
        # Leftovers (if any) come first so the final window ends flush.
        start_index = remain + offset
        if remain > 0:
            sample_list.append(_slab(offset, offset + remain))
        for i in range(index):
            lo = start_index + i * step
            sample_list.append(_slab(lo, lo + window))
    else:
        # Windows start flush at the offset; leftovers trail at the end.
        start_index = offset
        for i in range(index):
            lo = start_index + i * step
            sample_list.append(_slab(lo, lo + window))
        if remain > 0:
            sample_list.append(_slab(-remain, None))
    return sample_list
def split_sample(tensor, window=5, step=1, offset=0, keep_tail=True, merge_remain=False):
    """Split *tensor* into fixed-size samples, controlling the remainder.

    Delegates the windowing to :func:`split`, then either drops the partial
    leftover chunk or merges it into its neighboring full window.

    BUG FIX: the numpy branch used ``np.concateneate`` (typo), which raised
    ``AttributeError`` whenever ``merge_remain=True`` was used with a
    non-torch tensor and a remainder existed. It is now ``np.concatenate``.

    :param tensor: numpy array or torch tensor
    :param window: int, size of each window, default=5
    :param step: int, stride between consecutive windows, default=1
    :param offset: int, number of leading elements to skip, default=0
    :param keep_tail: bool; True places leftover elements at the front,
        False places them at the end (see :func:`split`)
    :param merge_remain: bool; True merges the leftover chunk into the
        adjacent full window (first window when ``keep_tail`` is True, last
        window otherwise); False simply drops the leftover chunk
    :return: list of sub-tensors

    Examples::

        >>> import numpy as np
        >>> data = np.array(range(1, 11))
        >>> split_sample(data, window=5, step=3, offset=0, keep_tail=False, merge_remain=False)
        [array([1, 2, 3, 4, 5]), array([4, 5, 6, 7, 8])]
        >>> split_sample(data, window=3, step=3, offset=5, keep_tail=False, merge_remain=True)
        [array([ 6,  7,  8,  9, 10])]
        >>> data = np.array(range(30)).reshape(6, 5)
        >>> split_sample(data, window=3, step=2, offset=0, keep_tail=True, merge_remain=False)
        [array([[ 5,  6,  7,  8,  9],
               [10, 11, 12, 13, 14],
               [15, 16, 17, 18, 19]]),
         array([[15, 16, 17, 18, 19],
               [20, 21, 22, 23, 24],
               [25, 26, 27, 28, 29]])]
    """
    index = int((len(tensor) - window - offset) / step) + 1
    remain = len(tensor) - window - offset - (index - 1) * step
    sample_list = split(tensor, window=window, step=step, offset=offset, keep_tail=keep_tail)
    if remain:
        # The leftover chunk sits at the front when keep_tail, else at the end.
        if keep_tail:
            idx = 1
        else:
            idx = -1
        if not merge_remain:
            # Drop the leftover chunk so every sample has exactly `window` rows.
            return sample_list[idx:] if idx == 1 else sample_list[:idx]
        else:
            if isinstance(tensor, torch.Tensor):
                cat_func = torch.cat
            else:
                cat_func = np.concatenate
            # Fold the leftover chunk into its neighboring full window.
            sample_list[idx - 1] = cat_func([sample_list[idx - 1], sample_list[idx]])
            del sample_list[idx]
            return sample_list
    else:
        return sample_list
def split_sample3d(tensor, window=5, step=1, offset=0, keep_tail=True, merge_remain=False, dim=1):
    """Split ``tensor`` into windows of length ``window`` along axis ``dim``,
    handling the remainder samples that do not fill a whole window.

    Thin wrapper around :func:`split3d` (defined elsewhere in this module)
    that decides what to do with the leftover chunk when
    ``(length - window - offset)`` is not a multiple of ``step``.

    :param tensor: torch.Tensor or numpy.ndarray to split
    :param window: int, size of each window along ``dim`` (default 5)
    :param step: int, stride between the starts of two windows (default 1)
    :param offset: int, offset of the first window along ``dim`` (default 0)
    :param keep_tail: bool, forwarded to split3d; also selects which end of
        the returned sample list holds the remainder chunk (True: first
        element, False: last element) (default True)
    :param merge_remain: bool; if True the remainder chunk is concatenated
        into its neighbouring sample (with torch.cat for tensors,
        np.concatenate for arrays), if False the remainder sample is dropped
        (default False)
    :param dim: int, axis along which to split (default 1)
    :return: list of tensors/arrays of the same type as ``tensor``

    NOTE(review): the exact placement of the remainder chunk depends on the
    behaviour of split3d, which is not visible here -- confirm against its
    implementation. The examples previously shown in this docstring
    illustrated the 1-D helper ``split_sample`` and did not apply to the
    3-D case, so they were removed.
    """
    lenght = tensor.shape[dim]
    # Number of full windows that fit, and the length of the leftover chunk.
    index = int((lenght - window - offset) / step) + 1
    remain = lenght - window - offset - (index - 1) * step
    sample_list = split3d(tensor, window=window, step=step, offset=offset, keep_tail=keep_tail)
    if remain:
        # idx selects the remainder sample: first element when keep_tail,
        # last element otherwise.
        if keep_tail:
            idx = 1
        else:
            idx = -1
        if not merge_remain:
            # Drop the remainder sample.
            return sample_list[idx:] if idx==1 else sample_list[:idx]
        else:
            #pdb.set_trace()
            # Merge the remainder into the adjacent sample, using the
            # concatenation routine that matches the input type.
            if isinstance(tensor, torch.Tensor):
                cat_func = torch.cat
            else:
                cat_func = np.concatenate
            sample_list[idx-1] = cat_func([sample_list[idx-1], sample_list[idx]],dim)
            del sample_list[idx]
            return sample_list
    else:
        # Windows tile the axis exactly; nothing to merge or drop.
        return sample_list
if __name__ == '__main__':
    # Smoke test / demo for the split helpers in this module.
    # NOTE: all numpy draws below share one RNG stream seeded here, so the
    # generated values depend on the order of the calls -- do not reorder.
    np.random.seed(520)
    # 2-D float data, split along the first axis.
    data2d_np = np.random.randn(10,3)
    data2d_ts = torch.tensor(data2d_np, dtype=torch.float32)
    data_list_by_split_np = split(data2d_np)
    data_list_by_split_ts = split(data2d_ts)
    # 3-D integer data, split along axis 1 by the *3d variants.
    data3d_np = np.random.randint(1,5,(2,10,3))
    data3d_ts = torch.tensor(data3d_np, dtype=torch.int32)
    data_list_by_split3d_np = split3d(data3d_np)
    data_list_by_split3d_ts = split3d(data3d_ts)
    # Exercise the sample-splitting wrappers with and without merging the
    # remainder chunk.
    data_sample_list_np = split_sample(data2d_np, window=3)
    data_sample_list_ts = split_sample(data2d_ts, window=3)
    data_sample3d_list_np = split_sample3d(data3d_np, window=3, step=2, merge_remain=False)
    data_sample3d_list_ts = split_sample3d(data3d_ts, window=3, step=2, merge_remain=False)
    data_sample3d_list_np2 = split_sample3d(data3d_np, window=3, step=2, merge_remain=True)
    data_sample3d_list_ts2 = split_sample3d(data3d_ts, window=3, step=2, merge_remain=True)
| [
"numpy.random.randint",
"numpy.random.seed",
"torch.tensor",
"numpy.random.randn"
] | [((10653, 10672), 'numpy.random.seed', 'np.random.seed', (['(520)'], {}), '(520)\n', (10667, 10672), True, 'import numpy as np\n'), ((10689, 10711), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (10704, 10711), True, 'import numpy as np\n'), ((10727, 10771), 'torch.tensor', 'torch.tensor', (['data2d_np'], {'dtype': 'torch.float32'}), '(data2d_np, dtype=torch.float32)\n', (10739, 10771), False, 'import torch\n'), ((10888, 10923), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(2, 10, 3)'], {}), '(1, 5, (2, 10, 3))\n', (10905, 10923), True, 'import numpy as np\n'), ((10936, 10978), 'torch.tensor', 'torch.tensor', (['data3d_np'], {'dtype': 'torch.int32'}), '(data3d_np, dtype=torch.int32)\n', (10948, 10978), False, 'import torch\n')] |
import os
import math
import numpy as np
import matplotlib as matplot
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import csv
from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
# List the colors that will be used for tracing the track.
# (Not referenced in the processing loop below -- presumably used by a
# plotting variant of this script.)
colors = ['blue', 'orange', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'black', 'green', 'gold', 'lightcoral', 'turquoise']
# Color index counter (unused in the visible portion of this script).
c =0
# Root directory holding one subdirectory per hurricane run.
mainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'
# Hurricanes to process, and the matching NOAA best-track CSV files
# (the latter are not read by the loop below).
Hurricaneall = ['Gert','Nicole','Joaquin','Cristobal','Ike']
Real_Hurricane_Data = ['Gert_Real_Track_Time_NOAA.csv',
                       'Nicole_Real_Track_Time_NOAA.csv',
                       'Joaquin_Real_Track_Time_NOAA.csv',
                       'Cristobal_Real_Track_Time_NOAA.csv',
                       'Ike_Real_Track_Time_NOAA.csv']
# Hurricaneall = ['Dorian']
# Real_Hurricane_Data = ['Dorian_Real_Track_Time_NOAA.csv']
# WRF grid sizes and the corresponding SWAN grid-size directory tokens
# (indexed in lockstep by gk in the loop below).
gridsize = ['8km','16km']
swansize = ['swgr8p0', 'swgr16p0']
# Directory-name prefix and the physics-configuration suffixes that together
# with mainpath/Hurricane/gridsize form the full run-directory path.
prefix = 'WRFSWAN_NoTurb_swdt10_cpdt7200_'
Dirall = ['_swh8_swt14_Clz0p0001',
          '_swh8_swt14_Clz0p01',
          '_swh8_swt14_A1200B4p5C0P11',
          '_swh8_swt14_Clz100p00']
# Destination for the per-hurricane CSV summaries written at the end.
outputpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section3_change_pars_for_weak_winds/source_code_outputs_change_Clz/'
def list_files(Dir, ncfiles):
    """Append the names of all WRF output files (names starting with
    'wrfout') found in directory Dir to the list ncfiles, and return
    that same (mutated) list."""
    ncfiles.extend(entry for entry in os.listdir(Dir) if entry.startswith('wrfout'))
    return ncfiles
# For every grid size / hurricane / physics configuration: open each wrfout
# file, locate the cell with the strongest 10-m wind, record the roughness
# length (ZNT) there, and write one CSV per hurricane and grid size.
for gk in range(len(gridsize)):
    count1 = 0
    for Hurricane in Hurricaneall:
        rows = []
        for Dir in Dirall:
            print('Current folder is: ')
            Dir_local = mainpath + Hurricane + '/' + gridsize[gk] + '/' + prefix + swansize[gk] + Dir
            print(Dir_local)
            # Work inside the run directory so the relative wrfout file
            # names returned by list_files can be opened directly.
            os.chdir(Dir_local)
            # All WRF output files of this run, in chronological order.
            ncfiles = sorted(list_files(Dir_local, []))
            print(ncfiles)
            # ZNT at the eyewall (strongest 10-m wind) for each output time.
            row = []
            # Output interval (hours) between consecutive wrfout files.
            Time_Step = 6
            k = 0
            Times = []
            # Only the first time record (index 0) of each file is read.
            for tt in range(1):
                for fname in ncfiles:
                    nc = Dataset(fname)
                    ttt = np.array(getvar(nc, "times", tt))
                    print('!!!!!!', ttt)
                    ZNT_2D = np.array(getvar(nc, "ZNT", tt))
                    U10_2D = np.array(getvar(nc, "U10", tt))
                    V10_2D = np.array(getvar(nc, "V10", tt))
                    # Squared 10-m wind speed: taking the sqrt is not needed
                    # just to locate the maximum.
                    UV10_2D = np.square(U10_2D) + np.square(V10_2D)
                    idx = np.where(UV10_2D == np.amax(UV10_2D))
                    print(idx)
                    # ZNT at the (first) strongest-wind grid cell.
                    row.append(float(ZNT_2D[(np.amin(idx[0]), np.amin(idx[1]))]))
                    Times.append(Time_Step * k)
                    k = k + 1
                    # BUGFIX: close the netCDF handle. Hundreds of files are
                    # processed per run; leaving Datasets open leaks file
                    # descriptors and can exhaust the OS limit.
                    nc.close()
            print(row)
            print(Times)
            rows.append(row)
        # One CSV per hurricane/grid size: header = output times (hours),
        # one row per physics configuration in Dirall.
        fields = [time for time in Times]
        print(fields)
        print(rows)
        with open(outputpath + Hurricane + '_ZNT_eyewall_' + gridsize[gk] + '.csv', 'w') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(fields)
            csvwriter.writerows(rows)
        count1 = count1 + 1
| [
"netCDF4.Dataset",
"csv.writer",
"numpy.amin",
"numpy.square",
"numpy.amax",
"wrf.getvar",
"os.chdir",
"os.listdir"
] | [((1494, 1509), 'os.listdir', 'os.listdir', (['Dir'], {}), '(Dir)\n', (1504, 1509), False, 'import os\n'), ((1980, 1999), 'os.chdir', 'os.chdir', (['Dir_local'], {}), '(Dir_local)\n', (1988, 1999), False, 'import os\n'), ((3711, 3730), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3721, 3730), False, 'import csv\n'), ((2674, 2689), 'netCDF4.Dataset', 'Dataset', (['ncfile'], {}), '(ncfile)\n', (2681, 2689), False, 'from netCDF4 import Dataset\n'), ((2725, 2752), 'wrf.getvar', 'getvar', (['ncfile', '"""times"""', 'tt'], {}), "(ncfile, 'times', tt)\n", (2731, 2752), False, 'from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords\n'), ((2832, 2857), 'wrf.getvar', 'getvar', (['ncfile', '"""ZNT"""', 'tt'], {}), "(ncfile, 'ZNT', tt)\n", (2838, 2857), False, 'from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords\n'), ((2897, 2922), 'wrf.getvar', 'getvar', (['ncfile', '"""U10"""', 'tt'], {}), "(ncfile, 'U10', tt)\n", (2903, 2922), False, 'from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords\n'), ((2962, 2987), 'wrf.getvar', 'getvar', (['ncfile', '"""V10"""', 'tt'], {}), "(ncfile, 'V10', tt)\n", (2968, 2987), False, 'from wrf import to_np, getvar, smooth2d, get_cartopy, cartopy_xlim, cartopy_ylim, latlon_coords\n'), ((3019, 3036), 'numpy.square', 'np.square', (['U10_2D'], {}), '(U10_2D)\n', (3028, 3036), True, 'import numpy as np\n'), ((3037, 3054), 'numpy.square', 'np.square', (['V10_2D'], {}), '(V10_2D)\n', (3046, 3054), True, 'import numpy as np\n'), ((3101, 3117), 'numpy.amax', 'np.amax', (['UV10_2D'], {}), '(UV10_2D)\n', (3108, 3117), True, 'import numpy as np\n'), ((3271, 3286), 'numpy.amin', 'np.amin', (['idx[0]'], {}), '(idx[0])\n', (3278, 3286), True, 'import numpy as np\n'), ((3287, 3302), 'numpy.amin', 'np.amin', (['idx[1]'], {}), '(idx[1])\n', (3294, 3302), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import array, arange, amin, amax, histogram
from numpy import column_stack, median, mean, sum
from plotting import Plotter
from datetime import timedelta
from filter_provider import DatasetFilter, AcceptanceTester
class DatasetContainer:
    """Contains one single dataset. Holds a list of Datapoint instances
    describing the actual data. Depending on the name the dataset was
    initialized with, processing is performed to reject invalid data when
    appending new points."""
    def __init__(self, dataset_type, time_resolution=timedelta(minutes=1),
                 filter_provider=DatasetFilter, plotter=Plotter):
        """Initialize the dataset with the dataset_type. Type selects
        processing for valid data when appending points.
        Parameters
        ----------
        dataset_type : string
            The dataset type.
        time_resolution : datetime.timedelta
            The time resolution of the dataset.
            (Default: timedelta(minutes=1))
        filter_provider : class
            A class providing data filters to the container. Must be
            callable to apply the filters, and expose an add_filter and
            count method.
        plotter : class
            A class providing plotting functionality. Must expose a plot
            method.
        Returns
        -------
        None
        """
        self._type = dataset_type
        self._raw_datapoints = []
        self._index = 0
        self._time_resolution = time_resolution
        self._accept = AcceptanceTester(self._type)
        # Duck-type validation of the injected collaborators: fail fast on
        # construction instead of deep inside a later call.
        if hasattr(filter_provider, 'add_filter') \
                and hasattr(filter_provider, 'count') \
                and callable(filter_provider.add_filter) \
                and callable(filter_provider.count) \
                and callable(filter_provider):
            self._filters = filter_provider()
        else:
            raise ValueError('Got an invalid filter provider')
        if hasattr(plotter, 'plot') and callable(plotter.plot):
            self._plotter = plotter
        else:
            raise ValueError('Got an invalid plotter')
        self._data_up_to_date = True
        self._update_filtered_data()
    def add_filter(self, filter_type, **kwargs):
        """Add a new filter to the filter provider. filter_type selects the
        filter that will be applied, any other parameter must be named and will
        be passed to the actual filter function. Invalidates the cached
        filtered data so it is recomputed on next access.
        Parameters
        ----------
        filter_type : string
            The filter type to add.
        kwargs : dict
            Any other named parameters will be stored in the kwargs dict
            and passed to the filter function when it gets called.
        Returns
        -------
        None
        """
        self._filters.add_filter(filter_type, **kwargs)
        self._data_up_to_date = False
    def append(self, timestamp, value):
        """Append a Datapoint(timestamp, value) to the dataset. Depending on the
        type, checks for validity are performed, and if invalid, the data point
        may be rejected.
        Parameters
        ----------
        timestamp : datetime
            The timestamp of the Datapoint
        value : int, float
            The value to store
        Returns
        -------
        None
        """
        dp = Datapoint(timestamp, value)
        if self._accept(dp):
            self._raw_datapoints.append(dp)
            self._data_up_to_date = False
    def __getitem__(self, item):
        """Return list of timestamps when called with 'timestamps' or 0, and
        list of values when called with 'values' or 1. If any other value is
        passed, an IndexError is raised.
        Parameters
        ----------
        item : string or int
            The item name or index of the item to retrieve.
        Returns
        -------
        numpy.array
            Depending on the selected item, an array containing the
            timestamps or values stored in the container are returned.
        """
        # Lazily refresh the filtered-data cache.
        if not self._data_up_to_date:
            self._update_filtered_data()
            self._data_up_to_date = True
        if item == 'timestamps' or item == 0:
            return self._filtered_data['timestamps']
        elif item == 'values' or item == 1:
            return self._filtered_data['values']
        else:
            raise IndexError('Invalid index')
    def _update_filtered_data(self):
        """Update the filtered data cache from the raw datapoints.
        Parameters
        ----------
        None
        Returns
        -------
        None
        """
        timestamps, values = [], []
        for point in self._raw_datapoints:
            timestamps.append(point.timestamp)
            values.append(point.value)
        timestamps, values = self._filters(array(timestamps),
                                           array(values))
        self._filtered_data = {'timestamps': timestamps, 'values': values}
    def __iter__(self):
        """Return self as iterator.
        Parameters
        ----------
        None
        Returns
        -------
        self : DatasetContainer
        """
        return self
    def next(self):
        """Iterate to the next Datapoint in the list.
        Parameters
        ----------
        None
        Returns
        -------
        Datapoint
            The next Datapoint in the container
        """
        if self._index < len(self._raw_datapoints):
            self._index += 1
            return self._raw_datapoints[self._index - 1]
        else:
            self._index = 0
            raise StopIteration
    # BUGFIX: __iter__ returns self, but the Python 3 iterator protocol calls
    # __next__(), not next() -- without this alias any `for point in container`
    # loop raised TypeError under Python 3. Keeping next() as well preserves
    # the original (Python 2 style) API.
    __next__ = next
    def time_resolution(self, value = None):
        """Manage the datasets time resolution. If called without a value,
        return the current time resolution. If a value is passed, it is set as
        the new time resolution. The value must be >= 1 min, or an error will
        be raised.
        Parameters
        ----------
        value : datetime.timedelta, None
            The new time resolution to set, or None to return the current
            time resolution only.
        Returns
        -------
        _time_resolution : datetime.timedelta
            The time resolution of the dataset after the function finishes
        """
        if value is not None:
            if value < timedelta(minutes=1):
                raise ValueError('Time resolution cannot be lower than 1 min.')
            else:
                self._time_resolution = value
                return self._time_resolution
        else:
            return self._time_resolution
    def timestamp_start(self):
        """Return first (chronological) timestamp for the dataset.
        Parameters
        ----------
        None
        Returns
        -------
        datetime.datetime
            The earliest timestamp stored in the dataset
        """
        return min(self['timestamps'])
    def timestamp_end(self):
        """Return last (chronological) timestamp for the dataset.
        Parameters
        ----------
        None
        Returns
        -------
        datetime.datetime
            The latest timestamp stored in the dataset
        """
        return max(self['timestamps'])
    def timerange(self):
        """Return the timerange [start, end] of the dataset as a list.
        Parameters
        ----------
        None
        Returns
        -------
        list
            A list containing the earliest and the latest timestamp in the
            dataset.
        """
        return [self.timestamp_start(), self.timestamp_end()]
    def _downsample_data(self, func):
        """Arbitrary downsample function. Pass a callable that performs the actual
        downsampling. func should accept an array of values and return a single
        number.
        Parameters
        ----------
        func : callable
            The downsample function to apply. func should accept numpy.array
            and return a single float or int.
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        cur_time = self.timestamp_start()
        # Hoist the loop bound: timestamp_end() rescans all timestamps.
        end_time = self.timestamp_end()
        res_timestamps = []
        res_values = []
        while cur_time <= end_time:
            sliced_data = self._timeslice_data(cur_time, cur_time +
                                               self.time_resolution())
            val = func(sliced_data['values'])
            res_timestamps.append(cur_time)
            res_values.append(val)
            cur_time += self.time_resolution()
        return self._plotter(self._type, timestamps=array(res_timestamps),
                             values=array(res_values))
    def downsample_mean(self):
        """Downsample data using the Numpy mean function.
        Parameters
        ----------
        None
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        return self._downsample_data(mean)
    def downsample_median(self):
        """Downsample data using the Numpy median function.
        Parameters
        ----------
        None
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        return self._downsample_data(median)
    def downsample_sum(self):
        """Downsample data using the Numpy sum function.
        Parameters
        ----------
        None
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        return self._downsample_data(sum)
    def downsample_none(self):
        """Don't downsample, just return full-resolution data as saved in the
        dataset.
        Parameters
        ----------
        None
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        res_timestamps = self['timestamps']
        res_values = self['values']
        return self._plotter(self._type, timestamps=array(res_timestamps),
                             values=array(res_values))
    def downsample_histogram(self, hist_min=None, hist_max=None,
                             resolution=5):
        """Downsample the data into a 2D histogram of values, where the time
        resolution of the histogram is that of the dataset. I.e., each histogram
        timestemp will contain a 1D histogram of values occuring in that
        timestep in the dataset.
        Parameters
        ----------
        hist_min : float, None
            The minimal value of the histogram. If None is passed, it is
            computed dynamically.
            (Default: None)
        hist_max : float, None
            The maximum value of the histogram. If None is passed, it is
            computed dynamically.
            (Default: None)
        resolution : float
            The bin width of the histogram.
            (Default: 5)
        Returns
        -------
        class
            A class that provides plotting of the data set.
        """
        if hist_min is None:
            #Take the minimum, round to nearest 10
            hist_min = int(amin(self['values'])/10)*10
        if hist_max is None:
            #Take the maximum, round to nearest 10
            hist_max = int(amax(self['values'])/10)*10
        bins = arange(hist_min, hist_max, resolution)
        cur_time = self.timestamp_start()
        end_time = self.timestamp_end()
        res_timestamps = []
        res_histogram = []
        while cur_time <= end_time:
            sliced_data = self._timeslice_data(cur_time, cur_time +
                                               self.time_resolution())
            hist = histogram(sliced_data['values'], bins, density=True)[0]
            res_timestamps.append(cur_time)
            #Scale the maximum of each histogram row to 1:
            res_histogram.append(hist/amax(hist))
            cur_time += self.time_resolution()
        # One extra timestamp so plotting routines have the closing bin edge.
        res_timestamps.append(end_time)
        return self._plotter(self._type, timestamps=array(res_timestamps),
                             bins=bins, histogram=array(res_histogram))
    def _timeslice_data(self, timestamp_start, timestamp_end):
        """Helper function to perform the actual time slicing common to
        downsampling. Returns a DatasetContainer with the data for which
        timestamp_start <= timestamp < timestamp_end.
        Parameters
        ----------
        timestamp_start : datetime.datetime
            The earliest timestamp to include
        timestamp_end : datetime.datetime
            The first timestamp to exclude
        Returns
        -------
        res : DatasetContainer
            A container with the data between the start and end values.
        """
        timestamps = array(self['timestamps'])
        values = array(self['values'])
        mask = (timestamps >= timestamp_start)*(timestamps < timestamp_end)
        res = DatasetContainer(self._type)
        res.time_resolution(value=self.time_resolution())
        for timestamp, value in column_stack((timestamps[mask], values[mask])):
            res.append(timestamp, value)
        return res
class Datapoint:
    """Container for a single data point. Holds Datapoint.timestamp and
    Datapoint.value. Is iterable to allow for timestamp, value = Datapoint
    assignments."""
    def __init__(self, timestamp, value):
        """Initialize the Datapoint. Pass a timestamp and a value to hold.
        Parameters
        ----------
        timestamp : datetime.datetime
            The timestamp of the data point
        value : float
            The value of the data point
        """
        self.timestamp = timestamp
        self.value = value
        # Iteration cursor: 0 -> timestamp next, 1 -> value next.
        self._index = 0
    def __iter__(self):
        """Return self as iterator.
        Parameters
        ----------
        None
        Returns
        -------
        self : Datapoint
            This instance of Datapoint
        """
        return self
    def next(self):
        """Iterate over the values. First iteration yields timestamp, second
        iteration yields value; the third raises StopIteration and resets
        the cursor so the point can be unpacked again.
        Parameters
        ----------
        None
        Returns
        -------
        datetime.datetime, float
            Returns the timestamp on the first iteration, the value on the
            second one.
        """
        if self._index == 0:
            self._index += 1
            return self.timestamp
        elif self._index == 1:
            self._index += 1
            return self.value
        else:
            self._index = 0
            raise StopIteration
    # BUGFIX: Python 3 calls __next__(), not next(); without this alias the
    # documented `timestamp, value = datapoint` unpacking raised TypeError
    # under Python 3. The next() spelling is kept for Python 2 callers.
    __next__ = next
| [
"filter_provider.AcceptanceTester",
"numpy.amin",
"numpy.amax",
"numpy.histogram",
"numpy.array",
"datetime.timedelta",
"numpy.arange",
"numpy.column_stack"
] | [((602, 622), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (611, 622), False, 'from datetime import timedelta\n'), ((1662, 1690), 'filter_provider.AcceptanceTester', 'AcceptanceTester', (['self._type'], {}), '(self._type)\n', (1678, 1690), False, 'from filter_provider import DatasetFilter, AcceptanceTester\n'), ((12147, 12185), 'numpy.arange', 'arange', (['hist_min', 'hist_max', 'resolution'], {}), '(hist_min, hist_max, resolution)\n', (12153, 12185), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((13635, 13660), 'numpy.array', 'array', (["self['timestamps']"], {}), "(self['timestamps'])\n", (13640, 13660), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((13679, 13700), 'numpy.array', 'array', (["self['values']"], {}), "(self['values'])\n", (13684, 13700), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((13910, 13956), 'numpy.column_stack', 'column_stack', (['(timestamps[mask], values[mask])'], {}), '((timestamps[mask], values[mask]))\n', (13922, 13956), False, 'from numpy import column_stack, median, mean, sum\n'), ((5182, 5199), 'numpy.array', 'array', (['timestamps'], {}), '(timestamps)\n', (5187, 5199), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((5246, 5259), 'numpy.array', 'array', (['values'], {}), '(values)\n', (5251, 5259), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((6803, 6823), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (6812, 6823), False, 'from datetime import timedelta\n'), ((9233, 9254), 'numpy.array', 'array', (['res_timestamps'], {}), '(res_timestamps)\n', (9238, 9254), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((9293, 9310), 'numpy.array', 'array', (['res_values'], {}), '(res_values)\n', (9298, 9310), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((10755, 10776), 'numpy.array', 'array', (['res_timestamps'], {}), 
'(res_timestamps)\n', (10760, 10776), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((10815, 10832), 'numpy.array', 'array', (['res_values'], {}), '(res_values)\n', (10820, 10832), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((12501, 12553), 'numpy.histogram', 'histogram', (["sliced_data['values']", 'bins'], {'density': '(True)'}), "(sliced_data['values'], bins, density=True)\n", (12510, 12553), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((12861, 12882), 'numpy.array', 'array', (['res_timestamps'], {}), '(res_timestamps)\n', (12866, 12882), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((12935, 12955), 'numpy.array', 'array', (['res_histogram'], {}), '(res_histogram)\n', (12940, 12955), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((12698, 12708), 'numpy.amax', 'amax', (['hist'], {}), '(hist)\n', (12702, 12708), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((11969, 11989), 'numpy.amin', 'amin', (["self['values']"], {}), "(self['values'])\n", (11973, 11989), False, 'from numpy import array, arange, amin, amax, histogram\n'), ((12104, 12124), 'numpy.amax', 'amax', (["self['values']"], {}), "(self['values'])\n", (12108, 12124), False, 'from numpy import array, arange, amin, amax, histogram\n')] |
import pathlib
import warnings
import functools
import numpy as np
import xarray as xr
try:
from fastprogress.fastprogress import progress_bar
fastprogress = 1
except ImportError:
fastprogress = None
import shutil
def temp_write_split(
    ds_in,
    folder,
    method="dimension",
    dim="time",
    split_interval=40,
    zarr_write_kwargs=None,
    zarr_read_kwargs=None,
    file_name_pattern="temp_write_split",
    verbose=False,
):
    """Write `ds_in` to disk as several temporary zarr stores and reload it.

    Parameters
    ----------
    ds_in : xr.Dataset
        input
    folder : pathlib.Path
        Target folder for temporary files
    method : str, optional
        Defines if the temporary files are split by an increment along a certain
        dimension("dimension") or by the variables of the dataset ("variables"),
        by default "dimension"
    dim : str, optional
        Dimension to split along (only relevant for `method="dimension"`), by default "time"
    split_interval : int, optional
        Steps along `dim` for each temporary file (only relevant for `method="dimension"`), by default 40
    zarr_write_kwargs : dict, optional
        Kwargs parsed to `xr.to_zarr()`, by default None (treated as empty dict)
    zarr_read_kwargs : dict, optional
        Kwargs parsed to `xr.open_zarr()`, by default None (treated as empty dict)
    file_name_pattern : str, optional
        Pattern used to name the temporary files, by default "temp_write_split"
    verbose : bool, optional
        Activates printing, by default False
    Returns
    -------
    ds_out : xr.Dataset
        reloaded dataset, with value identical to `ds_in`
    flist : list
        List of paths to temporary datasets written.
    """
    # BUGFIX: the kwargs previously defaulted to mutable `{}` literals that
    # were shared across calls and mutated by the setdefault() calls below.
    # Copy caller-supplied dicts too, so they are never modified in place.
    zarr_write_kwargs = {} if zarr_write_kwargs is None else dict(zarr_write_kwargs)
    zarr_read_kwargs = {} if zarr_read_kwargs is None else dict(zarr_read_kwargs)
    zarr_write_kwargs.setdefault("consolidated", False)
    zarr_read_kwargs.setdefault("use_cftime", True)
    zarr_read_kwargs.setdefault("consolidated", False)
    flist = []
    if method == "dimension":
        # Chunk boundaries along `dim`; trailing None captures the remainder.
        split_points = list(range(0, len(ds_in[dim]), split_interval)) + [None]
        if verbose:
            print(f"  Split indicies: {split_points}")
        nsi = len(split_points) - 1
        if fastprogress:
            progress = progress_bar(range(nsi))
        else:
            progress = range(nsi)
        for si in progress:
            fname = folder.joinpath(f"{file_name_pattern}_{si}.zarr")
            # Remove stale stores left over from previous runs.
            if fname.exists():
                shutil.rmtree(fname)
            ds_in.isel({dim: slice(split_points[si], split_points[si + 1])}).to_zarr(
                fname, **zarr_write_kwargs
            )
            flist.append(fname)
        ds_out = xr.concat(
            [xr.open_zarr(f, **zarr_read_kwargs) for f in flist], dim=dim
        )
    elif method == "variables":
        # move all coords to data variables to avoid doubling up the writing
        # for expensive (time resolved) coords
        reset_coords = [co for co in ds_in.coords if co not in ds_in.dims]
        ds_in = ds_in.reset_coords(reset_coords)
        variables = list(ds_in.data_vars)
        if verbose:
            print(variables)
        for var in variables:
            fname = folder.joinpath(f"{file_name_pattern}_{var}.zarr")
            if fname.exists():
                shutil.rmtree(
                    fname
                )  # can I just overwrite with zarr? This can take long!
            ds_in[var].to_dataset(name=var).to_zarr(fname, **zarr_write_kwargs)
            flist.append(fname)
        ds_out = xr.merge([xr.open_zarr(f, **zarr_read_kwargs) for f in flist])
        ds_out = ds_out.set_coords(reset_coords)
    else:
        raise ValueError(f"Method '{method}' not recognized.")
    return ds_out, flist
def maybe_create_folder(path):
    """Ensure that `path` exists as a directory.

    Creates the directory (including parents) when missing; emits a
    UserWarning when it is already present. Returns the pathlib.Path.
    """
    target = pathlib.Path(path)
    if target.exists():
        warnings.warn(f"Folder {path} does already exist.", UserWarning)
        return target
    target.mkdir(parents=True, exist_ok=True)
    return target
def total_nested_size(nested):
    """Calculate the size of a nested dict full of xarray objects
    Parameters
    ----------
    nested : dict
        Input dictionary. Can have arbitrary nesting levels
    Returns
    -------
    float
        total size in bytes
    """
    sizes = []

    def _collect(node):
        # Leaves are xarray objects; anything else is treated as a mapping.
        if isinstance(node, (xr.Dataset, xr.DataArray)):
            sizes.append(node.nbytes)
        else:
            for child in node.values():
                _collect(child)

    _collect(nested)
    return np.sum(np.array(sizes))
def _maybe_pathlib(path):
if not isinstance(path, pathlib.PosixPath):
path = pathlib.Path(path)
return path
def _file_iszarr(path):
if ".nc" in str(path):
zarr = False
elif ".zarr" in str(path):
zarr = True
return zarr
def file_exist_check(filepath, check_zarr_consolidated_complete=True):
    """Check if a file exists, with some extra checks for zarr files
    Parameters
    ----------
    filepath : path
        path to the file to check
    check_zarr_consolidated_complete : bool, optional
        Check if .zmetadata file was written (consolidated metadata), by default True
    """
    target = _maybe_pathlib(filepath)
    is_zarr = _file_iszarr(target)
    if not target.exists():
        return False
    if is_zarr and check_zarr_consolidated_complete:
        # A zarr store only counts as complete once its consolidated
        # metadata marker has been written.
        return target.joinpath(".zmetadata").exists()
    return True
def checkpath(func):
    """Decorator for dataset writers of signature func(ds, path, **kwargs).

    Adds skip-if-exists / overwrite handling around the wrapped writer and
    optionally reloads the written file, inferring netcdf vs zarr from the
    file name. The wrapper consumes the kwargs `overwrite`,
    `check_zarr_consolidated_complete`, `reload_saved`, `write_kwargs` and
    `load_kwargs`; everything else is passed on untouched.
    """
    @functools.wraps(func)
    def wrapper_checkpath(*args, **kwargs):
        # Positional contract: args[0] is the dataset, args[1] the target path.
        ds = args[0]
        path = _maybe_pathlib(args[1])
        # Pop the wrapper-level options so they are not forwarded to func.
        overwrite = kwargs.pop("overwrite", False)
        check_zarr_consolidated_complete = kwargs.pop(
            "check_zarr_consolidated_complete", False
        )
        reload_saved = kwargs.pop("reload_saved", True)
        write_kwargs = kwargs.pop("write_kwargs", {})
        load_kwargs = kwargs.pop("load_kwargs", {})
        load_kwargs.setdefault("use_cftime", True)
        load_kwargs.setdefault("consolidated", True)
        # Keep write/read consolidation settings in sync by default.
        write_kwargs.setdefault("consolidated", load_kwargs["consolidated"])
        zarr = _file_iszarr(path)
        check = file_exist_check(
            path, check_zarr_consolidated_complete=check_zarr_consolidated_complete
        )
        # Skip writing when a (complete) file already exists and overwriting
        # was not requested.
        if check and not overwrite:
            print(f"File [{str(path)}] already exists. Skipping.")
        else:
            # An incomplete file may still exist (e.g. missing .zmetadata);
            # remove it before rewriting.
            if path.exists():
                print(f"Removing file {str(path)}")
                if zarr:
                    shutil.rmtree(path)
                else:
                    path.unlink()
            func(ds, path, **write_kwargs)
        # Optionally hand back the on-disk version instead of the in-memory one
        # (useful for quality control of what was actually written).
        ds_out = ds
        if reload_saved:
            print(f"$ Reloading file")
            consolidated = load_kwargs.pop("consolidated")
            if not zarr:
                ds_out = xr.open_dataset(str(path), **load_kwargs)
            else:
                ds_out = xr.open_zarr(
                    str(path), consolidated=consolidated, **load_kwargs
                )
        return ds_out
    return wrapper_checkpath
@checkpath
def write(ds, path, print_size=True, consolidated=True, **kwargs):
    """Convenience wrapper to save large datasets to netcdf or zarr.

    The output format is inferred from the file ending of `path` (`.nc` for
    netcdf, `.zarr` for zarr). On top of the plain `xr.to_netcdf()` /
    `xr.to_zarr()` call, the `@checkpath` decorator adds:

    1. Existence checks (with special handling for zarr stores).
    2. Handling of existing files via the `overwrite` keyword.
    3. Optional reloading of the written file via `reload_saved`.

    Additionally this function validates the attrs for values that cannot
    be serialized and optionally prints the dataset size.

    Parameters
    ----------
    ds : xr.Dataset
        Input dataset
    path : pathlib.Path
        filepath to save to. Ending determines the output type (`.nc` for
        netcdf, `.zarr` for zarr)
    print_size : bool, optional
        If true prints the size of the dataset before saving, by default True
    consolidated : bool, optional
        Write consolidated metadata for zarr stores, by default True
    reload_saved : bool, optional
        (consumed by the decorator) If true the returned dataset is opened
        from the written file, otherwise the input is returned, by default True
    open_kwargs / write_kwargs : dict
        (consumed by the decorator) Arguments passed to the reloading /
        writing function chosen from the filename
    overwrite : bool, optional
        (consumed by the decorator) If True, overwrite existing files,
        by default False
    check_zarr_consolidated_complete : bool, optional
        (consumed by the decorator) If True check for `.zmetadata` in zarr
        stores and overwrite when missing, by default False

    Returns
    -------
    xr.Dataset
        Either the unmodified input dataset or a reloaded version from the
        written file
    """
    # xarray objects stored in attrs cannot be serialized -- fail loudly.
    for k, v in ds.attrs.items():
        if isinstance(v, (xr.Dataset, xr.DataArray)):
            raise RuntimeError(f"Found an attrs ({k}) in with xarray values:{v}.")
    zarr = _file_iszarr(path)
    if print_size:
        print(f"$ Saving {ds.nbytes/1e9}GB to {path}")
    if zarr:
        ds.to_zarr(path, consolidated=consolidated, **kwargs)
    else:
        ds.to_netcdf(path, **kwargs)
| [
"pathlib.Path",
"numpy.array",
"functools.wraps",
"xarray.open_zarr",
"shutil.rmtree",
"warnings.warn"
] | [((3633, 3651), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (3645, 3651), False, 'import pathlib\n'), ((5324, 5345), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (5339, 5345), False, 'import functools\n'), ((3738, 3802), 'warnings.warn', 'warnings.warn', (['f"""Folder {path} does already exist."""', 'UserWarning'], {}), "(f'Folder {path} does already exist.', UserWarning)\n", (3751, 3802), False, 'import warnings\n'), ((4354, 4368), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (4362, 4368), True, 'import numpy as np\n'), ((4461, 4479), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (4473, 4479), False, 'import pathlib\n'), ((2321, 2341), 'shutil.rmtree', 'shutil.rmtree', (['fname'], {}), '(fname)\n', (2334, 2341), False, 'import shutil\n'), ((2558, 2593), 'xarray.open_zarr', 'xr.open_zarr', (['f'], {}), '(f, **zarr_read_kwargs)\n', (2570, 2593), True, 'import xarray as xr\n'), ((3139, 3159), 'shutil.rmtree', 'shutil.rmtree', (['fname'], {}), '(fname)\n', (3152, 3159), False, 'import shutil\n'), ((3392, 3427), 'xarray.open_zarr', 'xr.open_zarr', (['f'], {}), '(f, **zarr_read_kwargs)\n', (3404, 3427), True, 'import xarray as xr\n'), ((6554, 6573), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (6567, 6573), False, 'import shutil\n')] |
import numpy as np
import glob
from PIL import Image
from matplotlib import pyplot as plt
''' to determine accuracy of the predictions '''
''' since only 10 ground truth labels are created '''
# Pair ground-truth and prediction files by sorted name so the two lists line
# up deterministically (bare glob order is filesystem-dependent) and keep the
# first 11 of each, matching the original i <= 10 filter.
target_files = sorted(glob.glob("/Users/shivani/Documents/approach2_testresults/ground_truth_labels/*.jpg"))[:11]
prediction_files = sorted(glob.glob("/Users/shivani/Documents/approach2_testresults/labels/*.jpg"))[:11]
accuracy_arr = np.zeros(11)  # slot 0 stays 0; slots 1..10 hold per-sample accuracy
for index in range(0, 10, 1):
    target_file = Image.open(target_files[index])
    pred_file = Image.open(prediction_files[index])
    target_file_arr = np.asarray(target_file)
    pred_2D = np.asarray(pred_file)
    # Collapse RGB to one channel: truncated integer mean of the three
    # channels, matching the original per-pixel temp / 3 stored into uint8.
    target_2D = (target_file_arr.astype(np.uint16).sum(axis=2) // 3).astype('uint8')
    # Foreground mask: any pixel darker than 254.
    target = target_2D < 254
    prediction = pred_2D < 254
    # Confusion-matrix counts, vectorised over the masks.
    TP = np.count_nonzero(target & prediction)
    TN = np.count_nonzero(~target & ~prediction)
    FN = np.count_nonzero(target & ~prediction)
    # Bug fix: false positives (prediction fires, target does not) were
    # previously added to FN, leaving FP permanently zero.
    FP = np.count_nonzero(~target & prediction)
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    print(accuracy)
    accuracy_arr[index + 1] = accuracy
plt.plot(accuracy_arr)
plt.xlabel('samples')
plt.ylabel('accuracy')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.zeros",
"PIL.Image.open",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((250, 267), 'numpy.zeros', 'np.zeros', (['(11, 4)'], {}), '((11, 4))\n', (258, 267), True, 'import numpy as np\n'), ((287, 377), 'glob.glob', 'glob.glob', (['"""/Users/shivani/Documents/approach2_testresults/ground_truth_labels/*.jpg"""'], {}), "(\n '/Users/shivani/Documents/approach2_testresults/ground_truth_labels/*.jpg')\n", (296, 377), False, 'import glob\n'), ((463, 535), 'glob.glob', 'glob.glob', (['"""/Users/shivani/Documents/approach2_testresults/labels/*.jpg"""'], {}), "('/Users/shivani/Documents/approach2_testresults/labels/*.jpg')\n", (472, 535), False, 'import glob\n'), ((1749, 1771), 'matplotlib.pyplot.plot', 'plt.plot', (['accuracy_arr'], {}), '(accuracy_arr)\n', (1757, 1771), True, 'from matplotlib import pyplot as plt\n'), ((1772, 1793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""samples"""'], {}), "('samples')\n", (1782, 1793), True, 'from matplotlib import pyplot as plt\n'), ((1794, 1816), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (1804, 1816), True, 'from matplotlib import pyplot as plt\n'), ((1817, 1827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1825, 1827), True, 'from matplotlib import pyplot as plt\n'), ((658, 689), 'PIL.Image.open', 'Image.open', (['target_files[index]'], {}), '(target_files[index])\n', (668, 689), False, 'from PIL import Image\n'), ((707, 742), 'PIL.Image.open', 'Image.open', (['prediction_files[index]'], {}), '(prediction_files[index])\n', (717, 742), False, 'from PIL import Image\n'), ((766, 789), 'numpy.asarray', 'np.asarray', (['target_file'], {}), '(target_file)\n', (776, 789), True, 'import numpy as np\n'), ((804, 825), 'numpy.asarray', 'np.asarray', (['pred_file'], {}), '(pred_file)\n', (814, 825), True, 'import numpy as np\n'), ((844, 879), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {'dtype': '"""uint8"""'}), "((512, 512), dtype='uint8')\n", (852, 879), True, 'import numpy as np\n')] |
import numpy as np
from scipy.optimize import curve_fit
from ChromProcess.Utils import deconvolution as d_c
def _1gaussian(x, amp1, cen1, sigma1):
"""
A single gaussian function
Parameters
----------
x: array
x axis data
amp1: float
amplitude of the function
cen1: float
centre of the function (mean)
sigma1: float
width of the function (standard deviation)
Returns
-------
function: numpy array
y values for the function
"""
return (
amp1
* (1 / (sigma1 * (np.sqrt(2 * np.pi))))
* (np.exp(-((x - cen1) ** 2) / ((2 * sigma1) ** 2)))
)
def _2gaussian(x, amp1, cen1, sigma1, amp2, cen2, sigma2):
    """
    Sum of two gaussian components.

    Parameters
    ----------
    x: array
        x axis data
    ampn: float
        amplitude of a component gaussian function
    cenn: float
        centre of a component gaussian function (mean)
    sigman: float
        width of a component gaussian function (standard deviation)

    Returns
    -------
    function: numpy array
        y values for the function
    """
    first = d_c._1gaussian(x, amp1, cen1, sigma1)
    second = d_c._1gaussian(x, amp2, cen2, sigma2)
    return first + second
def _3gaussian(x, amp1, cen1, sigma1, amp2, cen2, sigma2, amp3, cen3, sigma3):
    """Sum of three gaussian components (see _1gaussian for parameter meaning)."""
    tail = d_c._2gaussian(x, amp2, cen2, sigma2, amp3, cen3, sigma3)
    return d_c._1gaussian(x, amp1, cen1, sigma1) + tail
def fit_gaussian_peaks(
    time,
    sig,
    peaks,
    initial_guess=(10000, 1, 0.005),
    lowerbounds=(0, 0, 0.0),
    upperbounds=(1e100, 1, 0.025),
):
    """
    TODO: This kind of function could be useful, but a better adapted function
          for peak deconvolution should be written. The function could take
          similar concepts to this one, but with a different concept for its
          implementation.

    Fitting sums of gaussian peaks to data using supplied peak indices.

    Parameters
    ----------
    time: array
        time values
    sig: array
        signal to be fit to
    peaks: list of peak positions
        list peak positions in the time
    initial_guess: sequence of 3 floats
        Initial guess for the peak amplitude, position and width
        e.g. see _1gaussian() function arguments.
    lowerbounds: sequence of 3 floats
        Lower bounds for the peak amplitude, position and width
        e.g. see _1gaussian() function arguments.
    upperbounds: sequence of 3 floats
        Upper bounds for the peak amplitude, position and width
        e.g. see _1gaussian() function arguments.

    Returns
    -------
    popt: ndarray
        list of fitted values [[amplitude, centre, width],]
    pcov: array
        correlation matrix
    """
    guess = []  # flat list of [amp, cen, sig] triples, one per peak
    lbds = []
    ubds = []
    for p in peaks:
        # Bug fix: the original mutated the (mutable) default argument lists
        # in place, so amplitude/position bounds leaked between calls.
        # Work on fresh copies instead; the defaults are now immutable tuples.
        g = list(initial_guess)
        lb = list(lowerbounds)
        ub = list(upperbounds)
        g[0] = np.amax(sig)  # seed the amplitude at the signal maximum
        g[1] = p  # seed the centre at the supplied peak position
        lb[1] = time[0]  # centre must stay inside the time window
        ub[1] = time[-1]
        guess.extend(g)
        lbds.extend(lb)
        ubds.extend(ub)
    boundarr = [lbds, ubds]
    # Dispatch on the number of peaks; only 1-3 components are supported.
    if len(peaks) == 1:
        popt, pcov = curve_fit(d_c._1gaussian, time, sig, p0=guess, bounds=boundarr)
    elif len(peaks) == 2:
        popt, pcov = curve_fit(d_c._2gaussian, time, sig, p0=guess, bounds=boundarr)
    elif len(peaks) == 3:
        popt, pcov = curve_fit(d_c._3gaussian, time, sig, p0=guess, bounds=boundarr)
    else:
        print("Error fitting peaks")
        popt, pcov = [0, 0, 0], 0
    return popt, pcov
def deconvolute_region(chromatogram, region, num_peaks=1):
    """
    TODO: Combine the ideas in this function with fit_gaussian_peaks()

    Fit gaussian components to a region of a chromatogram.

    Parameters
    ----------
    chromatogram: ChromProcess Chromatogram object
        Chromatogram
    region: list
        region of chromatogram under operation [lower bound, upper bound]

    Returns
    -------
    popt: ndarray
        list of fitted values [[amplitude, centre, width],]
    pcov: array
        correlation matrix
    """
    lower, upper = region[0], region[1]

    # Restrict the chromatogram to the requested retention-time window.
    window = np.where((chromatogram.time > lower) & (chromatogram.time < upper))[0]
    time = chromatogram.time[window]
    signal = chromatogram.signal[window]
    # Baseline-correct using the average of the last few points of the window.
    signal = signal - np.average(signal[-5:-1])

    all_peaks = np.array([*chromatogram.peaks])
    in_window = np.where((all_peaks > lower) & (all_peaks < upper))[0]
    peaks = all_peaks[in_window]

    # Pad with the mean peak position until there are enough seeds,
    # then truncate down to exactly num_peaks.
    while len(peaks) < num_peaks:
        peaks = np.append(peaks, np.average(peaks))
    if len(peaks) > num_peaks:
        peaks = peaks[:num_peaks]

    return d_c.fit_gaussian_peaks(time, signal, peaks)
def deconvolute_peak(peak, chromatogram, num_peaks=2):
    """
    TODO: this function is quite similar in scope to deconvolute_region().
          Refactor with the other two deconvolution macros.

    Fit gaussian components to a single detected peak.

    Parameters
    ----------
    peak: ChromProcess Peak object
        Peak whose indices/retention time define the fit window.
    chromatogram: ChromProcess Chromatogram object
        Chromatogram

    Returns
    -------
    popt: ndarray
        list of fitted values [[amplitude, centre, width],]
    pcov: array
        correlation matrix
    """
    time = chromatogram.time[peak.indices]
    signal = chromatogram.signal[peak.indices]

    # Remove a straight-line baseline drawn between the window's endpoints.
    baseline = np.interp(time, [time[0], time[-1]], [signal[0], signal[-1]])
    corrected = signal - baseline

    # Seed every gaussian component at the peak's retention time.
    seeds = [peak.retention_time] * num_peaks
    return d_c.fit_gaussian_peaks(time, corrected, seeds)
| [
"ChromProcess.Utils.deconvolution._1gaussian",
"numpy.average",
"ChromProcess.Utils.deconvolution.fit_gaussian_peaks",
"ChromProcess.Utils.deconvolution._2gaussian",
"scipy.optimize.curve_fit",
"numpy.amax",
"numpy.where",
"numpy.array",
"numpy.exp",
"numpy.interp",
"numpy.sqrt"
] | [((4426, 4457), 'numpy.array', 'np.array', (['[*chromatogram.peaks]'], {}), '([*chromatogram.peaks])\n', (4434, 4457), True, 'import numpy as np\n'), ((4739, 4782), 'ChromProcess.Utils.deconvolution.fit_gaussian_peaks', 'd_c.fit_gaussian_peaks', (['time', 'signal', 'peaks'], {}), '(time, signal, peaks)\n', (4761, 4782), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((5519, 5580), 'numpy.interp', 'np.interp', (['time', '[time[0], time[-1]]', '[signal[0], signal[-1]]'], {}), '(time, [time[0], time[-1]], [signal[0], signal[-1]])\n', (5528, 5580), True, 'import numpy as np\n'), ((5629, 5672), 'ChromProcess.Utils.deconvolution.fit_gaussian_peaks', 'd_c.fit_gaussian_peaks', (['time', 'signal', 'peaks'], {}), '(time, signal, peaks)\n', (5651, 5672), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((631, 675), 'numpy.exp', 'np.exp', (['(-(x - cen1) ** 2 / (2 * sigma1) ** 2)'], {}), '(-(x - cen1) ** 2 / (2 * sigma1) ** 2)\n', (637, 675), True, 'import numpy as np\n'), ((1203, 1240), 'ChromProcess.Utils.deconvolution._1gaussian', 'd_c._1gaussian', (['x', 'amp1', 'cen1', 'sigma1'], {}), '(x, amp1, cen1, sigma1)\n', (1217, 1240), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((1243, 1280), 'ChromProcess.Utils.deconvolution._1gaussian', 'd_c._1gaussian', (['x', 'amp2', 'cen2', 'sigma2'], {}), '(x, amp2, cen2, sigma2)\n', (1257, 1280), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((1377, 1414), 'ChromProcess.Utils.deconvolution._1gaussian', 'd_c._1gaussian', (['x', 'amp1', 'cen1', 'sigma1'], {}), '(x, amp1, cen1, sigma1)\n', (1391, 1414), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((1417, 1474), 'ChromProcess.Utils.deconvolution._2gaussian', 'd_c._2gaussian', (['x', 'amp2', 'cen2', 'sigma2', 'amp3', 'cen3', 'sigma3'], {}), '(x, amp2, cen2, sigma2, amp3, cen3, sigma3)\n', (1431, 1474), True, 'from ChromProcess.Utils import deconvolution as d_c\n'), ((2934, 2946), 'numpy.amax', 
'np.amax', (['sig'], {}), '(sig)\n', (2941, 2946), True, 'import numpy as np\n'), ((3236, 3299), 'scipy.optimize.curve_fit', 'curve_fit', (['d_c._1gaussian', 'time', 'sig'], {'p0': 'guess', 'bounds': 'boundarr'}), '(d_c._1gaussian, time, sig, p0=guess, bounds=boundarr)\n', (3245, 3299), False, 'from scipy.optimize import curve_fit\n'), ((4207, 4274), 'numpy.where', 'np.where', (['((chromatogram.time > lower) & (chromatogram.time < upper))'], {}), '((chromatogram.time > lower) & (chromatogram.time < upper))\n', (4215, 4274), True, 'import numpy as np\n'), ((4381, 4406), 'numpy.average', 'np.average', (['signal[-5:-1]'], {}), '(signal[-5:-1])\n', (4391, 4406), True, 'import numpy as np\n'), ((4475, 4526), 'numpy.where', 'np.where', (['((peak_list > lower) & (peak_list < upper))'], {}), '((peak_list > lower) & (peak_list < upper))\n', (4483, 4526), True, 'import numpy as np\n'), ((3349, 3412), 'scipy.optimize.curve_fit', 'curve_fit', (['d_c._2gaussian', 'time', 'sig'], {'p0': 'guess', 'bounds': 'boundarr'}), '(d_c._2gaussian, time, sig, p0=guess, bounds=boundarr)\n', (3358, 3412), False, 'from scipy.optimize import curve_fit\n'), ((4637, 4654), 'numpy.average', 'np.average', (['peaks'], {}), '(peaks)\n', (4647, 4654), True, 'import numpy as np\n'), ((3462, 3525), 'scipy.optimize.curve_fit', 'curve_fit', (['d_c._3gaussian', 'time', 'sig'], {'p0': 'guess', 'bounds': 'boundarr'}), '(d_c._3gaussian, time, sig, p0=guess, bounds=boundarr)\n', (3471, 3525), False, 'from scipy.optimize import curve_fit\n'), ((597, 615), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (604, 615), True, 'import numpy as np\n')] |
from typing import List
import logging
import numpy as np
import torch
import yacs.config
from gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D
from .head_pose_estimation import HeadPoseNormalizer, LandmarkEstimator
from gaze_estimation import (GazeEstimationMethod, create_model,
create_transform)
import pdb
import time
logger = logging.getLogger(__name__)  # module-level logger
SHIFT_PIXELS = 0  # debug flag: when truthy, runs the pixel-shift experiment in _run_mpiigaze_model
PRINT_MODEL_PARAMS = 1  # when truthy, print the model's total parameter count after loading
class GazeEstimator:
    """Full gaze-estimation pipeline: face/landmark detection, head-pose
    estimation, image normalization and gaze prediction with a trained model.
    Supports the MPIIGaze (per-eye) and MPIIFaceGaze (whole-face) modes."""
    # Order matters: predictions in _run_mpiigaze_model are indexed by this list.
    EYE_KEYS = [FacePartsName.REYE, FacePartsName.LEYE]
    def __init__(self, config: yacs.config.CfgNode, AVG_LANDMARKS=0, num_frames=None):
        """Build cameras, landmark estimator, normalizer and load the model.

        config: experiment configuration (camera paths, model checkpoint, device).
        AVG_LANDMARKS / num_frames: forwarded to LandmarkEstimator
        (presumably landmark smoothing over frames — confirm in LandmarkEstimator).
        """
        self._config = config
        #pdb.set_trace()
        # Real camera and the virtual camera used for normalized images.
        self.camera = Camera(config.gaze_estimator.camera_params)
        self._normalized_camera = Camera(
            config.gaze_estimator.normalized_camera_params)
        self._landmark_estimator = LandmarkEstimator(config, AVG_LANDMARKS, num_frames)
        self._head_pose_normalizer = HeadPoseNormalizer(
            self.camera, self._normalized_camera,
            self._config.gaze_estimator.normalized_camera_distance)
        self._gaze_estimation_model = self._load_model()
        self._transform = create_transform(config)
    def _load_model(self) -> torch.nn.Module:
        """Create the network, load checkpoint weights and set eval mode."""
        model = create_model(self._config)
        # Load on CPU first; moved to the configured device below.
        checkpoint = torch.load(self._config.gaze_estimator.checkpoint,
                                 map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        model.to(torch.device(self._config.device))
        model.eval()
        if PRINT_MODEL_PARAMS:
            num_params = sum(x.numel() for x in model.parameters())
            num_train = sum(x.numel() for x in model.parameters() if x.requires_grad)
            print('TOTAL nr of params = ', num_params)
            #print('TOTAL nr of trainable params = ', num_train)
        return model
    def detect_faces(self, image: np.ndarray) -> List[Face]:
        """Detect faces and their landmarks in a frame (delegates to the landmark estimator)."""
        return self._landmark_estimator.detect_faces(image)
    def estimate_gaze(self, image: np.ndarray, face: Face) -> None:
        """Run the full per-face pipeline; results are written onto `face` in place.

        The `if 0:` blocks are disabled per-stage timing printouts.
        """
        # Estimation of the head pose rotation matrix (3x3) and the head pose (3D coords)
        pose_time = time.time()
        MODEL3D.estimate_head_pose(face, self.camera)
        if 0:
            print('Pose faces: ', time.time() - pose_time, ' seconds.')
        # Fits the 3D landmark model with head pose rotation matrix and the head pose vector
        pose3d_time = time.time()
        MODEL3D.compute_3d_pose(face)
        if 0:
            print('3D Pose faces: ', time.time() - pose3d_time, ' seconds.')
        # Compute the face center (left right eye and mouth indicies)
        # Compute eye centers using the (corresponding eye indicies)
        center_time = time.time()
        MODEL3D.compute_face_eye_centers(face)
        if 0:
            print('Face center: ', time.time() - center_time, ' seconds.')
        # Image normalization
        if self._config.mode == GazeEstimationMethod.MPIIGaze.name:
            # Per-eye normalization, then the two-eye model.
            norm_time = time.time()
            for key in self.EYE_KEYS:
                eye = getattr(face, key.name.lower())
                # head pose normalizer!
                self._head_pose_normalizer.normalize(image, eye)
            if 0:
                print('Normalization: ', time.time() - norm_time, ' seconds.')
            model_time = time.time()
            self._run_mpiigaze_model(face)
            if 0:
                print('Prediction: ', time.time() - model_time, ' seconds.')
        elif self._config.mode == GazeEstimationMethod.MPIIFaceGaze.name:
            # Whole-face normalization and model.
            self._head_pose_normalizer.normalize(image, face)
            self._run_mpiifacegaze_model(face)
    def _run_mpiigaze_model(self, face: Face) -> None:
        """Predict gaze angles for both eyes and denormalize them onto `face`.

        The right eye is mirrored horizontally (and its head pose negated)
        so both eyes can share one model; the prediction is un-mirrored below.
        """
        images = []
        head_poses = []
        # OWND STuFF
        # Dead debug path: shift_images is only used when SHIFT_PIXELS is truthy
        # (it is 0 at module level), and is never filled before being stacked.
        shift_images = []
        for key in self.EYE_KEYS:
            eye = getattr(face, key.name.lower())
            image = eye.normalized_image
            #pdb.set_trace()
            normalized_head_pose = eye.normalized_head_rot2d
            if key == FacePartsName.REYE:
                # Mirror the right eye and flip the corresponding pose component.
                image = image[:, ::-1]
                normalized_head_pose *= np.array([1, -1])
            image = self._transform(image)
            images.append(image)
            head_poses.append(normalized_head_pose)
        if SHIFT_PIXELS:
            shift_images = torch.stack(shift_images)
        # Batch of 2 (right, left) — same order as EYE_KEYS.
        images = torch.stack(images)
        head_poses = np.array(head_poses).astype(np.float32)
        head_poses = torch.from_numpy(head_poses)
        device = torch.device(self._config.device)
        with torch.no_grad():
            images = images.to(device)
            head_poses = head_poses.to(device)
            if SHIFT_PIXELS:
                # Interactive experiment: shift the eye patch up by 5 pixels
                # and re-run the model (debug only, drops into pdb).
                pdb.set_trace()
                import matplotlib.pyplot as plt
                plt.ion()
                test_img = np.squeeze(image.numpy())
                shift_img = np.zeros((test_img.shape))
                # SHIFT UP
                shift_img[0:-5][:] = test_img[5:][:]
                self._gaze_estimation_model(shift_images, head_poses)
                #torch.Tensor(shift_img).unsqueeze_(0)
            # INPUT IS CONCATENATION OF LEFT AND RIGHT EYE PATCH
            predictions = self._gaze_estimation_model(images, head_poses)
            predictions = predictions.cpu().numpy()
        #
        # Write results back onto each eye; row i matches EYE_KEYS[i].
        for i, key in enumerate(self.EYE_KEYS):
            eye = getattr(face, key.name.lower())
            eye.normalized_gaze_angles = predictions[i]
            if key == FacePartsName.REYE:
                # Undo the horizontal mirroring applied to the right eye above.
                eye.normalized_gaze_angles *= np.array([1, -1])
            eye.angle_to_vector()
            eye.denormalize_gaze_vector()
    def _run_mpiifacegaze_model(self, face: Face) -> None:
        """Predict gaze angles from the whole normalized face and denormalize onto `face`."""
        # pdb.set_trace()
        # Add a batch dimension: the model expects (1, C, H, W).
        image = self._transform(face.normalized_image).unsqueeze(0)
        device = torch.device(self._config.device)
        with torch.no_grad():
            image = image.to(device)
            prediction = self._gaze_estimation_model(image)
        prediction = prediction.cpu().numpy()
        face.normalized_gaze_angles = prediction[0]
        face.angle_to_vector()
        face.denormalize_gaze_vector()
| [
"gaze_estimation.gaze_estimator.common.MODEL3D.estimate_head_pose",
"gaze_estimation.create_transform",
"gaze_estimation.gaze_estimator.common.MODEL3D.compute_face_eye_centers",
"gaze_estimation.gaze_estimator.common.Camera",
"gaze_estimation.gaze_estimator.common.MODEL3D.compute_3d_pose",
"torch.stack",
... | [((397, 424), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (414, 424), False, 'import logging\n'), ((708, 751), 'gaze_estimation.gaze_estimator.common.Camera', 'Camera', (['config.gaze_estimator.camera_params'], {}), '(config.gaze_estimator.camera_params)\n', (714, 751), False, 'from gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D\n'), ((786, 840), 'gaze_estimation.gaze_estimator.common.Camera', 'Camera', (['config.gaze_estimator.normalized_camera_params'], {}), '(config.gaze_estimator.normalized_camera_params)\n', (792, 840), False, 'from gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D\n'), ((1201, 1225), 'gaze_estimation.create_transform', 'create_transform', (['config'], {}), '(config)\n', (1217, 1225), False, 'from gaze_estimation import GazeEstimationMethod, create_model, create_transform\n'), ((1289, 1315), 'gaze_estimation.create_model', 'create_model', (['self._config'], {}), '(self._config)\n', (1301, 1315), False, 'from gaze_estimation import GazeEstimationMethod, create_model, create_transform\n'), ((1337, 1407), 'torch.load', 'torch.load', (['self._config.gaze_estimator.checkpoint'], {'map_location': '"""cpu"""'}), "(self._config.gaze_estimator.checkpoint, map_location='cpu')\n", (1347, 1407), False, 'import torch\n'), ((2192, 2203), 'time.time', 'time.time', ([], {}), '()\n', (2201, 2203), False, 'import time\n'), ((2212, 2257), 'gaze_estimation.gaze_estimator.common.MODEL3D.estimate_head_pose', 'MODEL3D.estimate_head_pose', (['face', 'self.camera'], {}), '(face, self.camera)\n', (2238, 2257), False, 'from gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D\n'), ((2460, 2471), 'time.time', 'time.time', ([], {}), '()\n', (2469, 2471), False, 'import time\n'), ((2480, 2509), 'gaze_estimation.gaze_estimator.common.MODEL3D.compute_3d_pose', 'MODEL3D.compute_3d_pose', (['face'], {}), '(face)\n', (2503, 2509), False, 'from 
gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D\n'), ((2762, 2773), 'time.time', 'time.time', ([], {}), '()\n', (2771, 2773), False, 'import time\n'), ((2782, 2820), 'gaze_estimation.gaze_estimator.common.MODEL3D.compute_face_eye_centers', 'MODEL3D.compute_face_eye_centers', (['face'], {}), '(face)\n', (2814, 2820), False, 'from gaze_estimation.gaze_estimator.common import Camera, Face, FacePartsName, MODEL3D\n'), ((4443, 4462), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (4454, 4462), False, 'import torch\n'), ((4545, 4573), 'torch.from_numpy', 'torch.from_numpy', (['head_poses'], {}), '(head_poses)\n', (4561, 4573), False, 'import torch\n'), ((4592, 4625), 'torch.device', 'torch.device', (['self._config.device'], {}), '(self._config.device)\n', (4604, 4625), False, 'import torch\n'), ((5905, 5938), 'torch.device', 'torch.device', (['self._config.device'], {}), '(self._config.device)\n', (5917, 5938), False, 'import torch\n'), ((1508, 1541), 'torch.device', 'torch.device', (['self._config.device'], {}), '(self._config.device)\n', (1520, 1541), False, 'import torch\n'), ((3033, 3044), 'time.time', 'time.time', ([], {}), '()\n', (3042, 3044), False, 'import time\n'), ((4400, 4425), 'torch.stack', 'torch.stack', (['shift_images'], {}), '(shift_images)\n', (4411, 4425), False, 'import torch\n'), ((4639, 4654), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4652, 4654), False, 'import torch\n'), ((5952, 5967), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5965, 5967), False, 'import torch\n'), ((3368, 3379), 'time.time', 'time.time', ([], {}), '()\n', (3377, 3379), False, 'import time\n'), ((4185, 4202), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (4193, 4202), True, 'import numpy as np\n'), ((4484, 4504), 'numpy.array', 'np.array', (['head_poses'], {}), '(head_poses)\n', (4492, 4504), True, 'import numpy as np\n'), ((4787, 4802), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4800, 
4802), False, 'import pdb\n'), ((4867, 4876), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (4874, 4876), True, 'import matplotlib.pyplot as plt\n'), ((4958, 4982), 'numpy.zeros', 'np.zeros', (['test_img.shape'], {}), '(test_img.shape)\n', (4966, 4982), True, 'import numpy as np\n'), ((5639, 5656), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (5647, 5656), True, 'import numpy as np\n'), ((2306, 2317), 'time.time', 'time.time', ([], {}), '()\n', (2315, 2317), False, 'import time\n'), ((2561, 2572), 'time.time', 'time.time', ([], {}), '()\n', (2570, 2572), False, 'import time\n'), ((2870, 2881), 'time.time', 'time.time', ([], {}), '()\n', (2879, 2881), False, 'import time\n'), ((3301, 3312), 'time.time', 'time.time', ([], {}), '()\n', (3310, 3312), False, 'import time\n'), ((3479, 3490), 'time.time', 'time.time', ([], {}), '()\n', (3488, 3490), False, 'import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Load comma-separated columns and scatter-plot three column pairings
# (3 vs 4, 4 vs 5, 3 vs 5) on one figure.
x = np.loadtxt("data.txt", delimiter=",")
for col_a, col_b in ((3, 4), (4, 5), (3, 5)):
    plt.scatter(x[:, col_a], x[:, col_b])
plt.show()
| [
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.loadtxt"
] | [((57, 94), 'numpy.loadtxt', 'np.loadtxt', (['"""data.txt"""'], {'delimiter': '""","""'}), "('data.txt', delimiter=',')\n", (67, 94), True, 'import numpy as np\n'), ((95, 124), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 3]', 'x[:, 4]'], {}), '(x[:, 3], x[:, 4])\n', (106, 124), True, 'import matplotlib.pyplot as plt\n'), ((125, 154), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 4]', 'x[:, 5]'], {}), '(x[:, 4], x[:, 5])\n', (136, 154), True, 'import matplotlib.pyplot as plt\n'), ((155, 184), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 3]', 'x[:, 5]'], {}), '(x[:, 3], x[:, 5])\n', (166, 184), True, 'import matplotlib.pyplot as plt\n'), ((186, 196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (194, 196), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""DesicionTree.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1se7pQXAOQnadOsiQ7Kjha-ksznl9OGm8
"""

import pandas as pd
from sklearn import tree
from sklearn.model_selection import train_test_split

from google.colab import drive
#drive.mount('/content/drive')
drive.mount("/content/drive", force_remount=True)

data = pd.read_pickle('drive/MyDrive/Colab Notebooks/working_balanced_df.pkl')

data.describe()

data.info()  # bug fix: was `data.info` (a bare method reference, never called)

# FOREST_AREA = -1 means no fire
data['FOREST_AREA'] = data['FOREST_AREA'].apply(lambda x: 1 if x >= 10 else 0) # 1 = fire | 0 = Not fire

data.shape[0]

data['FOREST_AREA'].value_counts() # The balanced dataset

working_data = data.to_numpy()

# Keep the feature matrix and label vector in their own names so later loop
# variables cannot clobber them (the original reused `x` as a loop index and
# then called clf.predict_proba(x) on an integer).
features = data[['TEMPERATURE', 'SPEED', 'DEW']].to_numpy()
labels = data['FOREST_AREA'].to_numpy()

x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=0)

clf = tree.DecisionTreeClassifier(max_depth=10)
clf.fit(x_train, y_train)

import pickle
# save the classifier (with-block fixes the file handle leak)
with open('desicion_tree.pkl', 'wb') as fh:
    pickle.dump(clf, fh)

score = clf.score(x_test, y_test)
score

#predictions = clf.predict(x_test)
predictions_proba = clf.predict_proba(x_test)

predictions_proba[:10]

# Column 1 is the probability of the positive (fire) class.
predictions = predictions_proba[:, 1]

predictions[:5]

import numpy as np
# Threshold the fire probability at 0.2 and encode as 0/1 labels
# (replaces the manual `is True` loop).
predictions_2 = np.asarray([1 if p > 0.2 else 0 for p in predictions])

from sklearn import metrics
cm = metrics.confusion_matrix(y_test, predictions_2)

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20,10))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score: {0}'.format(score)
plt.title(all_sample_title, size = 15);

# Score the whole balanced dataset, not an integer loop leftover.
probs = clf.predict_proba(features)

type(probs)

probs.shape

probs[probs[:,1] > 0.5, :].shape[0]

# Upper probability bound for each risk bucket (checked in order below).
risk_levels = {
    'very_low': 0.10,
    'low': 0.30,
    'medium': 0.40,
    'high': 0.60,
    'very_high': 0.70
}

very_low = 0
low = 0
medium = 0
high = 0
very_high = 0
for row in probs.tolist():
    fire_prob = row[1]
    if fire_prob <= risk_levels.get('very_low'):
        very_low += 1
    elif fire_prob <= risk_levels.get('low'):
        low += 1
    elif fire_prob <= risk_levels.get('medium'):
        medium += 1
    elif fire_prob <= risk_levels.get('high'):
        high += 1
    else:
        very_high += 1

# Renamed from `all`, which shadowed the builtin of the same name.
total = very_low + low + medium + high + very_high

total # Same size to the initial dataset

p_very_low = round(very_low/total*100, 2)
p_low = round(low/total*100, 2)
p_medium = round(medium/total*100, 2)
p_high = round(high/total*100, 2)
p_very_high = round(very_high/total*100, 2)

print(f"Very Low: {p_very_low} %")
print(f"Low: {p_low} %")
print(f"Medium: {p_medium} %")
print(f"High: {p_high} %")
print(f"Very High: {p_very_high} %")

"""# The initial dataset had 49.6% fire rows (155.611 out of 313.274).

#Our model predicts that for this dataset 54.16% have High/Very High probability for a wildfire incident.
"""

clf.predict_proba([[32, 7, 17]])

clf.predict_proba(x_test[:5])

data_2 = pd.read_pickle('drive/MyDrive/Colab Notebooks/working_balanced_df.pkl')

fire_df = data_2.loc[(data_2['FOREST_AREA'] >= 0)]

fire_df['SPEED'].value_counts()

fire_df = data_2.loc[(data_2['FOREST_AREA'] >= 10)]

ax = fire_df['SPEED'].plot.hist(bins=20, alpha=1)
"matplotlib.pyplot.title",
"seaborn.heatmap",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"pandas.read_pickle",
"matplotlib.pyplot.ylabel",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"google.colab.drive.mount",
"sklearn.metrics.confusion_matrix",
"matplot... | [((361, 410), 'google.colab.drive.mount', 'drive.mount', (['"""/content/drive"""'], {'force_remount': '(True)'}), "('/content/drive', force_remount=True)\n", (372, 410), False, 'from google.colab import drive\n'), ((419, 490), 'pandas.read_pickle', 'pd.read_pickle', (['"""drive/MyDrive/Colab Notebooks/working_balanced_df.pkl"""'], {}), "('drive/MyDrive/Colab Notebooks/working_balanced_df.pkl')\n", (433, 490), True, 'import pandas as pd\n'), ((905, 959), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(x, y, test_size=0.25, random_state=0)\n', (921, 959), False, 'from sklearn.model_selection import train_test_split\n'), ((967, 1008), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': '(10)'}), '(max_depth=10)\n', (994, 1008), False, 'from sklearn import tree\n'), ((1586, 1611), 'numpy.asarray', 'np.asarray', (['predictions_2'], {}), '(predictions_2)\n', (1596, 1611), True, 'import numpy as np\n'), ((1647, 1694), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'predictions_2'], {}), '(y_test, predictions_2)\n', (1671, 1694), False, 'from sklearn import metrics\n'), ((1751, 1779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1761, 1779), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1867), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".3f"""', 'linewidths': '(0.5)', 'square': '(True)', 'cmap': '"""Blues_r"""'}), "(cm, annot=True, fmt='.3f', linewidths=0.5, square=True, cmap=\n 'Blues_r')\n", (1790, 1867), True, 'import seaborn as sns\n'), ((1867, 1893), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual label"""'], {}), "('Actual label')\n", (1877, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (1905, 
1924), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2017), 'matplotlib.pyplot.title', 'plt.title', (['all_sample_title'], {'size': '(15)'}), '(all_sample_title, size=15)\n', (1990, 2017), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3383), 'pandas.read_pickle', 'pd.read_pickle', (['"""drive/MyDrive/Colab Notebooks/working_balanced_df.pkl"""'], {}), "('drive/MyDrive/Colab Notebooks/working_balanced_df.pkl')\n", (3326, 3383), True, 'import pandas as pd\n')] |
from __future__ import print_function
import time
import os
os.chdir("d:/assignment")
import json
import matplotlib.pyplot as plt
import tensorflow as tf
from utils.classifiers.squeezenet import SqueezeNet
from utils.data_utils import load_tiny_imagenet
from utils.image_utils import SQUEEZENET_MEAN, SQUEEZENET_STD
from utils.data_utils import load_imagenet_val
from scipy.ndimage.filters import gaussian_filter1d
import numpy as np
from utils.image_utils import preprocess_image, deprocess_image
import PIL.Image as pilimg
plt.rcParams['figure.figsize'] = (10.0, 8.0)  # default figure size in inches
plt.rcParams['image.interpolation'] = 'nearest'  # no smoothing when displaying images
plt.rcParams['image.cmap'] = 'gray'  # default colormap for single-channel images
def get_session():
    """Create and return a TensorFlow session whose GPU memory-growth
    option is explicitly disabled."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = False
    return tf.Session(config=session_config)
tf.reset_default_graph() # drop any graph already held in memory
sess = get_session() # start the TF session
SAVE_PATH = 'utils/datasets/squeezenet.ckpt' # pre-trained model checkpoint
model = SqueezeNet(save_path=SAVE_PATH, sess=sess)
### load imagenet ###
X_raw, y, class_names = load_imagenet_val(num=5) # load 5 ImageNet validation images
def imageNetVis():
    """Show the five loaded ImageNet validation images in a row,
    each titled with its class name."""
    plt.figure(figsize=(12, 6))
    for slot, (img, label) in enumerate(zip(X_raw, y), start=1):
        plt.subplot(1, 5, slot)
        plt.imshow(img)
        plt.title(class_names[label])
        plt.axis('off')
    plt.gcf().tight_layout()
def blurImage(X, sigma=1):
    """
    Blur a batch of images by smoothing along the two spatial axes
    (axis 1, then axis 2) with a 1-D Gaussian filter.

    Parameters
    ----------
    X : ndarray
        Image batch; axes 1 and 2 are treated as the spatial dimensions.
    sigma : float
        Blur strength (Gaussian standard deviation).

    Returns
    -------
    ndarray
        The blurred images.
    """
    for axis in (1, 2):
        X = gaussian_filter1d(X, sigma, axis=axis)
    return X
def computeMapping(X, y, model):
    '''
    function:
        Compute a saliency map: how strongly each input pixel affects the
        score of the correct class. Evaluated with the module-level `sess`.
    input:
        - X: input images (N, H, W, 3)
        - y: Labels shape (N, )
        - model: squeeze Net
    Return:
        - map: (N, H, W)
    '''
    mapping = None
    # Per-example correct-class score: gather classifier[i, labels[i]].
    correct_scores = tf.gather_nd(model.classifier,
                                  tf.stack((tf.range(X.shape[0]), model.labels), axis=1))
    dX = tf.gradients(correct_scores, model.image) # gradient of the scores w.r.t. the input image
    mapping_abs = tf.abs(dX) # absolute value -- negative gradients are as informative as positive ones
    mapping_max = tf.reduce_max(mapping_abs, axis=4) # strongest response among the RGB channels
    mapping_squeeze = tf.squeeze(mapping_max)
    # Run the graph with the global session, feeding images and labels.
    mapping = sess.run(mapping_squeeze, feed_dict={model.image:X, model.labels:y})
    return mapping
def visualizeMapping(X, y, mask):
    """Plot selected images (top row) and their saliency maps (bottom row).

    Args:
        X: preprocessed input images.
        y: integer class labels.
        mask: indices of the examples to display.

    Returns:
        The computed saliency maps.
    """
    mask = np.asarray(mask)
    Xm, ym = X[mask], y[mask]
    smap = computeMapping(Xm, ym, model)
    print(smap.shape)
    n = mask.size
    for i in range(n):
        # Top row: the original (deprocessed) image.
        plt.subplot(2, n, i + 1)
        plt.imshow(deprocess_image(Xm[i]))
        plt.axis('off')
        plt.title(class_names[ym[i]])
        # Bottom row: the saliency map for the same example.
        plt.subplot(2, n, n + i + 1)
        plt.title(mask[i])
        plt.imshow(smap[i])
        plt.axis('off')
        plt.gcf().set_size_inches(10, 4)
    plt.show()
    return smap
X = np.array([preprocess_image(img) for img in X_raw])  # preprocess all 5 validation images
mask = np.arange(5)  # show all 5 examples
mapping = visualizeMapping(X, y, mask)
# NOTE(review): assumes test.jpg is exactly 224x224 RGB -- the reshape fails otherwise
test_image = np.array(pilimg.open("test.jpg")).reshape(1,224, 224, 3)
test_image = preprocess_image(test_image)
y_hat = sess.run(model.classifier, feed_dict={model.image:test_image})  # class scores for the test image
np.argmax(y_hat)  # predicted class index (value is discarded here)
idx = np.array([664])  # ImageNet class id to explain -- presumably the predicted class; verify
mask = np.arange(1)  # single example
mapping = visualizeMapping(test_image, idx, mask)
def createClassVisualization(target_y, model, **kwargs):
    '''
    function:
        Generate, by gradient ascent in pixel space, an image that maximally
        activates the score of the target class in the trained model.
    inputs:
        - target_y: target class index (one-hot position)
        - model: pretrained model (squeezeNet)
    returns:
    '''
    l2_reg = kwargs.pop('l2_reg', 1e-3)  # L2 regularisation strength on the image
    learning_Rate = kwargs.pop('learning_rate', 25)  # large step size for pixel-space ascent
    # NOTE(review): the kwarg key is 'num_iteration' (singular) while the local is
    # num_iterations -- callers passing 'num_iterations' are silently ignored; confirm.
    num_iterations = kwargs.pop('num_iteration', 100)
    blur_every = kwargs.pop('blur_every', 10)  # periodic blur acts as a smoothness prior
    max_jitter = kwargs.pop('max_jitter', 16)  # random shift range for jitter regularisation
    show_every = kwargs.pop('show_every', 25)
    # NOTE(review): shape (224, 244, 3) looks like a typo for (224, 224, 3) -- confirm.
    X = 255 * np.random.rand(224, 244, 3)  # random initial image in pixel range
    X = preprocess_image(X)[None]
| [
"matplotlib.pyplot.title",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.argmax",
"tensorflow.reset_default_graph",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"numpy.arange",
"utils.classifiers.squeezenet.SqueezeNet",
"tensorflow.reduce_max",
"os.chdir",
"tensorflow.abs",
"matpl... | [((60, 85), 'os.chdir', 'os.chdir', (['"""d:/assignment"""'], {}), "('d:/assignment')\n", (68, 85), False, 'import os\n'), ((866, 890), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (888, 890), True, 'import tensorflow as tf\n'), ((1023, 1065), 'utils.classifiers.squeezenet.SqueezeNet', 'SqueezeNet', ([], {'save_path': 'SAVE_PATH', 'sess': 'sess'}), '(save_path=SAVE_PATH, sess=sess)\n', (1033, 1065), False, 'from utils.classifiers.squeezenet import SqueezeNet\n'), ((1114, 1138), 'utils.data_utils.load_imagenet_val', 'load_imagenet_val', ([], {'num': '(5)'}), '(num=5)\n', (1131, 1138), False, 'from utils.data_utils import load_imagenet_val\n'), ((3323, 3335), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3332, 3335), True, 'import numpy as np\n'), ((3459, 3487), 'utils.image_utils.preprocess_image', 'preprocess_image', (['test_image'], {}), '(test_image)\n', (3475, 3487), False, 'from utils.image_utils import preprocess_image, deprocess_image\n'), ((3559, 3575), 'numpy.argmax', 'np.argmax', (['y_hat'], {}), '(y_hat)\n', (3568, 3575), True, 'import numpy as np\n'), ((3582, 3597), 'numpy.array', 'np.array', (['[664]'], {}), '([664])\n', (3590, 3597), True, 'import numpy as np\n'), ((3605, 3617), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (3614, 3617), True, 'import numpy as np\n'), ((745, 761), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (759, 761), True, 'import tensorflow as tf\n'), ((820, 845), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (830, 845), True, 'import tensorflow as tf\n'), ((1247, 1274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1257, 1274), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1705), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['X', 'sigma'], {'axis': '(1)'}), '(X, sigma, axis=1)\n', (1687, 1705), False, 'from scipy.ndimage.filters import 
gaussian_filter1d\n'), ((1714, 1749), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['X', 'sigma'], {'axis': '(2)'}), '(X, sigma, axis=2)\n', (1731, 1749), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((2193, 2234), 'tensorflow.gradients', 'tf.gradients', (['correct_scores', 'model.image'], {}), '(correct_scores, model.image)\n', (2205, 2234), True, 'import tensorflow as tf\n'), ((2266, 2276), 'tensorflow.abs', 'tf.abs', (['dX'], {}), '(dX)\n', (2272, 2276), True, 'import tensorflow as tf\n'), ((2320, 2354), 'tensorflow.reduce_max', 'tf.reduce_max', (['mapping_abs'], {'axis': '(4)'}), '(mapping_abs, axis=4)\n', (2333, 2354), True, 'import tensorflow as tf\n'), ((2394, 2417), 'tensorflow.squeeze', 'tf.squeeze', (['mapping_max'], {}), '(mapping_max)\n', (2404, 2417), True, 'import tensorflow as tf\n'), ((2709, 2725), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (2719, 2725), True, 'import numpy as np\n'), ((3226, 3236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3234, 3236), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1329), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (1316, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1357), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_raw[i]'], {}), '(X_raw[i])\n', (1347, 1357), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1394), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (1375, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1418), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1411, 1418), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2944), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'mask.size', '(i + 1)'], {}), '(2, mask.size, i + 1)\n', (2923, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3009), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), 
"('off')\n", (3002, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3047), 'matplotlib.pyplot.title', 'plt.title', (['class_names[ym[i]]'], {}), '(class_names[ym[i]])\n', (3027, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3100), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'mask.size', '(mask.size + i + 1)'], {}), '(2, mask.size, mask.size + i + 1)\n', (3067, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3108, 3126), 'matplotlib.pyplot.title', 'plt.title', (['mask[i]'], {}), '(mask[i])\n', (3117, 3126), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3157), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mapping[i]'], {}), '(mapping[i])\n', (3145, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3181), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3174, 3181), True, 'import matplotlib.pyplot as plt\n'), ((3275, 3296), 'utils.image_utils.preprocess_image', 'preprocess_image', (['img'], {}), '(img)\n', (3291, 3296), False, 'from utils.image_utils import preprocess_image, deprocess_image\n'), ((4247, 4274), 'numpy.random.rand', 'np.random.rand', (['(224)', '(244)', '(3)'], {}), '(224, 244, 3)\n', (4261, 4274), True, 'import numpy as np\n'), ((4323, 4342), 'utils.image_utils.preprocess_image', 'preprocess_image', (['X'], {}), '(X)\n', (4339, 4342), False, 'from utils.image_utils import preprocess_image, deprocess_image\n'), ((1423, 1432), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1430, 1432), True, 'import matplotlib.pyplot as plt\n'), ((2962, 2984), 'utils.image_utils.deprocess_image', 'deprocess_image', (['Xm[i]'], {}), '(Xm[i])\n', (2977, 2984), False, 'from utils.image_utils import preprocess_image, deprocess_image\n'), ((3398, 3421), 'PIL.Image.open', 'pilimg.open', (['"""test.jpg"""'], {}), "('test.jpg')\n", (3409, 3421), True, 'import PIL.Image as pilimg\n'), ((2138, 2158), 'tensorflow.range', 'tf.range', (['X.shape[0]'], {}), '(X.shape[0])\n', (2146, 2158), True, 'import 
tensorflow as tf\n'), ((3190, 3199), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3197, 3199), True, 'import matplotlib.pyplot as plt\n')] |
from math import sqrt
import numpy as np
import torch
from scipy.stats import truncnorm
from ...support import utilities
class MultiScalarTruncatedNormalDistribution:
    """Element-wise normal distribution truncated to non-negative support.

    All elements share a single variance; the mean is an array assigned via
    ``set_mean``. The likelihood helpers return only the observation-dependent
    part of the log-density (normalisation constants are dropped).
    """

    ####################################################################################################################
    ### Constructor:
    ####################################################################################################################

    def __init__(self, variance=None, std=None):
        # mean is an array of per-element means, assigned later via set_mean()
        self.mean = None
        if variance is not None:
            self.set_variance(variance)
        elif std is not None:
            self.set_variance_sqrt(std)

    ####################################################################################################################
    ### Encapsulation methods:
    ####################################################################################################################

    def get_mean(self):
        """Return the array of per-element means."""
        return self.mean

    def set_mean(self, m):
        """Set the array of per-element means."""
        self.mean = m

    def get_variance_sqrt(self):
        """Return the shared standard deviation."""
        return self.variance_sqrt

    def set_variance_sqrt(self, std):
        """Set the shared standard deviation and cache the inverse variance."""
        self.variance_sqrt = std
        self.variance_inverse = 1.0 / std ** 2

    def set_variance(self, var):
        """Set the shared variance and cache its square root and inverse."""
        self.variance_sqrt = sqrt(var)
        self.variance_inverse = 1.0 / var

    def get_expected_mean(self):
        """Return the expectation of the truncated normal (scalar mean only)."""
        assert len(self.mean) == 1  # Only coded case for now.
        mean = self.mean[0]
        # truncnorm takes its bounds in standardised units: the lower bound
        # -mean/std places the truncation at zero.
        # NOTE(review): the upper bound 100.0 * variance_sqrt is also interpreted
        # in standardised units, so it scales with sigma; an effectively infinite
        # bound may have been intended -- confirm.
        return float(truncnorm.stats(- mean / self.variance_sqrt, 100.0 * self.variance_sqrt,
                                     loc=mean, scale=self.variance_sqrt, moments='m'))

    ####################################################################################################################
    ### Public methods:
    ####################################################################################################################

    def sample(self):
        """Draw one truncated-normal sample per element of the mean array."""
        out = np.zeros(self.mean.shape)
        for index, mean in np.ndenumerate(self.mean):
            # lower bound -mean/std truncates the support at zero; upper is +inf
            out[index] = truncnorm.rvs(- mean / self.variance_sqrt, float('inf'), loc=mean, scale=self.variance_sqrt)
        return out

    def compute_log_likelihood(self, observation):
        """
        Fully numpy method.
        Returns only the part of the log-likelihood that depends on the
        observation; any negative observation is outside the support (-inf).
        """
        assert self.mean.size == 1 or self.mean.shape == observation.shape
        if np.min(observation) < 0.0:
            return - float('inf')
        else:
            delta = observation.ravel() - self.mean.ravel()
            return - 0.5 * self.variance_inverse * np.sum(delta ** 2)

    def compute_log_likelihood_torch(self, observation, tensor_scalar_type, device='cpu'):
        """
        Fully torch method.
        Returns only the part of the log-likelihood that depends on the
        observation; any negative observation is outside the support (-inf).
        """
        # Move both mean and observation into torch tensors of the caller's type.
        mean = utilities.move_data(self.mean, dtype=tensor_scalar_type, requires_grad=False, device=device)
        observation = utilities.move_data(observation, dtype=tensor_scalar_type, device=device)
        assert mean.detach().cpu().numpy().size == observation.detach().cpu().numpy().size, \
            'mean.detach().cpu().numpy().size = %d, \t observation.detach().cpu().numpy().size = %d' \
            % (mean.detach().cpu().numpy().size, observation.detach().cpu().numpy().size)
        if np.min(observation.detach().cpu().numpy()) < 0.0:
            return torch.sum(tensor_scalar_type([- float('inf')]))
        else:
            delta = observation.contiguous().view(-1, 1) - mean.contiguous().view(-1, 1)
            return -0.5 * torch.sum(delta ** 2) * self.variance_inverse
| [
"numpy.sum",
"math.sqrt",
"numpy.ndenumerate",
"scipy.stats.truncnorm.stats",
"numpy.zeros",
"numpy.min",
"torch.sum"
] | [((1278, 1287), 'math.sqrt', 'sqrt', (['var'], {}), '(var)\n', (1282, 1287), False, 'from math import sqrt\n'), ((1940, 1965), 'numpy.zeros', 'np.zeros', (['self.mean.shape'], {}), '(self.mean.shape)\n', (1948, 1965), True, 'import numpy as np\n'), ((1993, 2018), 'numpy.ndenumerate', 'np.ndenumerate', (['self.mean'], {}), '(self.mean)\n', (2007, 2018), True, 'import numpy as np\n'), ((1476, 1601), 'scipy.stats.truncnorm.stats', 'truncnorm.stats', (['(-mean / self.variance_sqrt)', '(100.0 * self.variance_sqrt)'], {'loc': 'mean', 'scale': 'self.variance_sqrt', 'moments': '"""m"""'}), "(-mean / self.variance_sqrt, 100.0 * self.variance_sqrt, loc\n =mean, scale=self.variance_sqrt, moments='m')\n", (1491, 1601), False, 'from scipy.stats import truncnorm\n'), ((2417, 2436), 'numpy.min', 'np.min', (['observation'], {}), '(observation)\n', (2423, 2436), True, 'import numpy as np\n'), ((2603, 2621), 'numpy.sum', 'np.sum', (['(delta ** 2)'], {}), '(delta ** 2)\n', (2609, 2621), True, 'import numpy as np\n'), ((3585, 3606), 'torch.sum', 'torch.sum', (['(delta ** 2)'], {}), '(delta ** 2)\n', (3594, 3606), False, 'import torch\n')] |
import argparse
import numpy as np
import torch
from torch import nn
from core.layers import LinearGaussian, ReluGaussian
from core.losses import ClassificationLoss
from core.utils import generate_classification_data, draw_classification_results
np.random.seed(42)  # fixed seed for reproducible synthetic data
EPS = 1e-6  # numerical-stability constant (not used in this file's visible code)
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=500)
parser.add_argument('--mcvi', action='store_true')  # use Monte-Carlo variational inference
parser.add_argument('--hid_size', type=int, default=128)  # hidden layer width
parser.add_argument('--n_classes', type=int, default=2)
parser.add_argument('--data_size', type=int, default=500)
parser.add_argument('--method', type=str, default='bayes')
parser.add_argument('--device', type=int, default=0)  # CUDA device index
parser.add_argument('--anneal_updates', type=int, default=1000)
parser.add_argument('--warmup_updates', type=int, default=14000)
parser.add_argument('--test_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--gamma', type=float, default=0.5,
                    help='lr decrease rate in MultiStepLR scheduler')
parser.add_argument('--epochs', type=int, default=23000)
parser.add_argument('--draw_every', type=int, default=1000)  # plot interval, in epochs
parser.add_argument('--milestones', nargs='+', type=int,
                    default=[3000, 5000, 9000, 13000])  # epochs at which lr decays
parser.add_argument('--dataset', default='classification')
parser.add_argument('--input_size', default=2, type=int)
parser.add_argument('--mc_samples', default=1, type=int)
class Model(nn.Module):
    """Three-layer variational network: a Gaussian input layer followed by two
    ReLU-Gaussian layers, with switchable MCVI / deterministic propagation."""

    def __init__(self, args):
        super().__init__()
        width = args.hid_size
        self.linear = LinearGaussian(args.input_size, width, certain=True)
        self.relu1 = ReluGaussian(width, width)
        self.out = ReluGaussian(width, args.n_classes)
        if args.mcvi:
            self.mcvi()

    def forward(self, x):
        # Propagate through the three stochastic layers in order.
        return self.out(self.relu1(self.linear(x)))

    def _layers(self):
        # Internal helper: the layers in forward order.
        return (self.linear, self.relu1, self.out)

    def mcvi(self):
        """Switch every layer to Monte-Carlo variational inference mode."""
        for layer in self._layers():
            layer.mcvi()

    def determenistic(self):  # (sic) spelling kept for interface compatibility
        """Switch every layer to deterministic propagation mode."""
        for layer in self._layers():
            layer.determenistic()
if __name__ == "__main__":
    args = parser.parse_args()
    # Resolve the integer device index into a torch.device (CPU fallback).
    args.device = torch.device(
        'cuda:{}'.format(args.device) if torch.cuda.is_available() else 'cpu')
    x_train, y_train, y_onehot_train, x_test, y_test, y_onehot_test = generate_classification_data(
        args)
    # Snapshot the raw train/test splits before training.
    draw_classification_results(x_test, y_test, 'test.png', args)
    draw_classification_results(x_train, y_train, 'train.png', args)

    model = Model(args).to(args.device)
    criterion = ClassificationLoss(model, args)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                              args.milestones,
                                              gamma=args.gamma)

    step = 0  # global update counter, used by the loss for KL annealing/warmup
    for epoch in range(args.epochs):
        step += 1
        optimizer.zero_grad()
        y_logits = model(x_train)
        loss, categorical_mean, kl, logsoftmax = criterion(y_logits,
                                                          y_onehot_train, step)
        pred = torch.argmax(logsoftmax, dim=1)
        loss.backward()
        # Clip gradient *values* (not norms) to stabilise training.
        nn.utils.clip_grad.clip_grad_value_(model.parameters(), 0.1)
        # NOTE(review): scheduler.step() is called before optimizer.step();
        # recent PyTorch expects the opposite order -- confirm intended.
        scheduler.step()
        optimizer.step()

        if epoch % args.draw_every == 0:
            draw_classification_results(x_train, pred,
                                        'after_{}_epoch.png'.format(epoch),
                                        args)

    # Final evaluation on both splits, without building autograd graphs.
    with torch.no_grad():
        y_logits = model(x_train)
        _, _, _, logsoftmax = criterion(y_logits, y_onehot_train, step)
        pred = torch.argmax(logsoftmax, dim=1)
        draw_classification_results(x_train, pred, 'end_train.png', args)

        y_logits = model(x_test)
        _, _, _, logsoftmax = criterion(y_logits, y_onehot_test, step)
        pred = torch.argmax(logsoftmax, dim=1)
        draw_classification_results(x_test, pred, 'end_test.png', args)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"core.layers.LinearGaussian",
"core.utils.generate_classification_data",
"torch.argmax",
"core.layers.ReluGaussian",
"core.losses.ClassificationLoss",
"torch.cuda.is_available",
"core.utils.draw_classification_results",
"torch.no_grad",
"torch.opti... | [((249, 267), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (263, 267), True, 'import numpy as np\n'), ((290, 315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (313, 315), False, 'import argparse\n'), ((2413, 2447), 'core.utils.generate_classification_data', 'generate_classification_data', (['args'], {}), '(args)\n', (2441, 2447), False, 'from core.utils import generate_classification_data, draw_classification_results\n'), ((2461, 2522), 'core.utils.draw_classification_results', 'draw_classification_results', (['x_test', 'y_test', '"""test.png"""', 'args'], {}), "(x_test, y_test, 'test.png', args)\n", (2488, 2522), False, 'from core.utils import generate_classification_data, draw_classification_results\n'), ((2527, 2591), 'core.utils.draw_classification_results', 'draw_classification_results', (['x_train', 'y_train', '"""train.png"""', 'args'], {}), "(x_train, y_train, 'train.png', args)\n", (2554, 2591), False, 'from core.utils import generate_classification_data, draw_classification_results\n'), ((2649, 2680), 'core.losses.ClassificationLoss', 'ClassificationLoss', (['model', 'args'], {}), '(model, args)\n', (2667, 2680), False, 'from core.losses import ClassificationLoss\n'), ((2763, 2850), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer', 'args.milestones'], {'gamma': 'args.gamma'}), '(optimizer, args.milestones, gamma=args\n .gamma)\n', (2799, 2850), False, 'import torch\n'), ((1617, 1672), 'core.layers.LinearGaussian', 'LinearGaussian', (['args.input_size', 'hid_size'], {'certain': '(True)'}), '(args.input_size, hid_size, certain=True)\n', (1631, 1672), False, 'from core.layers import LinearGaussian, ReluGaussian\n'), ((1694, 1726), 'core.layers.ReluGaussian', 'ReluGaussian', (['hid_size', 'hid_size'], {}), '(hid_size, hid_size)\n', (1706, 1726), False, 'from core.layers import LinearGaussian, ReluGaussian\n'), ((1746, 1784), 
'core.layers.ReluGaussian', 'ReluGaussian', (['hid_size', 'args.n_classes'], {}), '(hid_size, args.n_classes)\n', (1758, 1784), False, 'from core.layers import LinearGaussian, ReluGaussian\n'), ((3253, 3284), 'torch.argmax', 'torch.argmax', (['logsoftmax'], {'dim': '(1)'}), '(logsoftmax, dim=1)\n', (3265, 3284), False, 'import torch\n'), ((3658, 3673), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3671, 3673), False, 'import torch\n'), ((3796, 3827), 'torch.argmax', 'torch.argmax', (['logsoftmax'], {'dim': '(1)'}), '(logsoftmax, dim=1)\n', (3808, 3827), False, 'import torch\n'), ((3836, 3901), 'core.utils.draw_classification_results', 'draw_classification_results', (['x_train', 'pred', '"""end_train.png"""', 'args'], {}), "(x_train, pred, 'end_train.png', args)\n", (3863, 3901), False, 'from core.utils import generate_classification_data, draw_classification_results\n'), ((4022, 4053), 'torch.argmax', 'torch.argmax', (['logsoftmax'], {'dim': '(1)'}), '(logsoftmax, dim=1)\n', (4034, 4053), False, 'import torch\n'), ((4062, 4125), 'core.utils.draw_classification_results', 'draw_classification_results', (['x_test', 'pred', '"""end_test.png"""', 'args'], {}), "(x_test, pred, 'end_test.png', args)\n", (4089, 4125), False, 'from core.utils import generate_classification_data, draw_classification_results\n'), ((2304, 2329), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2327, 2329), False, 'import torch\n')] |
"""
pyuvwsim
--------
Experimental python interface to uvwsim.
"""
import _pyuvwsim
from numpy import asarray
from .version import __version__
def load_station_coords(file_name):
    """Load station coordinates from an ASCII layout file.

    The layout file should contain 2 or 3 columns of coordinates which are
    space, comma, or tab separated.

    Args:
        file_name (string): File name path of the station coordinate file.

    Returns:
        (x, y, z) tuple of station coordinate arrays.
    """
    coords = _pyuvwsim.load_station_coords(file_name)
    return coords
def convert_enu_to_ecef(x, y, z, lon, lat, alt=0.0):
    """Convert ENU (East, North, Up) to ECEF coordinates.

    Args:
        x (array-like): Array of x (East) coordinates, in metres.
        y (array-like): Array of y (North) coordinates, in metres.
        z (array-like): Array of z (Up) coordinates, in metres.
        lon (double): Longitude, in radians.
        lat (double): Latitude, in radians.
        alt (Optional[double]): Altitude, in metres.

    Returns:
        (x, y, z) tuple of coordinate arrays, in metres.
    """
    return _pyuvwsim.convert_enu_to_ecef(
        asarray(x), asarray(y), asarray(z), lon, lat, alt)
def evaluate_baseline_uvw(x, y, z, ra, dec, mjd):
    """Generate baseline coordinates from station ECEF coordinates, pointing
    direction and time.

    Args:
        x (array-like): Array of x (ECEF) coordinates, in metres.
        y (array-like): Array of y (ECEF) coordinates, in metres.
        z (array-like): Array of z (ECEF) coordinates, in metres.
        ra (double): Right Ascension of pointing direction, in radians.
        dec (double): Declination of pointing direction, in radians.
        mjd (double): Modified Julian date (UTC).

    Returns:
        (uu, vv, ww) tuple of baseline coordinate arrays, in metres.
    """
    return _pyuvwsim.evaluate_baseline_uvw(
        asarray(x), asarray(y), asarray(z), ra, dec, mjd)
def evaluate_baseline_uvw_ha_dec(x, y, z, ha, dec):
    """Generate baseline coordinates from station ECEF coordinates, hour angle,
    and declination.

    Note:
        Greenwich hour angle = hour angle - east longitude
        eg. for the VLA, longitude = -107°37'03.819" east
        a source is overhead when its Greenwich hour angle is
        +107.6177275 degrees

    Args:
        x (array-like): Array of x (ECEF) coordinates, in metres.
        y (array-like): Array of y (ECEF) coordinates, in metres.
        z (array-like): Array of z (ECEF) coordinates, in metres.
        ha (double): Greenwich hour angle, in radians (24h == 2pi).
        dec (double): Declination of pointing direction, in radians.

    Returns:
        (uu, vv, ww) tuple of baseline coordinate arrays, in metres.
    """
    return _pyuvwsim.evaluate_baseline_uvw_ha_dec(
        asarray(x), asarray(y), asarray(z), ha, dec)
def evaluate_station_uvw(x, y, z, ra, dec, mjd):
    """Generate station uvw coordinates from station ECEF coordinates, pointing
    direction and time.

    Args:
        x (array-like): Array of x (ECEF) coordinates, in metres.
        y (array-like): Array of y (ECEF) coordinates, in metres.
        z (array-like): Array of z (ECEF) coordinates, in metres.
        ra (double): Right Ascension of pointing direction, in radians.
        dec (double): Declination of pointing direction, in radians.
        mjd (double): Modified Julian date (UTC).

    Returns:
        (u, v, w) tuple of station uvw coordinate arrays, in metres.
    """
    return _pyuvwsim.evaluate_station_uvw(
        asarray(x), asarray(y), asarray(z), ra, dec, mjd)
def evaluate_station_uvw_ha_dec(x, y, z, ha, dec):
    """Generate station uvw coordinates from station ECEF coordinates, pointing
    direction and Greenwich hour angle.

    Note:
        Greenwich hour angle = hour angle - east longitude
        eg. for the VLA, longitude = -107°37'03.819" east
        a source is overhead when its Greenwich hour angle is
        +107.6177275 degrees

    Args:
        x (array-like): Array of x (ECEF) coordinates, in metres.
        y (array-like): Array of y (ECEF) coordinates, in metres.
        z (array-like): Array of z (ECEF) coordinates, in metres.
        ha (double): Greenwich hour angle (24h == 2pi), in radians.
        dec (double): Declination of pointing direction, in radians.

    Returns:
        (u, v, w) tuple of station uvw coordinate arrays, in metres.
    """
    return _pyuvwsim.evaluate_station_uvw_ha_dec(
        asarray(x), asarray(y), asarray(z), ha, dec)
def datetime_to_mjd(year, month, day, hour, minute, seconds):
    """Convert a calendar date and time to a Modified Julian date.

    Args:
        year (int): Year.
        month (int): Month.
        day (int): Day.
        hour (int): Hour.
        minute (int): Minute.
        seconds (double): Seconds.

    Returns:
        double, Modified Julian date.
    """
    mjd = _pyuvwsim.datetime_to_mjd(year, month, day, hour, minute, seconds)
    return mjd
| [
"_pyuvwsim.convert_enu_to_ecef",
"_pyuvwsim.evaluate_station_uvw",
"numpy.asarray",
"_pyuvwsim.evaluate_baseline_uvw_ha_dec",
"_pyuvwsim.load_station_coords",
"_pyuvwsim.evaluate_baseline_uvw",
"_pyuvwsim.evaluate_station_uvw_ha_dec",
"_pyuvwsim.datetime_to_mjd"
] | [((535, 575), '_pyuvwsim.load_station_coords', '_pyuvwsim.load_station_coords', (['file_name'], {}), '(file_name)\n', (564, 575), False, 'import _pyuvwsim\n'), ((1131, 1141), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (1138, 1141), False, 'from numpy import asarray\n'), ((1150, 1160), 'numpy.asarray', 'asarray', (['y'], {}), '(y)\n', (1157, 1160), False, 'from numpy import asarray\n'), ((1169, 1179), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (1176, 1179), False, 'from numpy import asarray\n'), ((1191, 1244), '_pyuvwsim.convert_enu_to_ecef', '_pyuvwsim.convert_enu_to_ecef', (['x', 'y', 'z', 'lon', 'lat', 'alt'], {}), '(x, y, z, lon, lat, alt)\n', (1220, 1244), False, 'import _pyuvwsim\n'), ((1903, 1913), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (1910, 1913), False, 'from numpy import asarray\n'), ((1922, 1932), 'numpy.asarray', 'asarray', (['y'], {}), '(y)\n', (1929, 1932), False, 'from numpy import asarray\n'), ((1941, 1951), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (1948, 1951), False, 'from numpy import asarray\n'), ((1963, 2017), '_pyuvwsim.evaluate_baseline_uvw', '_pyuvwsim.evaluate_baseline_uvw', (['x', 'y', 'z', 'ra', 'dec', 'mjd'], {}), '(x, y, z, ra, dec, mjd)\n', (1994, 2017), False, 'import _pyuvwsim\n'), ((2842, 2852), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (2849, 2852), False, 'from numpy import asarray\n'), ((2861, 2871), 'numpy.asarray', 'asarray', (['y'], {}), '(y)\n', (2868, 2871), False, 'from numpy import asarray\n'), ((2880, 2890), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (2887, 2890), False, 'from numpy import asarray\n'), ((2902, 2958), '_pyuvwsim.evaluate_baseline_uvw_ha_dec', '_pyuvwsim.evaluate_baseline_uvw_ha_dec', (['x', 'y', 'z', 'ha', 'dec'], {}), '(x, y, z, ha, dec)\n', (2940, 2958), False, 'import _pyuvwsim\n'), ((3619, 3629), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (3626, 3629), False, 'from numpy import asarray\n'), ((3638, 3648), 'numpy.asarray', 'asarray', (['y'], 
{}), '(y)\n', (3645, 3648), False, 'from numpy import asarray\n'), ((3657, 3667), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (3664, 3667), False, 'from numpy import asarray\n'), ((3679, 3732), '_pyuvwsim.evaluate_station_uvw', '_pyuvwsim.evaluate_station_uvw', (['x', 'y', 'z', 'ra', 'dec', 'mjd'], {}), '(x, y, z, ra, dec, mjd)\n', (3709, 3732), False, 'import _pyuvwsim\n'), ((4576, 4586), 'numpy.asarray', 'asarray', (['x'], {}), '(x)\n', (4583, 4586), False, 'from numpy import asarray\n'), ((4595, 4605), 'numpy.asarray', 'asarray', (['y'], {}), '(y)\n', (4602, 4605), False, 'from numpy import asarray\n'), ((4614, 4624), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (4621, 4624), False, 'from numpy import asarray\n'), ((4636, 4691), '_pyuvwsim.evaluate_station_uvw_ha_dec', '_pyuvwsim.evaluate_station_uvw_ha_dec', (['x', 'y', 'z', 'ha', 'dec'], {}), '(x, y, z, ha, dec)\n', (4673, 4691), False, 'import _pyuvwsim\n'), ((5061, 5127), '_pyuvwsim.datetime_to_mjd', '_pyuvwsim.datetime_to_mjd', (['year', 'month', 'day', 'hour', 'minute', 'seconds'], {}), '(year, month, day, hour, minute, seconds)\n', (5086, 5127), False, 'import _pyuvwsim\n')] |
from __future__ import print_function
# standard library imports
import sys
import os
from os import path
# third party
import numpy as np
from collections import Counter
# local application imports
sys.path.append(path.dirname( path.dirname( path.abspath(__file__))))
from utilities import *
from wrappers import Molecule
from .main_gsm import MainGSM
from coordinate_systems import Distance,Angle,Dihedral,OutOfPlane,TranslationX,TranslationY,TranslationZ,RotationA,RotationB,RotationC
class SE_GSM(MainGSM):
    def __init__(
            self,
            options,
            ):
        """Initialize single-ended GSM from the reactant node and driving coordinates.

        Args:
            options: GSM options object forwarded to MainGSM; expected to hold
                the reactant molecule in nodes[0] and the driving coordinates.
        """
        super(SE_GSM,self).__init__(options)
        self.current_nnodes=1  # single-ended: the string starts with only the reactant
        print(" Assuming the isomers are initialized!")
        #self.isomer_init()
        print(" Done initializing isomer")
        #self.nodes[0].form_Primitive_Hessian()
        print(" Primitive Internal Coordinates")
        print(self.nodes[0].primitive_internal_coordinates[0:50])
        print(" number of primitives is", self.nodes[0].num_primitives)
        print('Driving Coordinates')
        print(self.driving_coords)
        sys.stdout.flush()
        # stash bdist for node 0 -- bdist measures how far node 0 is from
        # satisfying the driving coordinates
        ictan,self.nodes[0].bdist = self.get_tangent(
            self.nodes[0],
            None,
            driving_coords=self.driving_coords,
        )
        # constrain the coordinate basis along the driving-coordinate tangent
        self.nodes[0].update_coordinate_basis(constraints=ictan)
def set_V0(self):
self.nodes[0].V0 = self.nodes[0].energy
#TODO should be actual gradient
self.nodes[0].gradrms = 0.
    def isomer_init(self):
        '''
        Add the driving-coordinate primitives (bonds, angles, torsions,
        out-of-planes) to the primitive internal coordinate set if they
        don't already exist.

        Deprecated: it is better to build the topology properly before
        initializing GSM. See main.py.
        '''
        #TODO ANGLE, TORSION or OOP between fragments will not work if using TRIC with BLOCK LA
        changed_top = False
        #TODO first check if there is any add/break then rebuild topology and makePrimitives
        for i in self.driving_coords:
            if "ADD" in i or "BREAK" in i:
                # driving coordinates use 1-based atom indices; primitives are
                # 0-based, and the smaller index goes first
                if i[1]<i[2]:
                    bond = Distance(i[1]-1,i[2]-1)
                else:
                    bond = Distance(i[2]-1,i[1]-1)
                self.nodes[0].coord_obj.Prims.add(bond,verbose=True)
                changed_top =True  # bond changes require rebuilding the topology below
            if "ANGLE" in i:
                if i[1]<i[3]:
                    angle = Angle(i[1]-1,i[2]-1,i[3]-1)
                else:
                    angle = Angle(i[3]-1,i[2]-1,i[1]-1)
                self.nodes[0].coord_obj.Prims.add(angle,verbose=True)
            if "TORSION" in i:
                if i[1]<i[4]:
                    torsion = Dihedral(i[1]-1,i[2]-1,i[3]-1,i[4]-1)
                else:
                    torsion = Dihedral(i[4]-1,i[3]-1,i[2]-1,i[1]-1)
                self.nodes[0].coord_obj.Prims.add(torsion,verbose=True)
            if "OOP" in i:
                if i[1]<i[4]:
                    oop = OutOfPlane(i[1]-1,i[2]-1,i[3]-1,i[4]-1)
                else:
                    oop = OutOfPlane(i[4]-1,i[3]-1,i[2]-1,i[1]-1)
                self.nodes[0].coord_obj.Prims.add(oop,verbose=True)

        # invalidate cached primitive values after editing the primitive set
        self.nodes[0].coord_obj.Prims.clearCache()
        if changed_top:
            self.nodes[0].coord_obj.Prims.rebuild_topology_from_prim_bonds(self.nodes[0].xyz)
        self.nodes[0].coord_obj.Prims.reorderPrimitives()
        self.nodes[0].update_coordinate_basis()
    def go_gsm(self,max_iters=50,opt_steps=10,rtype=2):
        """
        Run the full single-ended GSM: growth phase, then optimization phase.

        rtype=2 Find and Climb TS,
        1 Climb with no exact find,
        0 turning of climbing image and TS search
        """
        self.set_V0()
        if self.isRestarted==False:
            self.nodes[0].gradrms = 0.
            self.nodes[0].V0 = self.nodes[0].energy
            print(" Initial energy is %1.4f" % self.nodes[0].energy)
            self.add_GSM_nodeR()
            self.grow_string(max_iters=max_iters,max_opt_steps=opt_steps)
            if self.tscontinue:
                if self.pastts==1: #normal over the hill
                    self.add_GSM_nodeR(1)
                    self.add_last_node(2)
                elif self.pastts==2 or self.pastts==3: #when cgrad is positive
                    self.add_last_node(1)
                    if self.nodes[self.nR-1].gradrms>5.*self.options['CONV_TOL']:
                        self.add_last_node(1)
                # NOTE(review): this branch is unreachable -- pastts==3 is already
                # captured by the previous elif; confirm which behavior was intended.
                elif self.pastts==3: #product detected by bonding
                    self.add_last_node(1)
            # trim the node list down to what was actually grown
            self.nnodes=self.nR
            self.nodes = self.nodes[:self.nR]
            energies = self.energies
            if self.TSnode == self.nR-1:
                print(" The highest energy node is the last")
                print(" not continuing with TS optimization.")
                self.tscontinue=False
            print(" Number of nodes is ",self.nnodes)
            print(" Warning last node still not optimized fully")
            self.xyz_writer('grown_string_{:03}.xyz'.format(self.ID),self.geometries,self.energies,self.gradrmss,self.dEs)
            print(" SSM growth phase over")
            self.done_growing=True

            print(" beginning opt phase")
            print("Setting all interior nodes to active")
            # optimize only the interior nodes; endpoints stay frozen
            for n in range(1,self.nnodes-1):
                self.active[n]=True
            self.active[self.nnodes-1] = False
            self.active[0] = False

        if not self.isRestarted:
            print(" initial ic_reparam")
            self.reparameterize(ic_reparam_steps=25)
        print(" V_profile (after reparam): ", end=' ')
        energies = self.energies
        for n in range(self.nnodes):
            print(" {:7.3f}".format(float(energies[n])), end=' ')
        print()

        self.xyz_writer('grown_string1_{:03}.xyz'.format(self.ID),self.geometries,self.energies,self.gradrmss,self.dEs)
        if self.tscontinue:
            self.optimize_string(max_iter=max_iters,opt_steps=3,rtype=rtype) #opt steps fixed at 3 for rtype=1 and 2, else set it to be the large number :) muah hahaahah
        else:
            print("Exiting early")
            self.end_early=True

        filename="opt_converged_{:03d}.xyz".format(self.ID)
        print(" Printing string to " + filename)
        self.xyz_writer(filename,self.geometries,self.energies,self.gradrmss,self.dEs)
        print("Finished GSM!")
    def add_last_node(self,rtype):
        """Append/optimize the final (product-side) node of the SE-GSM string.

        rtype==1: copy the current frontier node into the next slot, optimize
                  the copy with the full CONV_* convergence thresholds, and
                  only advance self.nR if the optimization produced a new
                  geometry.
        rtype==2: the node was already created elsewhere; re-optimize the
                  existing frontier node with the full convergence thresholds.
        """
        assert rtype==1 or rtype==2, "rtype must be 1 or 2"
        samegeom=False  # NOTE(review): never read below -- dead local
        noptsteps=100   # max optimization steps for the last node
        # use MECI optimization when the PES includes state coupling
        if self.nodes[self.nR-1].PES.lot.do_coupling:
            opt_type='MECI'
        else:
            opt_type='UNCONSTRAINED'

        if rtype==1:
            print(" copying last node, opting")
            #self.nodes[self.nR] = DLC.copy_node(self.nodes[self.nR-1],self.nR)
            self.nodes[self.nR] = Molecule.copy_from_options(self.nodes[self.nR-1],new_node_id=self.nR)
            print(" Optimizing node %i" % self.nR)
            # restore full (non-frontier) convergence criteria for this node
            self.optimizer[self.nR].conv_grms = self.options['CONV_TOL']
            self.optimizer[self.nR].conv_gmax = self.options['CONV_gmax']
            self.optimizer[self.nR].conv_Ediff = self.options['CONV_Ediff']
            self.optimizer[self.nR].conv_dE = self.options['CONV_dE']
            path=os.path.join(os.getcwd(),'scratch/{:03d}/{}'.format(self.ID,self.nR))
            self.optimizer[self.nR].optimize(
                    molecule=self.nodes[self.nR],
                    refE=self.nodes[0].V0,
                    opt_steps=noptsteps,
                    opt_type=opt_type,
                    path=path,
                    )
            self.active[self.nR]=True
            # only count the new node if optimization actually moved the geometry
            if (self.nodes[self.nR].xyz == self.nodes[self.nR-1].xyz).all():
                print(" Opt did not produce new geometry")
            else:
                self.nR+=1
        elif rtype==2:
            print(" already created node, opting")
            # same convergence tightening, applied to the existing frontier node
            self.optimizer[self.nR-1].conv_grms = self.options['CONV_TOL']
            self.optimizer[self.nR-1].conv_gmax = self.options['CONV_gmax']
            self.optimizer[self.nR-1].conv_Ediff = self.options['CONV_Ediff']
            self.optimizer[self.nR-1].conv_dE = self.options['CONV_dE']
            path=os.path.join(os.getcwd(),'scratch/{:03d}/{}'.format(self.ID,self.nR-1))
            self.optimizer[self.nR-1].optimize(
                    molecule=self.nodes[self.nR-1],
                    refE=self.nodes[0].V0,
                    opt_steps=noptsteps,
                    opt_type=opt_type,
                    path=path,
                    )
        #print(" Aligning")
        #self.nodes[self.nR-1].xyz = self.com_rotate_move(self.nR-2,self.nR,self.nR-1)
        return
def grow_nodes(self):
if self.nodes[self.nR-1].gradrms < self.options['ADD_NODE_TOL']:
if self.nR == self.nnodes:
print(" Ran out of nodes, exiting GSM")
raise ValueError
if self.nodes[self.nR] == None:
self.add_GSM_nodeR()
print(" getting energy for node %d: %5.4f" %(self.nR-1,self.nodes[self.nR-1].energy - self.nodes[0].V0))
return
def add_GSM_nodes(self,newnodes=1):
if self.nn+newnodes > self.nnodes:
print("Adding too many nodes, cannot interpolate")
for i in range(newnodes):
self.add_GSM_nodeR()
def ic_reparam_g(self,ic_reparam_steps=4,n0=0,nconstraints=1): #see line 3863 of gstring.cpp
'''
Dont do ic_reparam_g for SE-GSM
'''
return
def set_frontier_convergence(self,nR):
# set
self.optimizer[nR].conv_grms = self.options['ADD_NODE_TOL']
self.optimizer[nR].conv_gmax = 100. #self.options['ADD_NODE_TOL'] # could use some multiplier times CONV_GMAX...
self.optimizer[nR].conv_Ediff = 1000. # 2.5
print(" conv_tol of node %d is %.4f" % (nR,self.optimizer[nR].conv_grms))
def set_active(self,nR,nP=None):
#print(" Here is active:",self.active)
print((" setting active node to %i "%nR))
for i in range(self.nnodes):
if self.nodes[i] != None:
self.active[i] = False
self.set_frontier_convergence(nR)
self.active[nR] = True
#print(" Here is new active:",self.active)
def make_tan_list(self):
ncurrent,nlist = self.make_difference_node_list()
param_list = []
for n in range(ncurrent-1):
if nlist[2*n] not in param_list:
param_list.append(nlist[2*n])
return param_list
def make_move_list(self):
ncurrent,nlist = self.make_difference_node_list()
param_list = []
for n in range(ncurrent):
if nlist[2*n+1] not in param_list:
param_list.append(nlist[2*n+1])
return param_list
def make_difference_node_list(self):
ncurrent =0
nlist = [0]*(2*self.nnodes)
for n in range(self.nR-1):
nlist[2*ncurrent] = n
nlist[2*ncurrent+1] = n+1
ncurrent += 1
nlist[2*ncurrent+1] = self.nR -1
nlist[2*ncurrent] = self.nR -1
ncurrent += 1
return ncurrent,nlist
    def past_ts(self):
        '''
        Heuristically decide whether the growing string has passed over the
        transition state.

        Returns an integer code (consumed by go_gsm):
          0 -- not past the TS
          1 -- over the hill based on the energy profile
          2 -- constraint gradient became positive
          3 -- product detected via changed bonding connectivity
        '''
        ispast=ispast1=ispast2=ispast3=0
        # energy-drop thresholds (presumably kcal/mol -- TODO confirm units)
        THRESH1=5.
        THRESH2=3.
        THRESH3=-1.
        THRESHB=0.05    # NOTE(review): unused below
        CTHRESH=0.005   # constraint-gradient threshold
        OTHRESH=-0.015  # overlap threshold paired with ispast1
        emax = -100.
        nodemax =1
        #n0 is zero until after finished growing
        ns = self.n0-1
        if ns<nodemax: ns=nodemax

        # locate the highest-energy node between ns and the frontier
        print(" Energies",end=' ')
        energies = self.energies
        for n in range(ns,self.nR):
            print(" {:4.3f}".format(energies[n]),end=' ')
            if energies[n]>emax:
                nodemax=n
                emax=energies[n]
        print("\n nodemax ",nodemax)

        # count how far the energies after the maximum have dropped below it
        for n in range(nodemax,self.nR):
            if energies[n]<emax-THRESH1:
                ispast1+=1
            if energies[n]<emax-THRESH2:
                ispast2+=1
            if energies[n]<emax-THRESH3:
                ispast3+=1
            if ispast1>1:
                break
        print(" ispast1",ispast1)
        print(" ispast2",ispast2)
        print(" ispast3",ispast3)

        #TODO 5/9/2019 what about multiple constraints
        # Done 6/23/2019
        # project the gradient onto the (first) constraint direction
        constraints = self.nodes[self.nR-1].constraints[:,0]
        gradient = self.nodes[self.nR-1].gradient
        overlap = np.dot(gradient.T,constraints)
        cgrad = overlap*constraints
        cgrad = np.linalg.norm(cgrad)*np.sign(overlap)
        #cgrad = np.sum(cgrad)
        print((" cgrad: %4.3f nodemax: %i nR: %i" %(cgrad,nodemax,self.nR)))

        # 6/17 THIS should check if the last node is high in energy
        if cgrad>CTHRESH and not self.nodes[self.nR-1].PES.lot.do_coupling and nodemax != self.TSnode:
            print(" constraint gradient positive")
            ispast=2
        elif ispast1>0 and cgrad>OTHRESH:
            print(" over the hill(1)")
            ispast=1
        elif ispast2>1:
            print(" over the hill(2)")
            ispast=1
        else:
            ispast=0

        if ispast==0:
            # fall back to a connectivity check against the driving coordinates
            bch=self.check_for_reaction_g(1,self.driving_coords)
            if ispast3>1 and bch:
                print("over the hill(3) connection changed %r " %bch)
                ispast=3
        print(" ispast=",ispast)
        return ispast
    def check_if_grown(self):
        '''
        Check if the string is grown
        Returns True if grown

        Growth is finished when the string is past the TS, more than three
        nodes exist, and the remaining bond distance (bdist) has shrunk
        below (1 - BDIST_RATIO) of its initial value -- or when the energy
        profile indicates dissociation.
        '''
        self.pastts = self.past_ts()
        isDone=False
        #TODO break planes
        # bdist condition: frontier node is close enough to the product
        condition1 = (abs(self.nodes[self.nR-1].bdist) <=(1-self.BDIST_RATIO)*abs(self.nodes[0].bdist))
        print(" bdist %.3f" % self.nodes[self.nR-1].bdist)
        fp = self.find_peaks('growing')
        if self.pastts and self.current_nnodes>3 and condition1: #TODO extra criterion here
            print(" pastts is ",self.pastts)
            if self.TSnode == self.nR-1:
                # highest-energy node is the frontier: nothing beyond it to optimize
                print(" The highest energy node is the last")
                print(" not continuing with TS optimization.")
                self.tscontinue=False
            nifty.printcool("Over the hill")
            isDone=True
        elif fp==-1 and self.energies[self.nR-1]>200. and self.nodes[self.nR-1].gradrms>self.options['CONV_TOL']*5:
            # all uphill with very high energy: terminate growth early
            print("growth_iters over: all uphill and high energy")
            self.end_early=2
            self.tscontinue=False
            self.nnodes=self.nR
            isDone=True
        elif fp==-2:
            # all uphill but flattening out: terminate growth early
            print("growth_iters over: all uphill and flattening out")
            self.end_early=2
            self.tscontinue=False
            self.nnodes=self.nR
            isDone=True

        # ADD extra criteria here to check if TS is higher energy than product
        return isDone
    def is_converged(self,totalgrad,fp,rtype,ts_cgradq):
        """SE-GSM convergence check for the optimization phase.

        Handles the SE-GSM-specific cases (TS second-to-last node; string
        profile indicating dissociation) and otherwise defers to the base
        class implementation.
        """
        isDone=False
        added=False  # NOTE(review): written but never read afterwards
        if self.TSnode == self.nnodes-2 and (self.find or totalgrad<0.2) and fp==1:
            if self.nodes[self.nR-1].gradrms>self.options['CONV_TOL']:
                # TS is too close to the string end: append one more node first
                print("TS node is second to last node, adding one more node")
                self.add_last_node(1)
                self.nnodes=self.nR
                self.active[self.nnodes-1]=False #GSM makes self.active[self.nnodes-1]=True as well
                self.active[self.nnodes-2]=True #GSM makes self.active[self.nnodes-1]=True as well
                added=True
                print("done adding node")
                print("nnodes = ",self.nnodes)
                self.ictan,self.dqmaga = self.get_tangents(self.nodes)
                self.refresh_coordinates()
            return False

        # => check string profile <= #
        if fp==-1: #total string is uphill
            print("fp == -1, check V_profile")
            print("total dissociation")
            self.endearly=True #bools
            self.tscontinue=False
            return True
        elif fp==-2:
            print("termination due to dissociation")
            self.tscontinue=False
            self.endearly=True #bools
            return True
        elif fp==0:
            self.tscontinue=False
            self.endearly=True #bools
            return True
        elif self.climb and fp>0 and self.finder:
            fp=self.find_peaks('opting')
            if fp>1:
                rxnocc,wint = self.check_for_reaction()
                if fp >1 and rxnocc and wint<self.nnodes-1:
                    # a second reaction appeared within the string: trim it
                    print("Need to trim string")
                    self.tscontinue=False
                    self.endearly=True #bools
                    return True
                else:
                    return False
            # NOTE(review): when fp<=1 here this branch falls through and
            # implicitly returns None -- confirm that is intended
        else:
            return super(SE_GSM,self).is_converged(totalgrad,fp,rtype,ts_cgradq)
    def check_for_reaction(self):
        '''
        Locate the interior energy minima/maxima of the string and, together
        with the driving-coordinate connectivity check, decide whether a
        reaction occurred.

        Returns (isrxn, wint) where wint is the node index at which the
        first reaction ends (0 when undetermined).
        '''
        isrxn = self.check_for_reaction_g(1, self.driving_coords)
        minnodes=[]
        maxnodes=[]
        wint=0
        energies = self.energies
        if energies[1]>energies[0]:
            minnodes.append(0)
        if energies[self.nnodes-1]<energies[self.nnodes-2]:
            minnodes.append(self.nnodes-1)
        # NOTE(review): when self.n0 == 0 the first iteration compares against
        # energies[-1] (wrap-around to the last node) -- confirm n0 >= 1 here
        for n in range(self.n0,self.nnodes-1):
            if energies[n+1]>energies[n]:
                if energies[n]<energies[n-1]:
                    minnodes.append(n)
            if energies[n+1]<energies[n]:
                if energies[n]>energies[n-1]:
                    maxnodes.append(n)
        if len(minnodes)>2 and len(maxnodes)>1:
            wint=minnodes[1] # the real reaction ends at first minimum
        print(" wint ", wint)
        return isrxn,wint
def check_for_reaction_g(self,rtype,driving_coords):
'''
'''
c = Counter(elem[0] for elem in driving_coords)
nadds = c['ADD']
nbreaks = c['BREAK']
isrxn=False
if (nadds+nbreaks) <1:
return False
nadded=0
nbroken=0
nnR = self.nR-1
xyz = self.nodes[nnR].xyz
atoms = self.nodes[nnR].atoms
for i in driving_coords:
if "ADD" in i:
index = [i[1]-1, i[2]-1]
bond = Distance(index[0],index[1])
d = bond.value(xyz)
d0 = (atoms[index[0]].vdw_radius + atoms[index[1]].vdw_radius)/2
if d<d0:
nadded+=1
if "BREAK" in i:
index = [i[1]-1, i[2]-1]
bond = Distance(index[0],index[1])
d = bond.value(xyz)
d0 = (atoms[index[0]].vdw_radius + atoms[index[1]].vdw_radius)/2
if d>d0:
nbroken+=1
if rtype==1:
if (nadded+nbroken)>=(nadds+nbreaks):
isrxn=True
#isrxn=nadded+nbroken
else:
isrxn=True
#isrxn=nadded+nbroken
print(" check_for_reaction_g isrxn: %r nadd+nbrk: %i" %(isrxn,nadds+nbreaks))
return isrxn
## => Convergence Criteria
#dE_iter = abs(self.emaxp - self.emax)
#TS_conv = self.options['CONV_TOL']
#if self.find and self.optimizer[self.TSnode].nneg>1:
# print(" reducing TS convergence because nneg>1")
# TS_conv = self.options['CONV_TOL']/2.
#self.optimizer[self.TSnode].conv_grms = TS_conv
#if (rtype == 2 and self.find ):
# if self.nodes[self.TSnode].gradrms< TS_conv:
# self.tscontinue=False
# isDone=True
# #print(" Number of imaginary frequencies %i" % self.optimizer[self.TSnode].nneg)
# return isDone
# if totalgrad<0.1 and self.nodes[self.TSnode].gradrms<2.5*TS_conv and dE_iter < 0.02:
# self.tscontinue=False
# isDone=True
# #print(" Number of imaginary frequencies %i" % self.optimizer[self.TSnode].nneg)
#if rtype==1 and self.climb:
# if self.nodes[self.TSnode].gradrms<TS_conv and ts_cgradq < self.options['CONV_TOL']:
# isDone=True
if __name__=='__main__':
    # Smoke-test driver: grow a single-ended GSM string for the
    # butadiene + ethene cycloaddition with Q-Chem B3LYP/6-31G.
    from .qchem import QChem
    from .pes import PES
    from .dlc_new import DelocalizedInternalCoordinates
    from .eigenvector_follow import eigenvector_follow
    from ._linesearch import backtrack,NoLineSearch
    from .molecule import Molecule

    basis='6-31G'
    nproc=8
    functional='B3LYP'
    filepath1="examples/tests/butadiene_ethene.xyz"
    # level of theory, PES, and reactant molecule (DLC coordinates)
    lot1=QChem.from_options(states=[(1,0)],charge=0,basis=basis,functional=functional,nproc=nproc,fnm=filepath1)
    pes1 = PES.from_options(lot=lot1,ad_idx=0,multiplicity=1)
    M1 = Molecule.from_options(fnm=filepath1,PES=pes1,coordinate_type="DLC")
    optimizer=eigenvector_follow.from_options(print_level=1) #default parameters fine here/opt_type will get set by GSM
    # driving coordinates: form the two new C-C bonds (atom indices are 1-based)
    gsm = SE_GSM.from_options(reactant=M1,nnodes=20,driving_coords=[("ADD",6,4),("ADD",5,1)],optimizer=optimizer,print_level=1)
    gsm.go_gsm()
| [
"os.path.abspath",
"wrappers.Molecule.from_options",
"coordinate_systems.Angle",
"os.getcwd",
"coordinate_systems.Distance",
"collections.Counter",
"coordinate_systems.Dihedral",
"sys.stdout.flush",
"numpy.linalg.norm",
"numpy.sign",
"numpy.dot",
"coordinate_systems.OutOfPlane",
"wrappers.Mo... | [((20819, 20888), 'wrappers.Molecule.from_options', 'Molecule.from_options', ([], {'fnm': 'filepath1', 'PES': 'pes1', 'coordinate_type': '"""DLC"""'}), "(fnm=filepath1, PES=pes1, coordinate_type='DLC')\n", (20840, 20888), False, 'from wrappers import Molecule\n'), ((1108, 1126), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1124, 1126), False, 'import sys\n'), ((12625, 12656), 'numpy.dot', 'np.dot', (['gradient.T', 'constraints'], {}), '(gradient.T, constraints)\n', (12631, 12656), True, 'import numpy as np\n'), ((17925, 17968), 'collections.Counter', 'Counter', (['(elem[0] for elem in driving_coords)'], {}), '(elem[0] for elem in driving_coords)\n', (17932, 17968), False, 'from collections import Counter\n'), ((245, 267), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (257, 267), False, 'from os import path\n'), ((6963, 7035), 'wrappers.Molecule.copy_from_options', 'Molecule.copy_from_options', (['self.nodes[self.nR - 1]'], {'new_node_id': 'self.nR'}), '(self.nodes[self.nR - 1], new_node_id=self.nR)\n', (6989, 7035), False, 'from wrappers import Molecule\n'), ((12709, 12730), 'numpy.linalg.norm', 'np.linalg.norm', (['cgrad'], {}), '(cgrad)\n', (12723, 12730), True, 'import numpy as np\n'), ((12731, 12747), 'numpy.sign', 'np.sign', (['overlap'], {}), '(overlap)\n', (12738, 12747), True, 'import numpy as np\n'), ((7407, 7418), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7416, 7418), False, 'import os\n'), ((18357, 18385), 'coordinate_systems.Distance', 'Distance', (['index[0]', 'index[1]'], {}), '(index[0], index[1])\n', (18365, 18385), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((18650, 18678), 'coordinate_systems.Distance', 'Distance', (['index[0]', 'index[1]'], {}), '(index[0], index[1])\n', (18658, 18678), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, 
TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2239, 2267), 'coordinate_systems.Distance', 'Distance', (['(i[1] - 1)', '(i[2] - 1)'], {}), '(i[1] - 1, i[2] - 1)\n', (2247, 2267), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2312, 2340), 'coordinate_systems.Distance', 'Distance', (['(i[2] - 1)', '(i[1] - 1)'], {}), '(i[2] - 1, i[1] - 1)\n', (2320, 2340), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2526, 2561), 'coordinate_systems.Angle', 'Angle', (['(i[1] - 1)', '(i[2] - 1)', '(i[3] - 1)'], {}), '(i[1] - 1, i[2] - 1, i[3] - 1)\n', (2531, 2561), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2604, 2639), 'coordinate_systems.Angle', 'Angle', (['(i[3] - 1)', '(i[2] - 1)', '(i[1] - 1)'], {}), '(i[3] - 1, i[2] - 1, i[1] - 1)\n', (2609, 2639), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2793, 2841), 'coordinate_systems.Dihedral', 'Dihedral', (['(i[1] - 1)', '(i[2] - 1)', '(i[3] - 1)', '(i[4] - 1)'], {}), '(i[1] - 1, i[2] - 1, i[3] - 1, i[4] - 1)\n', (2801, 2841), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((2883, 2931), 'coordinate_systems.Dihedral', 'Dihedral', (['(i[4] - 1)', '(i[3] - 1)', '(i[2] - 1)', '(i[1] - 1)'], {}), '(i[4] - 1, i[3] - 1, i[2] - 1, i[1] - 1)\n', (2891, 2931), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((3076, 3126), 
'coordinate_systems.OutOfPlane', 'OutOfPlane', (['(i[1] - 1)', '(i[2] - 1)', '(i[3] - 1)', '(i[4] - 1)'], {}), '(i[1] - 1, i[2] - 1, i[3] - 1, i[4] - 1)\n', (3086, 3126), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((3164, 3214), 'coordinate_systems.OutOfPlane', 'OutOfPlane', (['(i[4] - 1)', '(i[3] - 1)', '(i[2] - 1)', '(i[1] - 1)'], {}), '(i[4] - 1, i[3] - 1, i[2] - 1, i[1] - 1)\n', (3174, 3214), False, 'from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane, TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC\n'), ((8384, 8395), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8393, 8395), False, 'import os\n')] |
import flowio
import numpy

# Read an FCS flow-cytometry file and report the shape of its event matrix.
flow_data = flowio.FlowData('fcs_files/data1.fcs')
events = numpy.reshape(flow_data.events, (-1, flow_data.channel_count))
print(events.shape)
| [
"flowio.FlowData",
"numpy.reshape"
] | [((32, 70), 'flowio.FlowData', 'flowio.FlowData', (['"""fcs_files/data1.fcs"""'], {}), "('fcs_files/data1.fcs')\n", (47, 70), False, 'import flowio\n'), ((75, 121), 'numpy.reshape', 'numpy.reshape', (['f.events', '(-1, f.channel_count)'], {}), '(f.events, (-1, f.channel_count))\n', (88, 121), False, 'import numpy\n')] |
#!/usr/bin/python -*- coding: utf-8 -*-
#
# Merlin - Almost Native Python Machine Learning Library: Gaussian Distribution
#
# Copyright (C) 2014-2015 alvations
# URL:
# For license information, see LICENSE.md
import numpy as np
"""
Class for univariate gaussian
p(x) = 1/sqrt(2*pi*simga^2) * e ^ - (x-miu)^2/2*sigma^2
Where miu is the gaussian mean, and sigma^2 is the gaussian variance
"""
class Gaussian:
    """Univariate Gaussian distribution N(mean, variance).

    p(x) = 1/sqrt(2*pi*sigma^2) * exp(-(x - mu)^2 / (2*sigma^2))
    where mu is the mean and sigma^2 the variance.
    """

    def __init__(self, mean, variance):
        self.mean = mean
        self.variance = variance

    def sample(self, points):
        """Draw *points* random samples from the distribution.

        Bug fix: numpy.random.normal takes the standard deviation (scale)
        as its second argument, not the variance, so sqrt(variance) is
        passed here.
        """
        return np.random.normal(self.mean, np.sqrt(self.variance), points)
def estimate_gaussian(X):
    """
    Returns the mean and the variance of a data set of X points assuming that
    the points come from a gaussian distribution X.

    Note: np.var computes the population variance (ddof=0), i.e. the
    maximum-likelihood estimate, not the unbiased sample variance.
    """
    mean = np.mean(X,0)      # per-column mean (axis 0)
    variance = np.var(X,0)   # per-column population variance (axis 0)
    return Gaussian(mean,variance) | [
"numpy.mean",
"numpy.var",
"numpy.random.normal"
] | [((788, 801), 'numpy.mean', 'np.mean', (['X', '(0)'], {}), '(X, 0)\n', (795, 801), True, 'import numpy as np\n'), ((816, 828), 'numpy.var', 'np.var', (['X', '(0)'], {}), '(X, 0)\n', (822, 828), True, 'import numpy as np\n'), ((554, 604), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.variance', 'points'], {}), '(self.mean, self.variance, points)\n', (570, 604), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
from .abc import Codec
from .compat import ensure_ndarray, ndarray_copy
class PackBits(Codec):
    """Codec that packs the elements of a boolean array into the bits of a
    uint8 array.

    The first byte of the encoded array records how many padding bits were
    appended to complete the final byte.

    Examples
    --------
    >>> import numcodecs
    >>> import numpy as np
    >>> codec = numcodecs.PackBits()
    >>> x = np.array([True, False, False, True], dtype=bool)
    >>> y = codec.encode(x)
    >>> y
    array([ 4, 144], dtype=uint8)
    >>> z = codec.decode(y)
    >>> z
    array([ True, False, False, True])

    """

    codec_id = 'packbits'

    def __init__(self):
        pass

    def encode(self, buf):
        # interpret the incoming buffer as a flat boolean array
        bits = ensure_ndarray(buf).view(bool)
        bits = bits.reshape(-1, order='A')

        # how many bits will pad out the final byte?
        remainder = bits.size % 8
        n_bits_padded = (8 - remainder) % 8

        # pack the booleans, prefixing the pad count as the first byte
        packed = np.packbits(bits)
        enc = np.empty(packed.size + 1, dtype='u1')
        enc[0] = n_bits_padded
        enc[1:] = packed
        return enc

    def decode(self, buf, out=None):
        # interpret the incoming buffer as flat bytes
        packed = ensure_ndarray(buf).view('u1')
        packed = packed.reshape(-1, order='A')

        # the first byte stores the number of padding bits to drop
        n_bits_padded = int(packed[0])

        dec = np.unpackbits(packed[1:])
        if n_bits_padded:
            dec = dec[:-n_bits_padded]

        # reinterpret as booleans and copy into the destination if given
        return ndarray_copy(dec.view(bool), out)
| [
"numpy.empty",
"numpy.packbits",
"numpy.unpackbits"
] | [((1244, 1284), 'numpy.empty', 'np.empty', (['(n_bytes_packed + 1)'], {'dtype': '"""u1"""'}), "(n_bytes_packed + 1, dtype='u1')\n", (1252, 1284), True, 'import numpy as np\n'), ((1523, 1539), 'numpy.packbits', 'np.packbits', (['arr'], {}), '(arr)\n', (1534, 1539), True, 'import numpy as np\n'), ((1879, 1901), 'numpy.unpackbits', 'np.unpackbits', (['enc[1:]'], {}), '(enc[1:])\n', (1892, 1901), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import DiagnoseObsStatisticsArgs
import binning_utils as bu
import predefined_configs as pconf
import config as conf
from copy import deepcopy
import diag_utils as du
import fnmatch
import glob
from JediDB import JediDB
from JediDBArgs import obsFKey
import logging
import logsetup
import multiprocessing as mp
from netCDF4 import Dataset
import numpy as np
import os
import stat_utils as su
import var_utils as vu
_logger = logging.getLogger(__name__)
class DiagnoseObsStatistics:
  '''
  Diagnose observation-space statistics
  Driven by
    - static selections in conf
    - command-line arguments in DiagnoseObsStatisticsArgs
  '''
  def __init__(self):
    self.name = 'DiagnoseObsStatistics'
    self.args = DiagnoseObsStatisticsArgs.args
    self.logger = logging.getLogger(self.name)

    # construct mean DB into 0th member slot
    self.logger.info('mean database: '+self.args.meanPath)
    self.jdbs = {vu.mean: JediDB(self.args.meanPath)}
    self.osKeys = sorted(self.jdbs[vu.mean].Files.keys())

    # construct ens member DBs into subsequent slots (when available);
    # jdbs is keyed by the member suffix produced by vu.ensSuffix
    for member in list(range(1, self.args.nMembers+1)):
      ensemblePath = str(self.args.ensemblePath).format(member)
      self.logger.info('adding member database: '+ensemblePath)
      self.jdbs[vu.ensSuffix(member)] = JediDB(ensemblePath)

  def diagnose(self, workers = None):
    '''
    conducts diagnoseObsSpace across multiple ObsSpaces in parallel
    '''
    # Loop over all experiment+observation combinations (keys) alphabetically
    for osKey in self.osKeys:
      self.logger.info(osKey)
      if workers is None:
        self.diagnoseObsSpace(self.jdbs, osKey)
      else:
        # NOTE(review): the AsyncResult is never awaited, so worker
        # exceptions will be dropped silently -- confirm intended
        res = workers.apply_async(self.diagnoseObsSpace, args=(self.jdbs, osKey))

  def diagnoseObsSpace(self, jdbs, osKey):
    #  osKey - key of jdbs members to reference
    logger = logging.getLogger(self.name+'.diagnoseObsSpace('+osKey+')')
    nMembers = len(jdbs)-1

    # initialize mean db file handles
    jdbs[vu.mean].initHandles(osKey)

    ###############################################
    ## Extract constructor info about the ObsSpace
    ###############################################

    ObsSpaceName = jdbs[vu.mean].ObsSpaceName[osKey]
    ObsSpaceInfo = conf.DiagSpaceConfig[ObsSpaceName]
    ObsSpaceGrp = ObsSpaceInfo['DiagSpaceGrp']
    binVarConfigs = ObsSpaceInfo.get('binVarConfigs',{})
    selectDiagNames = ObsSpaceInfo.get('diagNames',{})

    # create observed variable list by selecting those variables in the
    # obs feedback files (obsFKey) with the proper suffix
    if self.args.jediAppName == 'variational':
      markerGroup = vu.depbgGroup
    elif self.args.jediAppName == 'hofx':
      markerGroup = vu.hofxGroup
    else:
      # NOTE(review): falls through after logging, leaving markerGroup
      # unbound -- the next line would raise NameError for an
      # unsupported application name; confirm intended
      logger.error('JEDI Application is not supported:: '+self.args.jediAppName)
    obsVars = jdbs[vu.mean].varList(osKey, obsFKey, markerGroup)

    ########################################################
    ## Construct dictionary of binMethods for this ObsSpace
    ########################################################

    binMethods = {}

    for binVarKey, binMethodKeys in binVarConfigs.items():
      binVarConfig = pconf.binVarConfigs.get(binVarKey,pconf.nullBinVarConfig)
      for binMethodKey in binMethodKeys:
        config = binVarConfig.get(binMethodKey,pconf.nullBinMethod).copy()

        # skip empty/null bin methods
        if (len(config['values']) < 1 or
            len(config['filters']) < 1): continue

        config['osName'] = ObsSpaceName
        config['fileFormat'] = jdbs[vu.mean].fileFormat(osKey, obsFKey)

        binMethods[(binVarKey,binMethodKey)] = bu.BinMethod(config)

    ######################################
    ## Construct diagnostic configurations
    ######################################

    diagnosticConfigs = du.diagnosticConfigs(
      selectDiagNames, ObsSpaceName,
      includeEnsembleDiagnostics = (nMembers > 1),
      fileFormat = jdbs[vu.mean].fileFormat(osKey, obsFKey))

    #####################################################
    ## Generate comprehensive dict of required variables
    #####################################################

    meanDBVars = []
    ensDBVars = []
    dbVars = {vu.mean: [], vu.ensemble: []}

    for varName in obsVars:
      for diagName, diagnosticConfig in diagnosticConfigs.items():
        if 'ObsFunction' not in diagnosticConfig: continue

        # variables for diagnostics
        for grpVar in diagnosticConfig['ObsFunction'].dbVars(
          varName, diagnosticConfig['outerIter']):
          for memberType in dbVars.keys():
            if diagnosticConfig[memberType]:
              dbVars[memberType].append(grpVar)

        # variables for binning
        # TODO: anIter grpVar's are not needed for all applications
        #       can save some reading time+memory by checking all diagnosticConfigs
        #       for required iterations before appending to dbVars[vu.mean] below
        for (binVarKey,binMethodKey), binMethod in binMethods.items():
          for grpVar in binMethod.dbVars(
            varName, diagnosticConfig['outerIter']):
            dbVars[vu.mean].append(grpVar)


    #####################################
    ## Read required variables from jdbs
    #####################################

    # read mean database variable values into memory
    dbVals = jdbs[vu.mean].readVars(osKey, dbVars[vu.mean])

    # destroy mean file handles
    jdbs[vu.mean].destroyHandles(osKey)

    # now for ensemble members
    for memStr, jdb in jdbs.items():
      if memStr == vu.mean: continue

      # initialize member db file handles
      jdb.initHandles(osKey)

      # read database variable values into memory, suffixing keys by member
      memberDBVals = jdb.readVars(osKey, dbVars[vu.ensemble])
      for dbVar, vals in memberDBVals.items():
        dbVals[dbVar+memStr] = vals.copy()

      # destroy file handles
      jdb.destroyHandles(osKey)


    ######################################
    ## Collect statistics for all obsVars
    ######################################

    # Initialize a dictionary to contain all statistical info for this osKey
    statsDict = {}
    for attribName in su.fileStatAttributes:
      statsDict[attribName] = []
    for statName in su.allFileStats:
      statsDict[statName] = []

    # collect stats for all diagnosticConfigs
    for diagName, diagnosticConfig in sorted(diagnosticConfigs.items()):
      if 'ObsFunction' not in diagnosticConfig: continue

      logger.info('Calculating/writing diagnostic stats for:')
      logger.info('DIAG = '+diagName)
      Diagnostic = diagnosticConfig['ObsFunction']
      outerIter = diagnosticConfig['outerIter']

      for varName in obsVars:
        logger.info('VARIABLE = '+varName)

        varShort, varUnits = vu.varAttributes(varName)

        Diagnostic.evaluate(dbVals, varName, outerIter)
        diagValues = Diagnostic.result

        if len(diagValues)-np.isnan(diagValues).sum() == 0:
          logger.warning('All missing values for diagnostic: '+diagName)

        for (binVarKey,binMethodKey), binMethod in binMethods.items():
          if diagName in binMethod.excludeDiags: continue

          binVarName, binGrpName = vu.splitObsVarGrp(binVarKey)
          binVarShort, binVarUnits = vu.varAttributes(binVarName)

          # initialize binMethod filter function result
          # NOTE: binning is performed using mean values
          #       and not ensemble member values
          binMethod.evaluate(dbVals, varName, outerIter)

          for binVal in binMethod.values:
            # apply binMethod filters for binVal
            binnedDiagnostic = binMethod.apply(diagValues,diagName,binVal)

            # store value and statistics associated with this bin
            statsDict['binVal'].append(binVal)
            statsVal = su.calcStats(binnedDiagnostic)
            for statName in su.allFileStats:
              statsDict[statName].append(statsVal[statName])

            # store metadata common to all bins
            statsDict['DiagSpaceGrp'].append(ObsSpaceGrp)
            statsDict['varName'].append(varShort)
            statsDict['varUnits'].append(varUnits)
            statsDict['diagName'].append(diagName)
            statsDict['binMethod'].append(binMethodKey)
            statsDict['binVar'].append(binVarShort)
            statsDict['binUnits'].append(binVarUnits)

          #END binMethod.values LOOP
        #END binMethods tuple LOOP
      #END obsVars LOOP
    #END diagnosticConfigs LOOP

    ## Create a new stats file for osKey
    logger.info('Writing statistics file')
    su.write_stats_nc(osKey,statsDict)

    logger.info('Finished')
#=========================================================================
# main program
def main():
  '''
  Entry point: run observation-space diagnostics, either serially or across
  a pool of worker processes (controlled by the nprocs argument).
  '''
  _logger.info('Starting '+__name__)
  diagnostics = DiagnoseObsStatistics()
  nprocs = diagnostics.args.nprocs
  if nprocs == 1:
    # serial execution
    diagnostics.diagnose()
  else:
    # parallel execution: one async task per ObsSpace key
    workerPool = mp.Pool(processes = nprocs)
    diagnostics.diagnose(workerPool)
    # wait for all workers to finish
    workerPool.close()
    workerPool.join()
  _logger.info('Finished '+__name__+' successfully')

if __name__ == '__main__': main()
| [
"JediDB.JediDB",
"var_utils.splitObsVarGrp",
"var_utils.varAttributes",
"predefined_configs.binVarConfigs.get",
"stat_utils.calcStats",
"var_utils.ensSuffix",
"binning_utils.BinMethod",
"numpy.isnan",
"stat_utils.write_stats_nc",
"multiprocessing.Pool",
"logging.getLogger"
] | [((451, 478), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (468, 478), False, 'import logging\n'), ((790, 818), 'logging.getLogger', 'logging.getLogger', (['self.name'], {}), '(self.name)\n', (807, 818), False, 'import logging\n'), ((1892, 1957), 'logging.getLogger', 'logging.getLogger', (["(self.name + '.diagnoseObsSpace(' + osKey + ')')"], {}), "(self.name + '.diagnoseObsSpace(' + osKey + ')')\n", (1909, 1957), False, 'import logging\n'), ((8963, 8998), 'stat_utils.write_stats_nc', 'su.write_stats_nc', (['osKey', 'statsDict'], {}), '(osKey, statsDict)\n', (8980, 8998), True, 'import stat_utils as su\n'), ((9325, 9366), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'statistics.args.nprocs'}), '(processes=statistics.args.nprocs)\n', (9332, 9366), True, 'import multiprocessing as mp\n'), ((950, 976), 'JediDB.JediDB', 'JediDB', (['self.args.meanPath'], {}), '(self.args.meanPath)\n', (956, 976), False, 'from JediDB import JediDB\n'), ((1337, 1357), 'JediDB.JediDB', 'JediDB', (['ensemblePath'], {}), '(ensemblePath)\n', (1343, 1357), False, 'from JediDB import JediDB\n'), ((3218, 3276), 'predefined_configs.binVarConfigs.get', 'pconf.binVarConfigs.get', (['binVarKey', 'pconf.nullBinVarConfig'], {}), '(binVarKey, pconf.nullBinVarConfig)\n', (3241, 3276), True, 'import predefined_configs as pconf\n'), ((1313, 1333), 'var_utils.ensSuffix', 'vu.ensSuffix', (['member'], {}), '(member)\n', (1325, 1333), True, 'import var_utils as vu\n'), ((3671, 3691), 'binning_utils.BinMethod', 'bu.BinMethod', (['config'], {}), '(config)\n', (3683, 3691), True, 'import binning_utils as bu\n'), ((6942, 6967), 'var_utils.varAttributes', 'vu.varAttributes', (['varName'], {}), '(varName)\n', (6958, 6967), True, 'import var_utils as vu\n'), ((7398, 7426), 'var_utils.splitObsVarGrp', 'vu.splitObsVarGrp', (['binVarKey'], {}), '(binVarKey)\n', (7415, 7426), True, 'import var_utils as vu\n'), ((7470, 7498), 'var_utils.varAttributes', 'vu.varAttributes', 
(['binVarName'], {}), '(binVarName)\n', (7486, 7498), True, 'import var_utils as vu\n'), ((8093, 8123), 'stat_utils.calcStats', 'su.calcStats', (['binnedDiagnostic'], {}), '(binnedDiagnostic)\n', (8105, 8123), True, 'import stat_utils as su\n'), ((7104, 7124), 'numpy.isnan', 'np.isnan', (['diagValues'], {}), '(diagValues)\n', (7112, 7124), True, 'import numpy as np\n')] |
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, cdist
class fuzzykmeans(object):
    """Fuzzy k-means clustering with Gaussian-kernel memberships.

    Parameters
    ----------
    train_data : (n_samples, n_features) array; each row is a sample.
    K : number of clusters to find.
    debug : when True, record and print the cost at every training epoch.
    """

    def __init__(self, train_data, K, debug=True):
        self.nclasses = K
        # Initialize K random means, scaled by the data's maximum value.
        self.means = np.max(train_data) * np.random.randn(K, train_data.shape[1])
        # Membership placeholder, kept for interface compatibility.
        self.T = np.zeros((train_data.shape[0], K))
        self.DEBUG = debug
        self.costs = []

    def costf(self, train_data):
        """Return the fuzzy cost: sum of membership-weighted squared distances."""
        membs = self.m(train_data)
        sq_dists = cdist(self.means, train_data) ** 2
        return np.sum(membs * sq_dists)

    def m(self, train_data, l=2):
        """Degree of membership of every sample in every class.

        Gaussian kernel exp(-d^2) raised to the fuzziness exponent *l*.
        """
        mems = cdist(self.means, train_data)
        mems = np.exp(-mems ** 2)
        return mems ** l

    def train(self, train_data, epochs=10):
        """Run *epochs* alternating membership/centroid updates."""
        print("Epochs: ", epochs)
        self.costs = np.zeros(epochs)
        for epoch in range(epochs):
            print("Training Epoch %d:" % (epoch))
            if self.DEBUG:
                # Track the cost so convergence can be inspected afterwards.
                J = self.costf(train_data)
                self.costs[epoch] = J
                print("Cost: " + str(J))
                print("Means: " + str(self.means))
            membs = self.m(train_data)
            # New centroids: membership-weighted means of the data
            # (vectorized replacement of the original per-class loop).
            weighted = np.dot(membs, train_data)
            self.means = weighted / np.sum(membs, axis=1, keepdims=True)
if __name__=='__main__':
#Generate the class Data
CL1 = np.random.multivariate_normal([5, 0], 0.2*np.identity(2), 200)
CL2 = np.random.multivariate_normal([-5,0], 0.2*np.identity(2), 200)
CL3 = np.random.multivariate_normal([0, 5], 0.2*np.identity(2), 200)
CL4 = np.random.multivariate_normal([0, 0], 0.2*np.identity(2), 200)
CL5 = np.random.multivariate_normal([0, -5], 0.2*np.identity(2), 200)
X = np.vstack((CL1, CL2, CL3))
np.random.shuffle(X)
#And plotting
plt.scatter(X[:,0], X[:,1])
plt.title('Unclassified data plot')
plt.hold(True)
km = fuzzykmeans(X, 3)
km.train(X)
plt.scatter(km.means[:,0], km.means[:,1], c='r', marker='s')
plt.show()
| [
"matplotlib.pyplot.title",
"scipy.spatial.distance.cdist",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.hold",
"numpy.zeros",
"numpy.identity",
"numpy.max",
"numpy.exp",
"numpy.dot",
"numpy.vstack",
"numpy.random.shuffle"
] | [((2511, 2537), 'numpy.vstack', 'np.vstack', (['(CL1, CL2, CL3)'], {}), '((CL1, CL2, CL3))\n', (2520, 2537), True, 'import numpy as np\n'), ((2542, 2562), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (2559, 2562), True, 'import numpy as np\n'), ((2586, 2615), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {}), '(X[:, 0], X[:, 1])\n', (2597, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2653), 'matplotlib.pyplot.title', 'plt.title', (['"""Unclassified data plot"""'], {}), "('Unclassified data plot')\n", (2627, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2672), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (2666, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2783), 'matplotlib.pyplot.scatter', 'plt.scatter', (['km.means[:, 0]', 'km.means[:, 1]'], {'c': '"""r"""', 'marker': '"""s"""'}), "(km.means[:, 0], km.means[:, 1], c='r', marker='s')\n", (2732, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2796), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2794, 2796), True, 'import matplotlib.pyplot as plt\n'), ((621, 655), 'numpy.zeros', 'np.zeros', (['(train_data.shape[0], K)'], {}), '((train_data.shape[0], K))\n', (629, 655), True, 'import numpy as np\n'), ((917, 938), 'numpy.sum', 'np.sum', (['(membs * dists)'], {}), '(membs * dists)\n', (923, 938), True, 'import numpy as np\n'), ((1153, 1182), 'scipy.spatial.distance.cdist', 'cdist', (['self.means', 'train_data'], {}), '(self.means, train_data)\n', (1158, 1182), False, 'from scipy.spatial.distance import pdist, cdist\n'), ((1198, 1216), 'numpy.exp', 'np.exp', (['(-mems ** 2)'], {}), '(-mems ** 2)\n', (1204, 1216), True, 'import numpy as np\n'), ((1399, 1415), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (1407, 1415), True, 'import numpy as np\n'), ((413, 431), 'numpy.max', 'np.max', (['train_data'], {}), '(train_data)\n', (419, 431), True, 'import numpy as np\n'), ((432, 
471), 'numpy.random.randn', 'np.random.randn', (['K', 'train_data.shape[1]'], {}), '(K, train_data.shape[1])\n', (447, 471), True, 'import numpy as np\n'), ((868, 897), 'scipy.spatial.distance.cdist', 'cdist', (['self.means', 'train_data'], {}), '(self.means, train_data)\n', (873, 897), False, 'from scipy.spatial.distance import pdist, cdist\n'), ((1952, 1977), 'numpy.dot', 'np.dot', (['membs', 'train_data'], {}), '(membs, train_data)\n', (1958, 1977), True, 'import numpy as np\n'), ((2188, 2202), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2199, 2202), True, 'import numpy as np\n'), ((2261, 2275), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2272, 2275), True, 'import numpy as np\n'), ((2334, 2348), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2345, 2348), True, 'import numpy as np\n'), ((2407, 2421), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2418, 2421), True, 'import numpy as np\n'), ((2481, 2495), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2492, 2495), True, 'import numpy as np\n'), ((2062, 2081), 'numpy.sum', 'np.sum', (['membs[i, :]'], {}), '(membs[i, :])\n', (2068, 2081), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
def test_error():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
with pytest.raises(ValueError):
df.explode(list("AA"))
df.columns = list("AA")
with pytest.raises(ValueError):
df.explode("A")
def test_basic():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_rows():
df = pd.DataFrame(
{"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1},
index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]),
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.MultiIndex.from_tuples(
[
("a", 1),
("a", 1),
("a", 1),
("a", 2),
("b", 1),
("b", 2),
("b", 2),
]
),
dtype=object,
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_columns():
df = pd.DataFrame(
{("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1}
)
result = df.explode(("A", 1))
expected = pd.DataFrame(
{
("A", 1): pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.Index([0, 0, 0, 1, 2, 3, 3]),
dtype=object,
),
("A", 2): 1,
}
)
tm.assert_frame_equal(result, expected)
def test_usecase():
# explode a single column
# gh-10511
df = pd.DataFrame(
[[11, range(5), 10], [22, range(3), 20]], columns=list("ABC")
).set_index("C")
result = df.explode("B")
expected = pd.DataFrame(
{
"A": [11, 11, 11, 11, 11, 22, 22, 22],
"B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object),
"C": [10, 10, 10, 10, 10, 20, 20, 20],
},
columns=list("ABC"),
).set_index("C")
tm.assert_frame_equal(result, expected)
# gh-8517
df = pd.DataFrame(
[["2014-01-01", "Alice", "A B"], ["2014-01-02", "Bob", "C D"]],
columns=["dt", "name", "text"],
)
result = df.assign(text=df.text.str.split(" ")).explode("text")
expected = pd.DataFrame(
[
["2014-01-01", "Alice", "A"],
["2014-01-01", "Alice", "B"],
["2014-01-02", "Bob", "C"],
["2014-01-02", "Bob", "D"],
],
columns=["dt", "name", "text"],
index=[0, 0, 1, 1],
)
tm.assert_frame_equal(result, expected)
| [
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.util.testing.assert_frame_equal",
"pandas.Index",
"pytest.raises",
"numpy.array"
] | [((754, 793), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (775, 793), True, 'from pandas.util import testing as tm\n'), ((1598, 1637), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (1619, 1637), True, 'from pandas.util import testing as tm\n'), ((2091, 2130), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2112, 2130), True, 'from pandas.util import testing as tm\n'), ((2616, 2655), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2637, 2655), True, 'from pandas.util import testing as tm\n'), ((2680, 2792), 'pandas.DataFrame', 'pd.DataFrame', (["[['2014-01-01', 'Alice', 'A B'], ['2014-01-02', 'Bob', 'C D']]"], {'columns': "['dt', 'name', 'text']"}), "([['2014-01-01', 'Alice', 'A B'], ['2014-01-02', 'Bob', 'C D']],\n columns=['dt', 'name', 'text'])\n", (2692, 2792), True, 'import pandas as pd\n'), ((2895, 3086), 'pandas.DataFrame', 'pd.DataFrame', (["[['2014-01-01', 'Alice', 'A'], ['2014-01-01', 'Alice', 'B'], ['2014-01-02',\n 'Bob', 'C'], ['2014-01-02', 'Bob', 'D']]"], {'columns': "['dt', 'name', 'text']", 'index': '[0, 0, 1, 1]'}), "([['2014-01-01', 'Alice', 'A'], ['2014-01-01', 'Alice', 'B'], [\n '2014-01-02', 'Bob', 'C'], ['2014-01-02', 'Bob', 'D']], columns=['dt',\n 'name', 'text'], index=[0, 0, 1, 1])\n", (2907, 3086), True, 'import pandas as pd\n'), ((3172, 3211), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3193, 3211), True, 'from pandas.util import testing as tm\n'), ((236, 261), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (249, 261), False, 'import pytest\n'), ((332, 357), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (345, 
357), False, 'import pytest\n'), ((862, 917), 'numpy.array', 'np.array', (['[[0, 1, 2], np.nan, [], (3, 4)]'], {'dtype': 'object'}), '([[0, 1, 2], np.nan, [], (3, 4)], dtype=object)\n', (870, 917), True, 'import numpy as np\n'), ((942, 1009), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('a', 1), ('a', 2), ('b', 1), ('b', 2)]"], {}), "([('a', 1), ('a', 2), ('b', 1), ('b', 2)])\n", (967, 1009), True, 'import pandas as pd\n'), ((1714, 1769), 'numpy.array', 'np.array', (['[[0, 1, 2], np.nan, [], (3, 4)]'], {'dtype': 'object'}), '([[0, 1, 2], np.nan, [], (3, 4)], dtype=object)\n', (1722, 1769), True, 'import numpy as np\n'), ((1185, 1286), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('a', 1), ('a', 1), ('a', 1), ('a', 2), ('b', 1), ('b', 2), ('b', 2)]"], {}), "([('a', 1), ('a', 1), ('a', 1), ('a', 2), ('b', 1),\n ('b', 2), ('b', 2)])\n", (1210, 1286), True, 'import pandas as pd\n'), ((1968, 1999), 'pandas.Index', 'pd.Index', (['[0, 0, 0, 1, 2, 3, 3]'], {}), '([0, 0, 0, 1, 2, 3, 3])\n', (1976, 1999), True, 'import pandas as pd\n'), ((2449, 2497), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 0, 1, 2]'], {'dtype': 'object'}), '([0, 1, 2, 3, 4, 0, 1, 2], dtype=object)\n', (2457, 2497), True, 'import numpy as np\n')] |
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
import sys
import os
import time
import random
import numpy as np
import tensorflow.compat.v1 as tf
tf.compat.v1.disable_eager_execution()
from PIL import Image
SIZE = 1280
WIDTH = 32
HEIGHT = 40
NUM_CLASSES = 6
iterations = 300
SAVER_DIR = "train-saver/province/"
PROVINCES = ("京","沪","津","渝","鲁","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","豫","湘","鄂","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","港","澳","台")
nProvinceIndex = 0
time_begin = time.time()
# 定义输入节点,对应于图片像素值矩阵集合和图片标签(即所代表的数字)
x = tf.placeholder(tf.float32, shape=[None, SIZE])
y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])
x_image = tf.reshape(x, [-1, WIDTH, HEIGHT, 1])
# 定义卷积函数
def conv_layer(inputs, W, b, conv_strides, kernel_size, pool_strides, padding):
L1_conv = tf.nn.conv2d(inputs, W, strides=conv_strides, padding=padding)
L1_relu = tf.nn.relu(L1_conv + b)
return tf.nn.max_pool(L1_relu, ksize=kernel_size, strides=pool_strides, padding='SAME')
# 定义全连接层函数
def full_connect(inputs, W, b):
return tf.nn.relu(tf.matmul(inputs, W) + b)
if __name__ =='__main__' and sys.argv[1]=='train':
# 第一次遍历图片目录是为了获取图片总数
input_count = 0
for i in range(0,NUM_CLASSES):
dir = './train_images/training-set/chinese-characters/%s/' % i # 这里可以改成你自己的图片目录,i为分类标签
for rt, dirs, files in os.walk(dir):
for filename in files:
input_count += 1
# 定义对应维数和各维长度的数组
input_images = np.array([[0]*SIZE for i in range(input_count)])
input_labels = np.array([[0]*NUM_CLASSES for i in range(input_count)])
# 第二次遍历图片目录是为了生成图片数据和标签
index = 0
for i in range(0,NUM_CLASSES):
dir = './train_images/training-set/chinese-characters/%s/' % i # 这里可以改成你自己的图片目录,i为分类标签
for rt, dirs, files in os.walk(dir):
for filename in files:
filename = dir + filename
img = Image.open(filename)
width = img.size[0]
height = img.size[1]
for h in range(0, height):
for w in range(0, width):
# 通过这样的处理,使数字的线条变细,有利于提高识别准确率
if img.getpixel((w, h)) > 230:
input_images[index][w+h*width] = 0
else:
input_images[index][w+h*width] = 1
input_labels[index][i] = 1
index += 1
# 第一次遍历图片目录是为了获取图片总数
val_count = 0
for i in range(0,NUM_CLASSES):
dir = './train_images/validation-set/chinese-characters/%s/' % i # 这里可以改成你自己的图片目录,i为分类标签
for rt, dirs, files in os.walk(dir):
for filename in files:
val_count += 1
# 定义对应维数和各维长度的数组
val_images = np.array([[0]*SIZE for i in range(val_count)])
val_labels = np.array([[0]*NUM_CLASSES for i in range(val_count)])
# 第二次遍历图片目录是为了生成图片数据和标签
index = 0
for i in range(0,NUM_CLASSES):
dir = './train_images/validation-set/chinese-characters/%s/' % i # 这里可以改成你自己的图片目录,i为分类标签
for rt, dirs, files in os.walk(dir):
for filename in files:
filename = dir + filename
img = Image.open(filename)
width = img.size[0]
height = img.size[1]
for h in range(0, height):
for w in range(0, width):
# 通过这样的处理,使数字的线条变细,有利于提高识别准确率
if img.getpixel((w, h)) > 230:
val_images[index][w+h*width] = 0
else:
val_images[index][w+h*width] = 1
val_labels[index][i] = 1
index += 1
# 进行训练
with tf.Session() as sess:
# 第一个卷积层
W_conv1 = tf.get_variable('W_conv1', [5, 5, 1, 6], initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv1 = tf.Variable(tf.constant(0.1, shape=[16]), name="b_conv1")
conv_strides = [1, 1, 1, 1] #滑动步长
kernel_size = [1, 2, 2, 1] #核大小
pool_strides = [1, 2, 2, 1] #池化层
L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')
# 第二个卷积层
W_conv2 = tf.get_variable('W_conv2',[5, 5, 6, 16],initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv2")
conv_strides = [1, 1, 1, 1]
kernel_size = [1, 1, 1, 1]
pool_strides = [1, 1, 1, 1]
L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
# 第三个卷积层
W_conv3 = tf.get_variable('W_conv3', [5, 5, 16, 120], initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv3 = tf.Variable(tf.constant(0.1, shape=[120]), name="b_conv3")
conv_strides = [1, 1, 1, 1]
kernel_size = [1, 1, 1, 1]
pool_strides = [1, 1, 1, 1]
L3_pool = conv_layer(L2_pool, W_conv3, b_conv3, conv_strides, kernel_size, pool_strides, padding='SAME')
# 全连接层
W_fc1 = tf.get_variable('W_fc1', [16 * 20 * 120, 256], initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_fc1 = tf.Variable(tf.constant(0.1, shape=[512]), name="b_fc1")
h_pool2_flat = tf.reshape(L2_pool, [-1, 16 * 20 * 32])
h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
# 随机失活层
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# 输出层
W_fc2 = tf.Variable(tf.truncated_normal([512, NUM_CLASSES], stddev=0.1), name="W_fc2")
b_fc2 = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name="b_fc2")
# 定义优化器和训练op
# 将矩阵a乘以矩阵b,生成a * b
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# TODO 正则化
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
# 计算模型的正则化损失。一般只计算神经网络边上权重的正则化损失,而不使用偏置项
reg_term = regularizer(W_conv1) + regularizer(W_conv2) + regularizer(W_conv3)
# 用于计算张量tensor沿着指定的数轴(tensor的某一维度)上的的平均值,主要用作降维或者计算tensor(图像)的平均值。
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
#AdamOptimizer通过使用动量(参数的移动平均数)来改善传统梯度下降
train_step = tf.train.AdamOptimizer((1e-4)).minimize(cross_entropy)
# TODO
c_entropy = tf.summary.scalar('cross_entropy', cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# TODO
acc = tf.summary.scalar('accuracy', accuracy)
# 初始化saver
saver = tf.train.Saver()
# TODO 合并到Summary中
merged = tf.summary.merge_all()
# TODO 选定可视化存储目录 ----可视化工具tensorboard使用
writer = tf.summary.FileWriter('/train', sess.graph)
test_writer = tf.summary.FileWriter('/val')
sess.run(tf.global_variables_initializer())
time_elapsed = time.time() - time_begin
print("读取图片文件耗费时间:%d秒" % time_elapsed)
time_begin = time.time()
print ("一共读取了 %s 个训练图像, %s 个标签" % (input_count, input_count))
# 设置每次训练op的输入个数和迭代次数,这里为了支持任意图片总数,定义了一个余数remainder,譬如,如果每次训练op的输入个数为60,图片总数为150张,则前面两次各输入60张,最后一次输入30张(余数30)
batch_size = 60
iterations = iterations
batches_count = int(input_count / batch_size)
remainder = input_count % batch_size
print ("训练数据集分成 %s 批, 前面每批 %s 个数据,最后一批 %s 个数据" % (batches_count+1, batch_size, remainder))
# 执行训练迭代
for it in range(iterations):
# 这里的关键是要把输入数组转为np.array
for n in range(batches_count):
train_step.run(feed_dict={x: input_images[n*batch_size:(n+1)*batch_size], y_: input_labels[n*batch_size:(n+1)*batch_size], keep_prob: 0.5})
if remainder > 0:
start_index = batches_count * batch_size;
train_step.run(feed_dict={x: input_images[start_index:input_count-1], y_: input_labels[start_index:input_count-1], keep_prob: 0.5})
# 每完成五次迭代,判断准确度是否已达到100%,达到则退出迭代循环
iterate_accuracy = 0
if it%5 == 0:
iterate_accuracy = accuracy.eval(feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
print ('第 %d 次训练迭代: 准确率 %0.5f%%' % (it, iterate_accuracy*100))
if iterate_accuracy >= 0.9999 and it >= 150:
break;
print ('完成训练!')
time_elapsed = time.time() - time_begin
print ("训练耗费时间:%d秒" % time_elapsed)
time_begin = time.time()
# 保存训练结果
if not os.path.exists(SAVER_DIR):
print ('不存在训练数据保存目录,现在创建保存目录')
os.makedirs(SAVER_DIR)
saver_path = saver.save(sess, "%smodel.ckpt"%(SAVER_DIR))
if __name__ =='__main__' and sys.argv[1]=='predict':
saver = tf.train.import_meta_graph("%smodel.ckpt.meta"%(SAVER_DIR))
with tf.Session() as sess:
model_file=tf.train.latest_checkpoint(SAVER_DIR)
saver.restore(sess, model_file)
# 第一个卷积层
W_conv1 = sess.graph.get_tensor_by_name("W_conv1:0")
b_conv1 = sess.graph.get_tensor_by_name("b_conv1:0")
conv_strides = [1, 1, 1, 1]
kernel_size = [1, 2, 2, 1]
pool_strides = [1, 2, 2, 1]
L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')
# 第二个卷积层
W_conv2 = sess.graph.get_tensor_by_name("W_conv2:0")
b_conv2 = sess.graph.get_tensor_by_name("b_conv2:0")
conv_strides = [1, 1, 1, 1]
kernel_size = [1, 1, 1, 1]
pool_strides = [1, 1, 1, 1]
L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
# 全连接层
W_fc1 = sess.graph.get_tensor_by_name("W_fc1:0")
b_fc1 = sess.graph.get_tensor_by_name("b_fc1:0")
h_pool2_flat = tf.reshape(L2_pool, [-1, 16 * 20*32])
h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
# dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout层
W_fc2 = sess.graph.get_tensor_by_name("W_fc2:0")
b_fc2 = sess.graph.get_tensor_by_name("b_fc2:0")
# 定义优化器和训练op
conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
for n in range(1,2):
path = "test_images/%s.bmp" % (n)
img = Image.open(path)
width = img.size[0]
height = img.size[1]
img_data = [[0]*SIZE for i in range(1)]
for h in range(0, height):
for w in range(0, width):
if img.getpixel((w, h)) < 190:
img_data[0][w+h*width] = 1
else:
img_data[0][w+h*width] = 0
result = sess.run(conv, feed_dict = {x: np.array(img_data), keep_prob: 1.0})
max1 = 0
max2 = 0
max3 = 0
max1_index = 0
max2_index = 0
max3_index = 0
for j in range(NUM_CLASSES):
if result[0][j] > max1:
max1 = result[0][j]
max1_index = j
continue
if (result[0][j]>max2) and (result[0][j]<=max1):
max2 = result[0][j]
max2_index = j
continue
if (result[0][j]>max3) and (result[0][j]<=max2):
max3 = result[0][j]
max3_index = j
continue
nProvinceIndex = max1_index
print ("概率: [%s %0.2f%%] [%s %0.2f%%] [%s %0.2f%%]" % (PROVINCES[max1_index],max1*100, PROVINCES[max2_index],max2*100, PROVINCES[max3_index],max3*100))
print ("省份简称是: %s" % PROVINCES[nProvinceIndex])
| [
"os.walk",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.truncated_normal",
"tensorflow.compat.v1.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.constant",
"os.path.exists",
"tensorflow.compat.... | [((152, 190), 'tensorflow.compat.v1.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (188, 190), True, 'import tensorflow.compat.v1 as tf\n'), ((515, 526), 'time.time', 'time.time', ([], {}), '()\n', (524, 526), False, 'import time\n'), ((573, 619), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, SIZE]'}), '(tf.float32, shape=[None, SIZE])\n', (587, 619), True, 'import tensorflow.compat.v1 as tf\n'), ((625, 678), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, NUM_CLASSES]'}), '(tf.float32, shape=[None, NUM_CLASSES])\n', (639, 678), True, 'import tensorflow.compat.v1 as tf\n'), ((692, 729), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['x', '[-1, WIDTH, HEIGHT, 1]'], {}), '(x, [-1, WIDTH, HEIGHT, 1])\n', (702, 729), True, 'import tensorflow.compat.v1 as tf\n'), ((837, 899), 'tensorflow.compat.v1.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'W'], {'strides': 'conv_strides', 'padding': 'padding'}), '(inputs, W, strides=conv_strides, padding=padding)\n', (849, 899), True, 'import tensorflow.compat.v1 as tf\n'), ((912, 935), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['(L1_conv + b)'], {}), '(L1_conv + b)\n', (922, 935), True, 'import tensorflow.compat.v1 as tf\n'), ((945, 1030), 'tensorflow.compat.v1.nn.max_pool', 'tf.nn.max_pool', (['L1_relu'], {'ksize': 'kernel_size', 'strides': 'pool_strides', 'padding': '"""SAME"""'}), "(L1_relu, ksize=kernel_size, strides=pool_strides, padding='SAME'\n )\n", (959, 1030), True, 'import tensorflow.compat.v1 as tf\n'), ((8214, 8273), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (["('%smodel.ckpt.meta' % SAVER_DIR)"], {}), "('%smodel.ckpt.meta' % SAVER_DIR)\n", (8240, 8273), True, 'import tensorflow.compat.v1 as tf\n'), ((1372, 1384), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (1379, 1384), False, 'import os\n'), ((1797, 1809), 'os.walk', 
'os.walk', (['dir'], {}), '(dir)\n', (1804, 1809), False, 'import os\n'), ((2492, 2504), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (2499, 2504), False, 'import os\n'), ((2909, 2921), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (2916, 2921), False, 'import os\n'), ((3414, 3426), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (3424, 3426), True, 'import tensorflow.compat.v1 as tf\n'), ((4904, 4943), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['L2_pool', '[-1, 16 * 20 * 32]'], {}), '(L2_pool, [-1, 16 * 20 * 32])\n', (4914, 4943), True, 'import tensorflow.compat.v1 as tf\n'), ((5031, 5057), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5045, 5057), True, 'import tensorflow.compat.v1 as tf\n'), ((5075, 5106), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, keep_prob)\n', (5088, 5106), True, 'import tensorflow.compat.v1 as tf\n'), ((5419, 5459), 'tensorflow.compat.v1.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0001)'], {}), '(0.0001)\n', (5451, 5459), True, 'import tensorflow.compat.v1 as tf\n'), ((5904, 5953), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy"""', 'cross_entropy'], {}), "('cross_entropy', cross_entropy)\n", (5921, 5953), True, 'import tensorflow.compat.v1 as tf\n'), ((6121, 6160), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (6138, 6160), True, 'import tensorflow.compat.v1 as tf\n'), ((6191, 6207), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6205, 6207), True, 'import tensorflow.compat.v1 as tf\n'), ((6248, 6270), 'tensorflow.compat.v1.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (6268, 6270), True, 'import tensorflow.compat.v1 as tf\n'), ((6330, 6373), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['"""/train"""', 
'sess.graph'], {}), "('/train', sess.graph)\n", (6351, 6373), True, 'import tensorflow.compat.v1 as tf\n'), ((6392, 6421), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['"""/val"""'], {}), "('/val')\n", (6413, 6421), True, 'import tensorflow.compat.v1 as tf\n'), ((6582, 6593), 'time.time', 'time.time', ([], {}), '()\n', (6591, 6593), False, 'import time\n'), ((7948, 7959), 'time.time', 'time.time', ([], {}), '()\n', (7957, 7959), False, 'import time\n'), ((8281, 8293), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (8291, 8293), True, 'import tensorflow.compat.v1 as tf\n'), ((8318, 8355), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['SAVER_DIR'], {}), '(SAVER_DIR)\n', (8344, 8355), True, 'import tensorflow.compat.v1 as tf\n'), ((9202, 9241), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['L2_pool', '[-1, 16 * 20 * 32]'], {}), '(L2_pool, [-1, 16 * 20 * 32])\n', (9212, 9241), True, 'import tensorflow.compat.v1 as tf\n'), ((9329, 9355), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (9343, 9355), True, 'import tensorflow.compat.v1 as tf\n'), ((9376, 9407), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, keep_prob)\n', (9389, 9407), True, 'import tensorflow.compat.v1 as tf\n'), ((1092, 1112), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['inputs', 'W'], {}), '(inputs, W)\n', (1101, 1112), True, 'import tensorflow.compat.v1 as tf\n'), ((3589, 3617), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.1)'], {'shape': '[16]'}), '(0.1, shape=[16])\n', (3600, 3617), True, 'import tensorflow.compat.v1 as tf\n'), ((4023, 4051), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.1)'], {'shape': '[32]'}), '(0.1, shape=[32])\n', (4034, 4051), True, 'import tensorflow.compat.v1 as tf\n'), ((4430, 4459), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.1)'], {'shape': '[120]'}), '(0.1, shape=[120])\n', 
(4441, 4459), True, 'import tensorflow.compat.v1 as tf\n'), ((4840, 4869), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.1)'], {'shape': '[512]'}), '(0.1, shape=[512])\n', (4851, 4869), True, 'import tensorflow.compat.v1 as tf\n'), ((5147, 5198), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[512, NUM_CLASSES]'], {'stddev': '(0.1)'}), '([512, NUM_CLASSES], stddev=0.1)\n', (5166, 5198), True, 'import tensorflow.compat.v1 as tf\n'), ((5238, 5275), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.1)'], {'shape': '[NUM_CLASSES]'}), '(0.1, shape=[NUM_CLASSES])\n', (5249, 5275), True, 'import tensorflow.compat.v1 as tf\n'), ((5348, 5376), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (5357, 5376), True, 'import tensorflow.compat.v1 as tf\n'), ((5694, 5759), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'y_', 'logits': 'y_conv'}), '(labels=y_, logits=y_conv)\n', (5733, 5759), True, 'import tensorflow.compat.v1 as tf\n'), ((5989, 6009), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (5998, 6009), True, 'import tensorflow.compat.v1 as tf\n'), ((6011, 6027), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (6020, 6027), True, 'import tensorflow.compat.v1 as tf\n'), ((6059, 6098), 'tensorflow.compat.v1.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (6066, 6098), True, 'import tensorflow.compat.v1 as tf\n'), ((6440, 6473), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6471, 6473), True, 'import tensorflow.compat.v1 as tf\n'), ((6497, 6508), 'time.time', 'time.time', ([], {}), '()\n', (6506, 6508), False, 'import time\n'), ((7866, 7877), 'time.time', 'time.time', ([], {}), '()\n', (7875, 7877), False, 'import time\n'), ((7987, 8012), 
'os.path.exists', 'os.path.exists', (['SAVER_DIR'], {}), '(SAVER_DIR)\n', (8001, 8012), False, 'import os\n'), ((8057, 8079), 'os.makedirs', 'os.makedirs', (['SAVER_DIR'], {}), '(SAVER_DIR)\n', (8068, 8079), False, 'import os\n'), ((9698, 9714), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (9708, 9714), False, 'from PIL import Image\n'), ((1888, 1908), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1898, 1908), False, 'from PIL import Image\n'), ((3000, 3020), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3010, 3020), False, 'from PIL import Image\n'), ((3516, 3561), 'tensorflow.compat.v1.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (3559, 3561), True, 'import tensorflow.compat.v1 as tf\n'), ((3950, 3995), 'tensorflow.compat.v1.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (3993, 3995), True, 'import tensorflow.compat.v1 as tf\n'), ((4357, 4402), 'tensorflow.compat.v1.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (4400, 4402), True, 'import tensorflow.compat.v1 as tf\n'), ((4769, 4814), 'tensorflow.compat.v1.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (4812, 4814), True, 'import tensorflow.compat.v1 as tf\n'), ((5822, 5852), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (5844, 5852), True, 'import tensorflow.compat.v1 as tf\n'), ((9580, 9608), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (9589, 9608), True, 'import tensorflow.compat.v1 as tf\n'), ((10073, 10091), 'numpy.array', 'np.array', (['img_data'], {}), '(img_data)\n', (10081, 10091), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class Categorical(object):
def __init__(self, logits):
self.logits = logits
def logp(self, x):
return -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
def entropy(self):
a0 = self.logits - tf.reduce_max(self.logits, reduction_indices=[1], keep_dims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), reduction_indices=[1])
def kl(self, other):
a0 = self.logits - tf.reduce_max(self.logits, reduction_indices=[1], keep_dims=True)
a1 = other.logits - tf.reduce_max(other.logits, reduction_indices=[1], keep_dims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)
z1 = tf.reduce_sum(ea1, reduction_indices=[1], keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), reduction_indices=[1])
def sample(self):
return tf.multinomial(self.logits, 1)
class DiagGaussian(object):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(1, 2, flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def logp(self, x):
return - 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), reduction_indices=[1]) \
- 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[1]) \
- tf.reduce_sum(self.logstd, reduction_indices=[1])
def kl(self, other):
assert isinstance(other, DiagGaussian)
return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, reduction_indices=[1])
def entropy(self):
return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), reduction_indices=[1])
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
| [
"tensorflow.reduce_sum",
"tensorflow.square",
"numpy.log",
"tensorflow.multinomial",
"tensorflow.shape",
"tensorflow.exp",
"tensorflow.log",
"tensorflow.reduce_max",
"tensorflow.split",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
] | [((469, 479), 'tensorflow.exp', 'tf.exp', (['a0'], {}), '(a0)\n', (475, 479), True, 'import tensorflow as tf\n'), ((489, 546), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ea0'], {'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(ea0, reduction_indices=[1], keep_dims=True)\n', (502, 546), True, 'import tensorflow as tf\n'), ((851, 861), 'tensorflow.exp', 'tf.exp', (['a0'], {}), '(a0)\n', (857, 861), True, 'import tensorflow as tf\n'), ((872, 882), 'tensorflow.exp', 'tf.exp', (['a1'], {}), '(a1)\n', (878, 882), True, 'import tensorflow as tf\n'), ((892, 949), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ea0'], {'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(ea0, reduction_indices=[1], keep_dims=True)\n', (905, 949), True, 'import tensorflow as tf\n'), ((959, 1016), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ea1'], {'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(ea1, reduction_indices=[1], keep_dims=True)\n', (972, 1016), True, 'import tensorflow as tf\n'), ((1157, 1187), 'tensorflow.multinomial', 'tf.multinomial', (['self.logits', '(1)'], {}), '(self.logits, 1)\n', (1171, 1187), True, 'import tensorflow as tf\n'), ((1296, 1316), 'tensorflow.split', 'tf.split', (['(1)', '(2)', 'flat'], {}), '(1, 2, flat)\n', (1304, 1316), True, 'import tensorflow as tf\n'), ((1390, 1404), 'tensorflow.exp', 'tf.exp', (['logstd'], {}), '(logstd)\n', (1396, 1404), True, 'import tensorflow as tf\n'), ((271, 347), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.logits', 'labels': 'x'}), '(logits=self.logits, labels=x)\n', (317, 347), True, 'import tensorflow as tf\n'), ((393, 458), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.logits'], {'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(self.logits, reduction_indices=[1], keep_dims=True)\n', (406, 458), True, 'import tensorflow as tf\n'), ((684, 749), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.logits'], 
{'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(self.logits, reduction_indices=[1], keep_dims=True)\n', (697, 749), True, 'import tensorflow as tf\n'), ((774, 840), 'tensorflow.reduce_max', 'tf.reduce_max', (['other.logits'], {'reduction_indices': '[1]', 'keep_dims': '(True)'}), '(other.logits, reduction_indices=[1], keep_dims=True)\n', (787, 840), True, 'import tensorflow as tf\n'), ((1622, 1671), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.logstd'], {'reduction_indices': '[1]'}), '(self.logstd, reduction_indices=[1])\n', (1635, 1671), True, 'import tensorflow as tf\n'), ((596, 606), 'tensorflow.log', 'tf.log', (['z0'], {}), '(z0)\n', (602, 606), True, 'import tensorflow as tf\n'), ((1089, 1099), 'tensorflow.log', 'tf.log', (['z1'], {}), '(z1)\n', (1095, 1099), True, 'import tensorflow as tf\n'), ((1994, 2020), 'numpy.log', 'np.log', (['(2.0 * np.pi * np.e)'], {}), '(2.0 * np.pi * np.e)\n', (2000, 2020), True, 'import numpy as np\n'), ((2123, 2142), 'tensorflow.shape', 'tf.shape', (['self.mean'], {}), '(self.mean)\n', (2131, 2142), True, 'import tensorflow as tf\n'), ((1466, 1503), 'tensorflow.square', 'tf.square', (['((x - self.mean) / self.std)'], {}), '((x - self.mean) / self.std)\n', (1475, 1503), True, 'import tensorflow as tf\n'), ((1553, 1572), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (1559, 1572), True, 'import numpy as np\n'), ((1071, 1081), 'tensorflow.log', 'tf.log', (['z0'], {}), '(z0)\n', (1077, 1081), True, 'import tensorflow as tf\n'), ((1587, 1598), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1595, 1598), True, 'import tensorflow as tf\n'), ((1804, 1823), 'tensorflow.square', 'tf.square', (['self.std'], {}), '(self.std)\n', (1813, 1823), True, 'import tensorflow as tf\n'), ((1826, 1859), 'tensorflow.square', 'tf.square', (['(self.mean - other.mean)'], {}), '(self.mean - other.mean)\n', (1835, 1859), True, 'import tensorflow as tf\n'), ((1870, 1890), 'tensorflow.square', 'tf.square', (['other.std'], 
{}), '(other.std)\n', (1879, 1890), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
import numpy as _np
from keras.datasets import mnist as _mnist # also requires tensorflow
def make_MNIST(mnist_fpath):
    '''Download MNIST via Keras and dump it into a single ``.npy`` file.

    The saved object is a dict with keys ``train_images``, ``test_images``,
    ``train_labels`` and ``test_labels``, holding the arrays exactly as
    returned by ``keras.datasets.mnist.load_data()``.

    Args:
        mnist_fpath (str): Path and filename the data is saved under.
    '''
    # Keras downloads (and caches) the dataset on first use.
    (train_imgs, train_lbls), (test_imgs, test_lbls) = _mnist.load_data()
    dataset = {
        'train_images': train_imgs,
        'test_images': test_imgs,
        'train_labels': train_lbls,
        'test_labels': test_lbls,
    }
    # np.save pickles the dict into a single .npy file.
    _np.save(mnist_fpath, dataset)
    print('MNIST data saved:', mnist_fpath)
if __name__ == "__main__":
    # BUG FIX: the original called make_MNIST() with no argument, which raises
    # TypeError because mnist_fpath is a required parameter. Supply a default
    # output path when run as a script.
    make_MNIST('MNIST_data.npy')
# MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| [
"keras.datasets.mnist.load_data",
"numpy.save"
] | [((754, 772), 'keras.datasets.mnist.load_data', '_mnist.load_data', ([], {}), '()\n', (770, 772), True, 'from keras.datasets import mnist as _mnist\n'), ((912, 940), 'numpy.save', '_np.save', (['mnist_fpath', 'mnist'], {}), '(mnist_fpath, mnist)\n', (920, 940), True, 'import numpy as _np\n')] |
from sklearn.metrics import classification_report, confusion_matrix
import sklearn.ensemble
from sklearn import metrics
from sklearn import svm
from scipy.fftpack import fft
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import ExtraTreeClassifier
from sklearn import preprocessing
from sklearn.cluster import KMeans
import numpy as np
from scipy.fftpack import fft
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn import model_selection
from sklearn.datasets import fetch_california_housing
from myo_sign_language.data_processing.files_processing import process_from_files
import itertools
from scipy.signal import butter, lfilter, medfilt
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import f_regression
from sklearn.ensemble import ExtraTreesClassifier
from typing import Mapping, MutableMapping, Sequence, Iterable, List, Set
import more_itertools as mit
# Fractions used when splitting recordings into test/train portions.
TESTING_DATA_PERCENT = 0.3
TEACHING_DATA_PERCENT = 0.7
# Default number of samples per sliding time window (learn() sweeps others).
ROWS_IN_TIME_WINDOW = 50
# Default codebook size per sensor; other values previously tried are listed.
N_CLUSTERS = 10 # 9, 14, 17, 18, 22
# Channel order of the Myo data: accelerometer (3), gyroscope (3),
# orientation quaternion (4) and the eight EMG electrodes.
SENSORS_NAMES = ["acc1", "acc2", "acc3", "gyro1", "gyro2", "gyro3",
                 "orientation1", "orientation2", "orientation3", "orientation4",
                 "emg1", "emg2", "emg3", "emg4", "emg5", "emg6", "emg7", "emg8"]
def learn():
    """Run the full recognition experiment sweep.

    Loads the recorded sign files, then for every combination of codebook
    size (5/10/20 clusters), window size (5/10/20 samples) and window
    overlap, builds per-sensor KMeans codebooks, converts files into
    bag-of-clusters histograms and cross-validates every classifier from
    get_models(), printing "name: mean_accuracy (std)" lines.
    """
    # np.warnings.simplefilter(action='ignore', category=UserWarning)
    overlaped = 5  # placeholder; re-assigned by the sweep loops below
    # windows_size = 10
    # clusters = 5
    data_set = process_from_files('test')
    print('get data set')
    classes_names_as_is_in_data = create_classes_names_list(data_set)
    print(f'get {len(classes_names_as_is_in_data)} classes')
    files_as_nested_list = get_files_as_list_of_lists(data_set)
    print(f"extract data for {len(files_as_nested_list)} files with {len(files_as_nested_list[0])} columns")
    for clusters in [5, 10, 20]:
        windows_sizes = [5, 10, 20]
        for windows_size in windows_sizes:
            # Candidate overlaps (samples shared by consecutive windows) per
            # window size; the 15-40 branches are unreachable with the
            # current windows_sizes list but kept for experimentation.
            if windows_size == 5:
                overlaps = [1, 4]
            elif windows_size == 10:
                overlaps = [1, 5, 9]
            elif windows_size == 15:
                overlaps = [1, 7, 14]
            elif windows_size == 20:
                overlaps = [1, 10, 19]
            elif windows_size == 25:
                overlaps = [1, 13, 24]
            elif windows_size == 30:
                overlaps = [1, 15, 29]
            elif windows_size == 35:
                overlaps = [1, 19, 34]
            elif windows_size == 40:
                overlaps = [1, 20, 39]
            for overlaped in overlaps:
                # NOTE(review): test_size=0.9 keeps only 10% of the files
                # for codebook training; the remaining 90% are histogrammed
                # and cross-validated below.
                X_train, X_test, _, y_test = train_test_split(files_as_nested_list, classes_names_as_is_in_data,
                                                            test_size=0.9, random_state=4564567, shuffle=True)
                files_as_windows_test = get_overlapped_chunks_separated_for_files(X_test, windows_size, overlaped)
                all_sliding_windows = get_all_overlapped_chunks(X_train, windows_size, overlaped)
                print(f'Generate {len(all_sliding_windows)} windows to create codebook')
                kmeans_models = prepare_codebook(all_sliding_windows, clusters)
                print(f'create {len(kmeans_models)} models')
                histograms_test = get_histogram_basic_on_kmean(clusters, kmeans_models, files_as_windows_test)
                # find_the_best(X_train, X_test, y_train1, y_test1)
                models = get_models()
                for name, model in models:
                    # 5-fold CV repeated 10 times with a fixed seed.
                    kfold = model_selection.RepeatedKFold(n_splits=5, random_state=7, n_repeats=10)
                    # selection = svc_param_selection(histograms_test, y_test, kfold, model, name)
                    # print(selection)
                    cv_results = model_selection.cross_val_score(model, histograms_test, y_test, cv=kfold,
                                                                 scoring='accuracy')
                    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
                    print(msg)
                    # # labels = ['ME', 'OFTEN', 'RARE','PAIN','HEAD',
                    # #  'TOOTH','THROAT','BACK','LUNGS','VOMIT',
                    # #  'COUGH','RUNNY NOSE','FEVER', 'COLD', 'SWEAT',
                    # #  'INFLAMMATION', 'VERY', 'LITTLE']
                    # #
                    # # model.fit(X_train, y_train1)
                    # # # print(histograms_test)
                    # label = model.predict(X_test)
                    # # from sklearn.metrics import accuracy_score
                    # # print('not know data', accuracy_score(y_test1, label))
                    # #
                    # # # pickle.dump(model, open('codebook_models/kmean_provisor.sav', 'wb'))
                    # # label = model.predict(X_test)
                    # #
                    # # # conf_mat = confusion_matrix(label, y_test1)
                    # # # plot_confusion_matrix(conf_mat, labels)
    # print(clusters, ',', windows_size, ', ', overlaped, ',', rezultaty)
    # print(msg)
def svc_param_selection(X, y, nfolds, kn, kernel_name):
    """Grid-search ``C``/``gamma`` for an SVM-style estimator, save a heatmap.

    Parameters
    ----------
    X, y : array-likes
        Training features and labels.
    nfolds : int or CV splitter
        Passed to ``GridSearchCV(cv=...)``.
    kn : estimator
        The (unfitted) estimator whose hyper-parameters are tuned.
    kernel_name : str
        File name under which the score heatmap figure is saved.

    Returns
    -------
    dict
        The best ``{'C': ..., 'gamma': ...}`` combination found.
    """
    from sklearn.model_selection import GridSearchCV
    # scaler = StandardScaler()
    # X = preprocessing.scale(X)
    # X = scaler.fit_transform(X)
    # Logarithmic grids spanning 1e-3 .. 1e3 for both hyper-parameters.
    C_range = 10. ** np.arange(-3, 4)
    gamma_range = 10. ** np.arange(-3, 4)
    param_grid = {"C": C_range, 'gamma': gamma_range}
    print('griduje')
    grid_search = GridSearchCV(kn, param_grid, cv=nfolds)
    grid_search.fit(X, y)
    # BUG FIX: `grid_scores_` was removed in scikit-learn 0.20; `cv_results_`
    # exposes the mean test score per grid point. ParameterGrid iterates
    # parameters in sorted key order ('C' outer, 'gamma' inner), so the flat
    # score vector reshapes to a (C, gamma) matrix.
    scores = np.array(grid_search.cv_results_['mean_test_score']).reshape(
        len(C_range), len(gamma_range))
    title = "Heatmap for {}".format('SVM kernel rbf Classifier')
    plt.title(title)
    plt.subplots_adjust(left=0.15, right=0.95, bottom=0.15, top=0.95)
    # BUG FIX: plt.cm.spectral was removed from matplotlib; use the named map.
    plt.imshow(scores, interpolation='nearest', cmap='Spectral')
    plt.ylabel('C')
    plt.xlabel('gamma')
    plt.colorbar(ticks=[0, 0.3, 0.5, 0.7, 0.9, 1], label="precision")
    plt.clim(0, 1)
    plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
    plt.yticks(np.arange(len(C_range)), C_range)
    plt.savefig(kernel_name)
    print(grid_search.cv_results_)
    print(grid_search.best_score_)
    # Historical results from earlier runs (kept for reference):
    #   poly   0.9173  {'gamma': 1.0, 'C': 0.001}
    #   linear 0.7930  {'gamma': 0.001, 'C': 0.1}
    #   rbf    0.9248  {'gamma': 0.01, 'C': 100.0}
    #   extra trees 0.9354 {'n_estimators': 1024, 'max_features': 'log2'}
    #   kNN    0.9239  {'n_neighbors': 2, 'metric': 'manhattan'}
    return grid_search.best_params_
def get_overlapped_chunks_separated_for_files(
        files_as_nested_list: List[List[int]],
        windows_size: int,
        overlaped: int) -> List[List[List[List[int]]]]:
    """Window every sensor channel of every file, keeping per-file grouping.

    Parameters
    ----------
    files_as_nested_list
        One entry per file; each file is a list of sensor channels (each
        channel a flat list of samples).
    windows_size
        Number of samples per sliding window.
    overlaped
        Number of samples shared by consecutive windows.

    Returns
    -------
    For each file, for each sensor channel, the list of overlapping windows.
    """
    files_as_overplaped_chunks = []
    for file in files_as_nested_list:
        # The channel index is not needed here, so iterate values directly
        # (the original enumerate index was unused).
        file_chunked = [get_overlapped_chunks(sensor, windows_size, overlaped)
                        for sensor in file]
        files_as_overplaped_chunks.append(file_chunked)
    return files_as_overplaped_chunks
def get_all_overlapped_chunks(
        files: List[List[int]],
        window_size: int,
        overlaped: int) -> List[List[List[int]]]:
    """Pool the sliding windows of every file, grouped per sensor channel.

    Returns one list per sensor channel containing the windows from all
    files concatenated together.
    """
    sensor_count = len(files[0])
    pooled = [[] for _ in range(sensor_count)]
    for single_file in files:
        for channel_idx, channel in enumerate(single_file):
            pooled[channel_idx] += get_overlapped_chunks(channel, window_size, overlaped)
    return pooled
def get_overlapped_chunks(arr, window, overlap):
    """Cut the last axis of ``arr`` into overlapping windows.

    Consecutive windows of length ``window`` share ``overlap`` samples;
    the windows are produced as a zero-copy strided view and returned as
    nested Python lists.
    """
    import numpy as np
    from numpy.lib.stride_tricks import as_strided
    data = np.asarray(arr)
    step = window - overlap
    n_windows = (data.shape[-1] - overlap) // step
    shape = data.shape[:-1] + (n_windows, window)
    strides = data.strides[:-1] + (step * data.strides[-1], data.strides[-1])
    return as_strided(data, shape=shape, strides=strides).tolist()
def get_histogram_basic_on_kmean(
        clusters_number: int,
        kmeans_models: List[KMeans],
        files_as_windows: List[List[Set[int]]]
        ) -> List[List[int]]:
    """Turn each windowed file into one bag-of-clusters feature vector.

    Each sensor channel is quantized with its own KMeans codebook; the
    per-channel cluster counts (ordered 0..clusters_number-1, zeros kept)
    are concatenated into a single histogram per file.
    """
    from collections import Counter
    histograms = []
    for windowed_file in files_as_windows:
        feature_vector = []
        for channel_idx, channel_windows in enumerate(windowed_file):
            labels = kmeans_models[channel_idx].predict(channel_windows)
            counts = Counter(labels)
            feature_vector.extend(counts.get(cluster, 0)
                                  for cluster in range(clusters_number))
        histograms.append(feature_vector)
    return histograms
def prepare_codebook(all_sliding_windows: List[Set[int]], number_of_clusters: int):
    """Fit one KMeans codebook per sensor channel and return the models."""
    print('start prepare codebook')
    codebooks = []
    for channel_windows in all_sliding_windows:
        # The fitted models could be persisted per channel, e.g.:
        # pickle.dump(model, open('codebook_models/' + SENSORS_NAMES[i] + '.sav', 'wb'))
        codebooks.append(KMeans(n_clusters=number_of_clusters).fit(channel_windows))
    return codebooks
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render ``cm`` as an annotated confusion-matrix image.

    With ``normalize=True`` each row is rescaled to sum to one before
    plotting; the matrix is always echoed to stdout as well.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_positions = np.arange(len(classes))
    plt.xticks(tick_positions, classes, rotation=45)
    plt.yticks(tick_positions, classes)
    # Annotate every cell, switching text colour on a mid-scale cutoff
    # so the labels stay readable on dark cells.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def find_the_best_autosklearn(X_train, X_test, y_train, y_test):
    """Run auto-sklearn model search with 5-fold CV and print its results.

    Fits an AutoSklearnClassifier on the training split, refits the final
    ensemble on the full training data, then prints the discovered models,
    search statistics and the accuracy on the held-out test split.
    """
    import autosklearn.classification
    automl = autosklearn.classification.AutoSklearnClassifier(
        per_run_time_limit=100,
        tmp_folder='./models_data/autosklearn_cv_example_tmp',
        output_folder='./models_data/autosklearn_cv_example_out',
        delete_tmp_folder_after_terminate=False,
        resampling_strategy='cv',
        resampling_strategy_arguments={'folds': 5},
    )
    # fit() changes the data in place, but refit needs the original data. We
    # therefore copy the data. In practice, one should reload the data
    automl.fit(X_train.copy(), y_train.copy(), dataset_name='digits')
    # During fit(), models are fit on individual cross-validation folds. To use
    # all available data, we call refit() which trains all models in the
    # final ensemble on the whole dataset.
    automl.refit(X_train.copy(), y_train.copy())
    # print(automl.show_models())
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    predictions = automl.predict(X_test)
    print(automl.show_models())
    print(automl.sprint_statistics())
    print(automl.cv_results_)
    print("Accuracy score", sklearn.metrics.accuracy_score(y_test, predictions))
def mlbox_counter():
    """Run an MLBox AutoML pipeline on train_egg.csv / test_egg.csv.

    Reads the CSV pair, drops drifting (non-stable) features, optimises a
    LightGBM estimator over the search space below with 10-fold CV, then
    fits the best pipeline and writes predictions.
    """
    from mlbox.preprocessing import Reader, Drift_thresholder
    from mlbox.optimisation import Optimiser
    from mlbox.prediction import Predictor
    # NOTE(review): '601' is presumably the target column name in the CSVs —
    # confirm against the data files.
    target_name = '601'
    rd = Reader(sep=",")
    df = rd.train_test_split(['train_egg.csv', 'test_egg.csv'], target_name)
    # print(df)
    dft = Drift_thresholder()
    df = dft.fit_transform(df) # removing non-stable features (like ID,...)
    opt = Optimiser(scoring="accuracy", n_folds=10)
    # Hyper-parameter search space for the LightGBM estimator ('est__' keys).
    space = {
        'est__strategy': {"search": "choice",
                          "space": ["LightGBM"]},
        'est__n_estimators': {"search": "choice",
                              "space": [150]},
        'est__colsample_bytree': {"search": "uniform",
                                  "space": [0.8, 0.95]},
        'est__subsample': {"search": "uniform",
                           "space": [0.8, 0.95]},
        'est__max_depth': {"search": "choice",
                           "space": [5, 6, 7, 8, 9]},
        'est__learning_rate': {"search": "choice",
                               "space": [0.07]}
    }
    # 15 optimisation iterations over the space above.
    best = opt.optimise(space, df, 15)
    prd = Predictor()
    prd.fit_predict(best, df)
def find_the_best(X_train, X_test, y_train, y_test):
    """Search classifier hyper-parameters with hyperopt-sklearn (kNN space).

    Drives the hpsklearn fit loop one trial at a time so progress can be
    plotted, retrains the best model on the full training data, and prints
    the chosen pipeline plus its generalization accuracy on the test set.
    """
    from hyperopt import tpe
    import hpsklearn
    import hpsklearn.demo_support
    from hpsklearn import knn
    # Work on ndarray copies so hpsklearn gets consistent array inputs.
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)
    estimator = hpsklearn.HyperoptEstimator(
        preprocessing=[],
        classifier=knn('myknn'),
        algo=tpe.suggest,
        # trial_timeout=500.0, # seconds
        max_evals=100,
    )
    # Drive the fit generator manually so we can plot after every trial.
    fit_iterator = estimator.fit_iter(X_train, y_train)
    # BUG FIX: `fit_iterator.next()` is Python 2 syntax; Python 3 generators
    # only expose __next__, so use the builtin next().
    next(fit_iterator)
    plot_helper = hpsklearn.demo_support.PlotHelper(estimator,
                                      mintodate_ylim=(-.01, .05))
    while len(estimator.trials.trials) < estimator.max_evals:
        fit_iterator.send(1) # -- try one more model
        plot_helper.post_iter()
    plot_helper.post_loop()
    plt.show()
    # -- Model selection was done on a subset of the training data.
    # -- Now that we've picked a model, train on all training data.
    estimator.retrain_best_model_on_full_data(X_train, y_train)
    print('Best preprocessing pipeline:')
    for pp in estimator._best_preprocs:
        print(pp)
    print()
    print('Best classifier:\n', estimator._best_learner)
    print(estimator.best_model())
    test_predictions = estimator.predict(X_test)
    acc_in_percent = 100 * np.mean(test_predictions == y_test)
    print()
    print('Prediction accuracy in generalization is ', acc_in_percent)
def genetic_algorithm(X_train, X_test, y_train, y_test):
    """Evolve a classification pipeline with TPOT and print its test score."""
    from tpot import TPOTClassifier
    searcher = TPOTClassifier(generations=100, population_size=20, verbosity=2)
    searcher.fit(X_train, y_train)
    test_score = searcher.score(X_test, y_test)
    print(test_score)
def get_kmeans_model(clusters, clusters_group):
    """Fit and return a KMeans model with ``clusters_group`` centroids."""
    estimator = KMeans(n_clusters=clusters_group)
    return estimator.fit(clusters)
def create_classes_names_list(training_set):
    """Build the label vector: one class-name string per recorded sample.

    :param training_set: mapping of class name -> list of samples
    :return: flat list of str(class name), aligned with the sample order
    """
    labels = []
    for class_name, samples in training_set.items():
        labels += [str(class_name)] * len(samples)
    return labels
def get_files_as_list_of_lists(data_set):
    """Flatten a mapping of class -> files into one list of files.

    :param data_set: mapping of class name -> list of file matrices
    :return: all file matrices concatenated, in dict iteration order
    """
    return [single_file
            for file_group in data_set.values()
            for single_file in file_group]
def get_models():
    """Return the (name, unfitted estimator) pairs evaluated by learn().

    Hyper-parameter values were pinned from earlier grid searches (see the
    notes inside svc_param_selection); random_state is fixed so the repeated
    cross-validation in learn() is reproducible.
    """
    models = []
    # Single nearest neighbour with manhattan distance.
    models.append(('kneighboard ', KNeighborsClassifier(
        algorithm='auto',
        leaf_size=30,
        metric='manhattan',
        metric_params=None,
        n_jobs=1,
        n_neighbors=1,
        p=1,
        weights='distance')))
    models.append(
        ('extra tree entropy', ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='gini',
                                                  max_depth=None, max_features='log2',
                                                  max_leaf_nodes=None, min_impurity_decrease=0.0,
                                                  min_impurity_split=None, min_samples_leaf=1,
                                                  min_samples_split=2, min_weight_fraction_leaf=0.0,
                                                  n_estimators=1024, n_jobs=1, oob_score=False,
                                                  random_state=1,
                                                  verbose=False, warm_start=False)))
    models.append(('random trees',
                   sklearn.ensemble.RandomForestClassifier(bootstrap=True, class_weight=None,
                                                           criterion='entropy',
                                                           max_depth=None, max_features='log2',
                                                           max_leaf_nodes=None,
                                                           min_impurity_decrease=0.0,
                                                           min_impurity_split=None,
                                                           min_samples_leaf=1, min_samples_split=2,
                                                           min_weight_fraction_leaf=0.0, n_estimators=231,
                                                           n_jobs=1,
                                                           oob_score=False, random_state=2, verbose=False,
                                                           warm_start=False)))
    #
    # NOTE(review): the label says 'svm poly' but the kernel is actually rbf.
    models.append(('svm poly', svm.SVC(kernel='rbf', gamma=1.0, C=0.001)))
    models.append(('gaussian nb', GaussianNB()))
    return models
if __name__ == "__main__":
    # Script entry point: run the full windowing/codebook/classifier sweep.
    learn()
| [
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"myo_sign_language.data_processing.files_processing.process_from_files",
"hpsklearn.knn",
"mlbox.preprocessing.Drift_thresholder",
"numpy.mean",
"... | [((1949, 1975), 'myo_sign_language.data_processing.files_processing.process_from_files', 'process_from_files', (['"""test"""'], {}), "('test')\n", (1967, 1975), False, 'from myo_sign_language.data_processing.files_processing import process_from_files\n'), ((6447, 6486), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['kn', 'param_grid'], {'cv': 'nfolds'}), '(kn, param_grid, cv=nfolds)\n', (6459, 6486), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6968, 6984), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6977, 6984), True, 'from matplotlib import pyplot as plt\n'), ((6989, 7054), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.95)', 'bottom': '(0.15)', 'top': '(0.95)'}), '(left=0.15, right=0.95, bottom=0.15, top=0.95)\n', (7008, 7054), True, 'from matplotlib import pyplot as plt\n'), ((7059, 7124), 'matplotlib.pyplot.imshow', 'plt.imshow', (['scores'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.spectral'}), "(scores, interpolation='nearest', cmap=plt.cm.spectral)\n", (7069, 7124), True, 'from matplotlib import pyplot as plt\n'), ((7129, 7155), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""max features"""'], {}), "('max features')\n", (7139, 7155), True, 'from matplotlib import pyplot as plt\n'), ((7160, 7194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of classifier"""'], {}), "('number of classifier')\n", (7170, 7194), True, 'from matplotlib import pyplot as plt\n'), ((7199, 7264), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ticks': '[0, 0.3, 0.5, 0.7, 0.9, 1]', 'label': '"""precision"""'}), "(ticks=[0, 0.3, 0.5, 0.7, 0.9, 1], label='precision')\n", (7211, 7264), True, 'from matplotlib import pyplot as plt\n'), ((7269, 7283), 'matplotlib.pyplot.clim', 'plt.clim', (['(0)', '(1)'], {}), '(0, 1)\n', (7277, 7283), True, 'from matplotlib import pyplot as plt\n'), ((7407, 7431), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['kernel_name'], {}), '(kernel_name)\n', (7418, 7431), True, 'from matplotlib import pyplot as plt\n'), ((9061, 9076), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (9071, 9076), True, 'import numpy as np\n'), ((11250, 11300), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (11260, 11300), True, 'from matplotlib import pyplot as plt\n'), ((11305, 11321), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (11314, 11321), True, 'from matplotlib import pyplot as plt\n'), ((11326, 11340), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11338, 11340), True, 'from matplotlib import pyplot as plt\n'), ((11386, 11430), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (11396, 11430), True, 'from matplotlib import pyplot as plt\n'), ((11435, 11466), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (11445, 11466), True, 'from matplotlib import pyplot as plt\n'), ((11772, 11790), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11788, 11790), True, 'from matplotlib import pyplot as plt\n'), ((11795, 11819), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (11805, 11819), True, 'from matplotlib import pyplot as plt\n'), ((11824, 11853), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (11834, 11853), True, 'from matplotlib import pyplot as plt\n'), ((11858, 11868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11866, 11868), True, 'from matplotlib import pyplot as plt\n'), ((12851, 12867), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (12859, 12867), True, 'import numpy as np\n'), ((12881, 12897), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', 
(12889, 12897), True, 'import numpy as np\n'), ((13327, 13342), 'mlbox.preprocessing.Reader', 'Reader', ([], {'sep': '""","""'}), "(sep=',')\n", (13333, 13342), False, 'from mlbox.preprocessing import Reader, Drift_thresholder\n'), ((13446, 13465), 'mlbox.preprocessing.Drift_thresholder', 'Drift_thresholder', ([], {}), '()\n', (13463, 13465), False, 'from mlbox.preprocessing import Reader, Drift_thresholder\n'), ((13554, 13595), 'mlbox.optimisation.Optimiser', 'Optimiser', ([], {'scoring': '"""accuracy"""', 'n_folds': '(10)'}), "(scoring='accuracy', n_folds=10)\n", (13563, 13595), False, 'from mlbox.optimisation import Optimiser\n'), ((14271, 14282), 'mlbox.prediction.Predictor', 'Predictor', ([], {}), '()\n', (14280, 14282), False, 'from mlbox.prediction import Predictor\n'), ((14482, 14501), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (14492, 14501), True, 'import numpy as np\n'), ((14516, 14535), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (14526, 14535), True, 'import numpy as np\n'), ((14549, 14567), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (14559, 14567), True, 'import numpy as np\n'), ((14581, 14599), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (14591, 14599), True, 'import numpy as np\n'), ((14971, 15045), 'hpsklearn.demo_support.PlotHelper', 'hpsklearn.demo_support.PlotHelper', (['estimator'], {'mintodate_ylim': '(-0.01, 0.05)'}), '(estimator, mintodate_ylim=(-0.01, 0.05))\n', (15004, 15045), False, 'import hpsklearn\n'), ((15277, 15287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15285, 15287), True, 'from matplotlib import pyplot as plt\n'), ((15995, 16059), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {'generations': '(100)', 'population_size': '(20)', 'verbosity': '(2)'}), '(generations=100, population_size=20, verbosity=2)\n', (16009, 16059), False, 'from tpot import TPOTClassifier\n'), ((5840, 5856), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), 
'(-3, 4)\n', (5849, 5856), True, 'import numpy as np\n'), ((5882, 5898), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (5891, 5898), True, 'import numpy as np\n'), ((15769, 15804), 'numpy.mean', 'np.mean', (['(test_predictions == y_test)'], {}), '(test_predictions == y_test)\n', (15776, 15804), True, 'import numpy as np\n'), ((6764, 6780), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (6772, 6780), True, 'import numpy as np\n'), ((9318, 9371), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['arr'], {'shape': 'new_shape', 'strides': 'new_strides'}), '(arr, shape=new_shape, strides=new_strides)\n', (9328, 9371), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((14761, 14773), 'hpsklearn.knn', 'knn', (['"""myknn"""'], {}), "('myknn')\n", (14764, 14773), False, 'from hpsklearn import HyperoptEstimator, random_forest, svc, knn\n'), ((16190, 16223), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'clusters_group'}), '(n_clusters=clusters_group)\n', (16196, 16223), False, 'from sklearn.cluster import KMeans\n'), ((16814, 16960), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'algorithm': '"""auto"""', 'leaf_size': '(30)', 'metric': '"""manhattan"""', 'metric_params': 'None', 'n_jobs': '(1)', 'n_neighbors': '(1)', 'p': '(1)', 'weights': '"""distance"""'}), "(algorithm='auto', leaf_size=30, metric='manhattan',\n metric_params=None, n_jobs=1, n_neighbors=1, p=1, weights='distance')\n", (16834, 16960), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((17074, 17440), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'bootstrap': '(False)', 'class_weight': 'None', 'criterion': '"""gini"""', 'max_depth': 'None', 'max_features': '"""log2"""', 'max_leaf_nodes': 'None', 'min_impurity_decrease': '(0.0)', 'min_impurity_split': 'None', 'min_samples_leaf': '(1)', 'min_samples_split': '(2)', 'min_weight_fraction_leaf': '(0.0)', 'n_estimators': '(1024)', 'n_jobs': 
'(1)', 'oob_score': '(False)', 'random_state': '(1)', 'verbose': '(False)', 'warm_start': '(False)'}), "(bootstrap=False, class_weight=None, criterion='gini',\n max_depth=None, max_features='log2', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=1024,\n n_jobs=1, oob_score=False, random_state=1, verbose=False, warm_start=False)\n", (17094, 17440), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((18845, 18886), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(1.0)', 'C': '(0.001)'}), "(kernel='rbf', gamma=1.0, C=0.001)\n", (18852, 18886), False, 'from sklearn import svm\n'), ((18923, 18935), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (18933, 18935), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3100, 3222), 'sklearn.model_selection.train_test_split', 'train_test_split', (['files_as_nested_list', 'classes_names_as_is_in_data'], {'test_size': '(0.9)', 'random_state': '(4564567)', 'shuffle': '(True)'}), '(files_as_nested_list, classes_names_as_is_in_data,\n test_size=0.9, random_state=4564567, shuffle=True)\n', (3116, 3222), False, 'from sklearn.model_selection import train_test_split\n'), ((9859, 9882), 'collections.Counter', 'Counter', (['predicted_list'], {}), '(predicted_list)\n', (9866, 9882), False, 'from collections import Counter\n'), ((10455, 10492), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'number_of_clusters'}), '(n_clusters=number_of_clusters)\n', (10461, 10492), False, 'from sklearn.cluster import KMeans\n'), ((4015, 4086), 'sklearn.model_selection.RepeatedKFold', 'model_selection.RepeatedKFold', ([], {'n_splits': '(5)', 'random_state': '(7)', 'n_repeats': '(10)'}), '(n_splits=5, random_state=7, n_repeats=10)\n', (4044, 4086), False, 'from sklearn import model_selection\n'), ((4258, 4355), 'sklearn.model_selection.cross_val_score', 
'model_selection.cross_val_score', (['model', 'histograms_test', 'y_test'], {'cv': 'kfold', 'scoring': '"""accuracy"""'}), "(model, histograms_test, y_test, cv=kfold,\n scoring='accuracy')\n", (4289, 4355), False, 'from sklearn import model_selection\n')] |
from logs import logDecorator as lD
from lib.odeModels import simpleODE as sOde
import json
import numpy as np
from time import time
from datetime import datetime as dt
import matplotlib.pyplot as plt
from scipy import signal
# Load the project configuration once at import time; use a context manager
# so the file handle is closed deterministically (the original open() call
# leaked it).
with open('../config/config.json') as _config_file:
    config = json.load(_config_file)
# Fully-qualified logger name for this module, rooted at the configured base.
logBase = config['logging']['logBase'] + '.modules.odeSimple.odeSimple'
def dTanh(x):
    """Derivative of tanh at x: 1 - tanh(x)**2."""
    t = np.tanh(x)
    return 1 - t * t
def one(x):
    """Identity activation: returns ``x`` unchanged."""
    return x
def sqr(x):
    """Square activation: x**2."""
    return x * x
def Dsqr(x):
    """Derivative of the square activation: 2*x."""
    return 2 * x
def compareJac():
    """Validate model.jac against a finite-difference approximation.

    Builds a small randomly-weighted ODE model, perturbs each component of
    y0 by delY0, and compares the resulting finite-difference Jacobian with
    the analytic one from model.jac; prints both matrices and their
    elementwise ratio (which should be ~1 everywhere if jac is correct).
    """
    np.set_printoptions(precision=1)
    t = np.linspace(0, 100, 101)
    # ---------------------------------------------------------
    # These can be specific to a person or to the simulation
    # as a whole ...
    # ---------------------------------------------------------
    Nnt = 3
    Nl = 3
    Atimesj = [( 5, 15, 3 ),
               ( 35, 50, 12.5 ),
               ( 75, 80, 7.6 ),]
    Btimesj = [( 5, 15, 3 ),
               ( 35, 50, 12.5 ),
               ( 75, 80, 7.6 ),]
    fj = [2 ,2 ,2 ]
    rj = [0.5 ,0.5 ,0.5 ]
    mj = [0.5 ,0.5 ,0.5 ]
    stress_t = t.copy()
    # NOTE(review): the random stress signal is immediately overwritten with
    # zeros below — the first assignment is dead code.
    stress_v = np.random.rand(len(t))
    stress_v = np.zeros(len(t))
    model = sOde.simpleODE(Nnt, Nl, Atimesj, Btimesj, fj, rj, mj, stress_t, stress_v)
    # y0 written with a float first element so the array is float-typed and
    # the delY0 perturbation below is not truncated.
    y0 = np.array([1.0,1,1,2,2,2])
    # Random neural-network weights (12->3->1 with 4 inputs at the first layer).
    NNwts = [ np.random.rand(12, 4),
              np.random.rand( 3, 12),
              np.random.rand( 1, 3) ]
    NNb = [ 0, 1, -1 ]
    NNact = [ np.tanh, np.tanh, np.tanh ]
    NNactD = [ dTanh, dTanh, dTanh ] # Differentiation of tanh
    # NNact = [ one, one, sqr ]
    # NNactD = [ one, one, Dsqr ] # Differentiation of tanh
    Taus = [1, 4, 12]
    i = 0
    # Finite-difference step size for the perturbation of y0.
    delY0 = 1e-10
    dy = model.dy(y0, 0, NNwts, NNb, NNact, NNactD, Taus)
    jac = model.jac(y0, 0, NNwts, NNb, NNact, NNactD, Taus)
    jacFD = []
    # Forward-difference column of the Jacobian for each state component.
    for i in range(len(y0)):
        y1 = y0.copy()
        y1[i] = y1[i] + delY0
        dy1 = model.dy(y1, 0, NNwts, NNb, NNact, NNactD, Taus)
        jacFD.append((dy1 - dy)/delY0)
    jacFD = np.array(jacFD)
    print('------------[Finite difference]------------')
    print(jacFD)
    print('------------[Calculated]------------')
    print(jac)
    print('------------[Ratio]------------')
    print(jac/jacFD)
    return
@lD.log(logBase + '.doSomething')
def doSomething(logger, plotData=False):
    '''Benchmark the ODE solver with and without the analytic Jacobian.

    Solves the same randomly-weighted ODE system twice per iteration — once
    letting the integrator approximate the Jacobian and once supplying
    model.jac — printing step/evaluation counts, wall-clock times, and the
    mean absolute difference between the two solutions.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger instance injected by the @lD.log decorator.
    plotData : {bool}, optional
        When True, saves per-state-variable comparison plots under
        ../results/img/.
    '''
    print('We are in odeSimple')
    t = np.linspace(0, 100, 101)
    # ---------------------------------------------------------
    # These can be specific to a person or to the simulation
    # as a whole ...
    # ---------------------------------------------------------
    Nnt = 3
    Nl = 3
    Atimesj = [( 5, 15, 3 ),
               ( 35, 50, 35 ),
               ( 50, 60, 3 ),
               ( 60, 75, 300 ),
               ( 75, 80, 7.6 ),]
    Btimesj = [( 5, 15, 70 ),
               ( 35, 50, 12.5 ),
               ( 75, 80, 7.6 ),]
    fj = np.array([12 ,7 ,15 ])
    rj = np.array([6 ,3 ,8 ])
    mj = np.array([10 ,17 ,2 ])
    stress_t = t.copy()
    # Square-wave stress signal (period 20, amplitude 50).
    stress_v = signal.square(2 * np.pi * t / 20.0)*50
    # stress_v = np.zeros(len(t))
    model = sOde.simpleODE(Nnt, Nl, Atimesj, Btimesj, fj, rj, mj, stress_t, stress_v)
    allTimes = []
    allTimesJac = []
    for i in range(2):
        y0 = np.array([1,1,1,2,2,2])
        NNwts = [ np.random.rand(12, 4),
                  np.random.rand( 3, 12),
                  np.random.rand( 1, 3) ]
        NNb = [ 0, 1, -1 ]
        NNact = [ np.tanh, np.tanh, np.tanh ]
        NNactD = [ dTanh, dTanh, dTanh ] # Differentiation of tanh
        Taus = [1, 4, 12]
        args = (NNwts, NNb, NNact, NNactD, Taus)
        tNew = np.linspace(5, 75, 10000)
        # --- solve without supplying the analytic Jacobian ---
        startTime = time()
        result, specs = model.solveY( y0, tNew, args, full_output=True )
        tDelta = time() - startTime
        allTimes.append( tDelta )
        print('[No Jacobian] # steps {:6d}, fn eval {:6d}, jac eval {:6d} --> '.format(
            specs['nst'][-1], specs['nfe'][-1], specs['nje'][-1]), end = '')
        print(tDelta)
        # --- solve again using the analytic Jacobian ---
        startTime = time()
        result_1, specs_1 = model.solveY( y0, tNew, args, useJac=True, full_output=True )
        tDelta = time() - startTime
        # NOTE(review): the Jacobian timing skips the first (warm-up)
        # iteration, while the no-Jacobian timing keeps it — confirm this
        # asymmetry is intentional.
        if i > 0:
            allTimesJac.append( tDelta )
        print('[ Jacobian] # steps {:6d}, fn eval {:6d}, jac eval {:6d} --> '.format(
            specs_1['nst'][-1], specs_1['nfe'][-1], specs_1['nje'][-1]), end = '')
        print(tDelta)
        # Mean absolute discrepancy between the two solutions.
        error = np.mean(np.abs(result - result_1))
        print('error = {}'.format(error))
        if plotData:
            now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
            for i in range(result.shape[1]):
                plt.figure()
                plt.plot(tNew, result[:, i])
                plt.plot(tNew, result_1[:, i])
                plt.savefig('../results/img/simpleODE-{}_{:05}.png'.format(now, i))
            plt.close('all')
    allTimes = np.array(allTimes)
    allTimesJac = np.array(allTimesJac)
    print('[No Jac] Mean = {}, Std = {}'.format( allTimes.mean(), allTimes.std() ))
    print('[ Jac] Mean = {}, Std = {}'.format( allTimesJac.mean(), allTimesJac.std() ))
    return
@lD.log(logBase + '.main')
def main(logger):
    """Entry point for this module.

    Runs the solver demo (without plotting) followed by the Jacobian
    timing comparison.

    Parameters
    ----------
    logger : {logging.Logger}
        Logger injected by the ``lD.log`` decorator.
    """
    doSomething(plotData=False)
    compareJac()
| [
"numpy.set_printoptions",
"numpy.abs",
"numpy.tanh",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"lib.odeModels.simpleODE.simpleODE",
"time.time",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"numpy.random.rand",
"scipy.signal.square",
"logs.logDecorator.log",
"dat... | [((2322, 2354), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.doSomething')"], {}), "(logBase + '.doSomething')\n", (2328, 2354), True, 'from logs import logDecorator as lD\n'), ((5465, 5490), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.main')"], {}), "(logBase + '.main')\n", (5471, 5490), True, 'from logs import logDecorator as lD\n'), ((506, 538), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(1)'}), '(precision=1)\n', (525, 538), True, 'import numpy as np\n'), ((548, 572), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(101)'], {}), '(0, 100, 101)\n', (559, 572), True, 'import numpy as np\n'), ((1232, 1305), 'lib.odeModels.simpleODE.simpleODE', 'sOde.simpleODE', (['Nnt', 'Nl', 'Atimesj', 'Btimesj', 'fj', 'rj', 'mj', 'stress_t', 'stress_v'], {}), '(Nnt, Nl, Atimesj, Btimesj, fj, rj, mj, stress_t, stress_v)\n', (1246, 1305), True, 'from lib.odeModels import simpleODE as sOde\n'), ((1318, 1348), 'numpy.array', 'np.array', (['[1.0, 1, 1, 2, 2, 2]'], {}), '([1.0, 1, 1, 2, 2, 2])\n', (1326, 1348), True, 'import numpy as np\n'), ((2086, 2101), 'numpy.array', 'np.array', (['jacFD'], {}), '(jacFD)\n', (2094, 2101), True, 'import numpy as np\n'), ((2596, 2620), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(101)'], {}), '(0, 100, 101)\n', (2607, 2620), True, 'import numpy as np\n'), ((3160, 3181), 'numpy.array', 'np.array', (['[12, 7, 15]'], {}), '([12, 7, 15])\n', (3168, 3181), True, 'import numpy as np\n'), ((3205, 3224), 'numpy.array', 'np.array', (['[6, 3, 8]'], {}), '([6, 3, 8])\n', (3213, 3224), True, 'import numpy as np\n'), ((3247, 3268), 'numpy.array', 'np.array', (['[10, 17, 2]'], {}), '([10, 17, 2])\n', (3255, 3268), True, 'import numpy as np\n'), ((3396, 3469), 'lib.odeModels.simpleODE.simpleODE', 'sOde.simpleODE', (['Nnt', 'Nl', 'Atimesj', 'Btimesj', 'fj', 'rj', 'mj', 'stress_t', 'stress_v'], {}), '(Nnt, Nl, Atimesj, Btimesj, fj, rj, mj, stress_t, stress_v)\n', (3410, 3469), True, 'from lib.odeModels 
import simpleODE as sOde\n'), ((5211, 5229), 'numpy.array', 'np.array', (['allTimes'], {}), '(allTimes)\n', (5219, 5229), True, 'import numpy as np\n'), ((5248, 5269), 'numpy.array', 'np.array', (['allTimesJac'], {}), '(allTimesJac)\n', (5256, 5269), True, 'import numpy as np\n'), ((1358, 1379), 'numpy.random.rand', 'np.random.rand', (['(12)', '(4)'], {}), '(12, 4)\n', (1372, 1379), True, 'import numpy as np\n'), ((1397, 1418), 'numpy.random.rand', 'np.random.rand', (['(3)', '(12)'], {}), '(3, 12)\n', (1411, 1418), True, 'import numpy as np\n'), ((1435, 1455), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (1449, 1455), True, 'import numpy as np\n'), ((3310, 3345), 'scipy.signal.square', 'signal.square', (['(2 * np.pi * t / 20.0)'], {}), '(2 * np.pi * t / 20.0)\n', (3323, 3345), False, 'from scipy import signal\n'), ((3552, 3580), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 2, 2]'], {}), '([1, 1, 1, 2, 2, 2])\n', (3560, 3580), True, 'import numpy as np\n'), ((3947, 3972), 'numpy.linspace', 'np.linspace', (['(5)', '(75)', '(10000)'], {}), '(5, 75, 10000)\n', (3958, 3972), True, 'import numpy as np\n'), ((3993, 3999), 'time.time', 'time', ([], {}), '()\n', (3997, 3999), False, 'from time import time\n'), ((4353, 4359), 'time.time', 'time', ([], {}), '()\n', (4357, 4359), False, 'from time import time\n'), ((383, 393), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (390, 393), True, 'import numpy as np\n'), ((3594, 3615), 'numpy.random.rand', 'np.random.rand', (['(12)', '(4)'], {}), '(12, 4)\n', (3608, 3615), True, 'import numpy as np\n'), ((3637, 3658), 'numpy.random.rand', 'np.random.rand', (['(3)', '(12)'], {}), '(3, 12)\n', (3651, 3658), True, 'import numpy as np\n'), ((3679, 3699), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (3693, 3699), True, 'import numpy as np\n'), ((4091, 4097), 'time.time', 'time', ([], {}), '()\n', (4095, 4097), False, 'from time import time\n'), ((4468, 4474), 'time.time', 'time', ([], 
{}), '()\n', (4472, 4474), False, 'from time import time\n'), ((4766, 4791), 'numpy.abs', 'np.abs', (['(result - result_1)'], {}), '(result - result_1)\n', (4772, 4791), True, 'import numpy as np\n'), ((5177, 5193), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5186, 5193), True, 'import matplotlib.pyplot as plt\n'), ((4976, 4988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4986, 4988), True, 'import matplotlib.pyplot as plt\n'), ((5005, 5033), 'matplotlib.pyplot.plot', 'plt.plot', (['tNew', 'result[:, i]'], {}), '(tNew, result[:, i])\n', (5013, 5033), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5080), 'matplotlib.pyplot.plot', 'plt.plot', (['tNew', 'result_1[:, i]'], {}), '(tNew, result_1[:, i])\n', (5058, 5080), True, 'import matplotlib.pyplot as plt\n'), ((4875, 4883), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4881, 4883), True, 'from datetime import datetime as dt\n')] |
# stdlib
import argparse
from pathlib import Path
# 3p
from joblib import Parallel, delayed
from tqdm import tqdm
import numpy as np
import scipy.io as sio
from scipy.sparse import csr_matrix
from sklearn import neighbors
from sklearn.utils.graph import graph_shortest_path
import trimesh
import networkx as nx
# project
import utils.shot.shot as shot
from utils.io import read_mesh
from utils.laplace_decomposition import laplace_decomposition
# SHOT's hyperparameters
NORMAL_R = 0.1
SHOT_R = 0.1
KNN = 20
def compute_geodesic_matrix(verts, faces, NN):
    """Approximate pairwise geodesic distances on a triangle mesh.

    Builds a sparse graph whose edges are the mesh's vertex adjacencies,
    weighted by Euclidean k-NN distances, then runs all-pairs shortest
    paths over that graph.
    """
    n_verts = verts.shape[0]
    # mesh connectivity -> sparse 0/1 adjacency over all vertices
    tri = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
    adjacency = nx.adjacency_matrix(tri.vertex_adjacency_graph, range(n_verts))
    # Euclidean distances to the NN nearest neighbours of every vertex
    knn_dists = neighbors.kneighbors_graph(verts, n_neighbors=NN, mode='distance', include_self=False)
    # keep only the distances that lie along true mesh edges
    edge_dists = csr_matrix((n_verts, n_verts)).tolil()
    edge_dists[adjacency != 0] = knn_dists[adjacency != 0]
    # all-pairs shortest path over edge weights = geodesic approximation
    return graph_shortest_path(edge_dists, directed=False)
def process_mesh(mesh, save_dir, args):
    """Preprocess one mesh file and store the result as a ``.mat`` file.

    Centers the shape, rescales it to unit surface area, computes the
    Laplacian eigendecomposition, SHOT descriptors and (optionally) the
    geodesic distance matrix.
    """
    stem = mesh.stem
    verts, faces = read_mesh(mesh)
    # translate the shape so its centroid sits at the origin
    verts -= np.mean(verts, axis=0)
    # first decomposition is only used to measure the surface area
    evals, evecs, evecs_trans, old_sqrt_area = laplace_decomposition(verts, faces, args.num_eigen)
    # rescale to unit area, then decompose again on the normalized shape
    verts /= old_sqrt_area
    evals, evecs, evecs_trans, sqrt_area = laplace_decomposition(verts, faces, args.num_eigen)
    print(f"shape {mesh.stem} ==> old sqrt area: {old_sqrt_area :.8f} | new sqrt area: {sqrt_area :.8f}")
    payload = {"pos": verts, "faces": faces,
               "evals": evals.flatten(), "evecs": evecs, "evecs_trans": evecs_trans}
    # optional dense geodesic distance matrix
    if args.geo:
        payload["geod_dist"] = compute_geodesic_matrix(verts, faces, args.nn)
    # SHOT point descriptors (reshaped to 352 dims per vertex)
    payload["feat"] = shot.compute(verts, NORMAL_R, SHOT_R).reshape(-1, 352)
    sio.savemat(save_dir / f"{stem}.mat", payload)
def main(args):
    """Process every mesh under ``args.dataroot`` in parallel and save
    the results under ``args.save_dir``."""
    out_dir = Path(args.save_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    meshes = list(Path(args.dataroot).iterdir())
    jobs = (delayed(process_mesh)(mesh, out_dir, args) for mesh in tqdm(meshes))
    Parallel(n_jobs=args.njobs)(jobs)
if __name__ == '__main__':
    # Command-line interface: dataset locations plus preprocessing knobs.
    cli = argparse.ArgumentParser(
        description="""Preprocess data for FMNet training.
        Compute Laplacian eigen decomposition, shot features, and geodesic distance for each shape."""
    )
    cli.add_argument('-d', '--dataroot', required=False,
                     default="../data/faust/raw", help='root directory of the dataset')
    cli.add_argument('-sd', '--save-dir', required=False,
                     default="../data/faust/processed", help='root directory to save the processed dataset')
    cli.add_argument("-ne", "--num-eigen", type=int, default=100, help="number of eigenvectors kept.")
    cli.add_argument("-nj", "--njobs", type=int, default=-2, help="Number of parallel processes to use.")
    cli.add_argument("--nn", type=int, default=20,
                     help="Number of Neighbor to consider when computing geodesic matrix.")
    cli.add_argument("--geo", action='store_true', help="Compute geodesic distances.")
    main(cli.parse_args())
| [
"trimesh.Trimesh",
"tqdm.tqdm",
"sklearn.utils.graph.graph_shortest_path",
"argparse.ArgumentParser",
"scipy.io.savemat",
"utils.shot.shot.compute",
"joblib.Parallel",
"pathlib.Path",
"numpy.mean",
"scipy.sparse.csr_matrix",
"utils.laplace_decomposition.laplace_decomposition",
"utils.io.read_m... | [((595, 654), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'verts', 'faces': 'faces', 'process': '(False)'}), '(vertices=verts, faces=faces, process=False)\n', (610, 654), False, 'import trimesh\n'), ((851, 941), 'sklearn.neighbors.kneighbors_graph', 'neighbors.kneighbors_graph', (['verts'], {'n_neighbors': 'NN', 'mode': '"""distance"""', 'include_self': '(False)'}), "(verts, n_neighbors=NN, mode='distance',\n include_self=False)\n", (877, 941), False, 'from sklearn import neighbors\n'), ((1148, 1197), 'sklearn.utils.graph.graph_shortest_path', 'graph_shortest_path', (['distance_adj'], {'directed': '(False)'}), '(distance_adj, directed=False)\n', (1167, 1197), False, 'from sklearn.utils.graph import graph_shortest_path\n'), ((1307, 1322), 'utils.io.read_mesh', 'read_mesh', (['mesh'], {}), '(mesh)\n', (1316, 1322), False, 'from utils.io import read_mesh\n'), ((1355, 1377), 'numpy.mean', 'np.mean', (['verts'], {'axis': '(0)'}), '(verts, axis=0)\n', (1362, 1377), True, 'import numpy as np\n'), ((1454, 1505), 'utils.laplace_decomposition.laplace_decomposition', 'laplace_decomposition', (['verts', 'faces', 'args.num_eigen'], {}), '(verts, faces, args.num_eigen)\n', (1475, 1505), False, 'from utils.laplace_decomposition import laplace_decomposition\n'), ((1659, 1710), 'utils.laplace_decomposition.laplace_decomposition', 'laplace_decomposition', (['verts', 'faces', 'args.num_eigen'], {}), '(verts, faces, args.num_eigen)\n', (1680, 1710), False, 'from utils.laplace_decomposition import laplace_decomposition\n'), ((2265, 2315), 'scipy.io.savemat', 'sio.savemat', (["(save_dir / f'{new_name}.mat')", 'to_save'], {}), "(save_dir / f'{new_name}.mat', to_save)\n", (2276, 2315), True, 'import scipy.io as sio\n'), ((2350, 2369), 'pathlib.Path', 'Path', (['args.save_dir'], {}), '(args.save_dir)\n', (2354, 2369), False, 'from pathlib import Path\n'), ((2437, 2456), 'pathlib.Path', 'Path', (['args.dataroot'], {}), '(args.dataroot)\n', (2441, 2456), 
False, 'from pathlib import Path\n'), ((2684, 2887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess data for FMNet training.\n Compute Laplacian eigen decomposition, shot features, and geodesic distance for each shape."""'}), '(description=\n """Preprocess data for FMNet training.\n Compute Laplacian eigen decomposition, shot features, and geodesic distance for each shape."""\n )\n', (2707, 2887), False, 'import argparse\n'), ((2507, 2534), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.njobs'}), '(n_jobs=args.njobs)\n', (2515, 2534), False, 'from joblib import Parallel, delayed\n'), ((957, 1001), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(verts.shape[0], verts.shape[0])'], {}), '((verts.shape[0], verts.shape[0]))\n', (967, 1001), False, 'from scipy.sparse import csr_matrix\n'), ((2158, 2195), 'utils.shot.shot.compute', 'shot.compute', (['verts', 'NORMAL_R', 'SHOT_R'], {}), '(verts, NORMAL_R, SHOT_R)\n', (2170, 2195), True, 'import utils.shot.shot as shot\n'), ((2535, 2556), 'joblib.delayed', 'delayed', (['process_mesh'], {}), '(process_mesh)\n', (2542, 2556), False, 'from joblib import Parallel, delayed\n'), ((2628, 2640), 'tqdm.tqdm', 'tqdm', (['meshes'], {}), '(meshes)\n', (2632, 2640), False, 'from tqdm import tqdm\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
import numpy as np
from random import randint, shuffle, choice
# from random import random as rand
import random
import torch
from ncc.data.tools import data_utils
from ncc.data.ncc_dataset import NccDataset
from ncc.data import constants
from ncc.data.tools.truncate import truncate_seq
from ncc import LOGGER
import sys
def find_sep(src, sep_id):
    """Return the indices of every occurrence of ``sep_id`` in ``src``."""
    return [idx for idx, tok in enumerate(src) if tok == sep_id]
# tokens are left-padding TODO:
def docs2tensor(docs, pad_idx):
    """Batch a list of ``(token_tensor, sep_positions)`` pairs into tensors.

    Tokens are right-padded with ``pad_idx``; sentence slots beyond a
    document's real sentence count are flagged in the returned pad mask,
    and their sentence-end positions default to 0.
    """
    batch_size = len(docs)
    max_sents = max(len(seps) for _, seps in docs)
    max_tokens = max(len(tokens) for tokens, _ in docs)
    token_batch = torch.LongTensor(batch_size, max_tokens).fill_(pad_idx)
    sent_pad_mask = torch.ByteTensor(batch_size, max_sents).fill_(1)
    sent_end_batch = torch.LongTensor(batch_size, max_sents).fill_(0)
    for row, (tokens, seps) in enumerate(docs):
        token_batch[row, 0:len(tokens)] = tokens
        sent_pad_mask[row, 0:len(seps)] = 0
        sent_end_batch[row, 0:len(seps)] = torch.LongTensor(seps)
    return token_batch, sent_pad_mask, sent_end_batch
def create_src_tok_batch(samples, sep_id, eos_idx, pad_idx):
    """Tensorize each sample's tokens, force a trailing sentence separator
    and batch everything via :func:`docs2tensor`.

    ``eos_idx`` is accepted for interface compatibility but is unused.
    """
    docs = []
    for sample in samples:
        tokens = torch.LongTensor(sample['src_tokens'])
        # every document must end on a sentence separator
        if tokens[-1] != sep_id:
            extended = tokens.new(tokens.size(0) + 1)
            extended[0:tokens.size(0)] = tokens
            extended[-1] = sep_id
            tokens = extended
        docs.append((tokens, find_sep(tokens, sep_id)))
    return docs2tensor(docs, pad_idx)
def collate(samples, src_dict, tgt_dict, left_pad_source=True, left_pad_target=False):
    """Collate per-sample dicts into a hierarchical-RoBERTa mini-batch."""
    if len(samples) == 0:
        return {}
    sep_idx = src_dict.index(constants.S_SEP)
    src_tokens, doc_pad_mask, src_sent_ends = create_src_tok_batch(
        samples, sep_idx, src_dict.eos(), src_dict.pad())
    # per-sentence position token: S_SEP where a real sentence exists, pad elsewhere
    doc_pos_tok = torch.LongTensor(doc_pad_mask.size()).fill_(sep_idx)
    doc_pos_tok[doc_pad_mask] = src_dict.pad()
    segment_labels = torch.LongTensor([s['segment_labels'] for s in samples])
    masked_ids = torch.LongTensor([s['masked_ids'] for s in samples])
    masked_weights = torch.LongTensor([s['masked_weights'] for s in samples])
    return {
        'net_input': {
            'src_tokens': src_tokens,
            'src_sent_ends': src_sent_ends,
            'doc_pad_mask': doc_pad_mask,
            'doc_pos_tok': doc_pos_tok,
            'segment_labels': segment_labels,
        },
        'target': masked_ids,
        'ntokens': masked_weights.sum().item(),
        'nsentences': 2,
        'sample_size': masked_ids.size(0),
    }
class HiRobertaMaskCodeDocstringPairDataset(NccDataset):
    """
    A (code, docstring) pair dataset for hierarchical RoBERTa masked-LM training.

    Each example concatenates the source (code) and target (docstring) token
    sequences with CLS/SEP special tokens, assigns segment labels, and masks
    a random subset of tokens for the MLM objective.

    Args:
        src (torch.utils.data.Dataset): source dataset to wrap
        src_sizes (List[int]): source sentence lengths
        src_dict (~fairseq.data.Dictionary): source vocabulary
        tgt (torch.utils.data.Dataset, optional): target dataset to wrap
        tgt_sizes (List[int], optional): target sentence lengths
        tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
        left_pad_source (bool, optional): pad source tensors on the left side
            (default: True).
        left_pad_target (bool, optional): pad target tensors on the left side
            (default: False).
        max_source_positions (int, optional): max number of tokens in the
            source sentence (default: 1024).
        max_target_positions (int, optional): max number of tokens in the
            target sentence (default: 1024).
        shuffle (bool, optional): shuffle dataset elements before batching
            (default: True).
        input_feeding (bool, optional): create a shifted version of the targets
            to be passed into the model for teacher forcing (default: True).
        remove_eos_from_source (bool, optional): if set, removes eos from end
            of source if it's present (default: False).
        append_eos_to_target (bool, optional): if set, appends eos to end of
            target if it's absent (default: False).
        align_dataset (torch.utils.data.Dataset, optional): dataset
            containing alignments.
        append_bos (bool, optional): if set, appends bos to the beginning of
            source/target sentence.
        s2s_special_token (bool, optional): use seq2seq-style special tokens.
        pos_shift (bool, optional): fine-tuning mode; predict the shifted
            target instead of randomly masked tokens.
        max_pred (int, optional): maximum number of masked (predicted) tokens.
        mask_source_words (bool, optional): also allow masking source tokens.
        skipgram_prb, skipgram_size: reserved for skip-gram masking (stored,
            not used by this implementation).
        max_len (int, optional): fixed padded length of the packed sequence.
        mask_prob (float, optional): fraction of tokens to mask.
        num_qkv (int, optional): reserved; kept for interface compatibility.
    """
    def __init__(
        self, src, src_sizes, src_dict,
        tgt=None, tgt_sizes=None, tgt_dict=None,
        left_pad_source=True, left_pad_target=False,
        max_source_positions=1024, max_target_positions=1024,
        shuffle=True, input_feeding=True,
        remove_eos_from_source=False, append_eos_to_target=False,
        align_dataset=None,
        append_bos=False, eos=None,
        s2s_special_token=False,
        pos_shift=False,
        max_pred=50,
        mask_source_words=False,
        skipgram_prb=0.0,
        skipgram_size=0.0,
        max_len=512,
        mask_prob=0.15,
        num_qkv=0,
    ):
        if tgt_dict is not None:
            # source/target vocabularies must agree on special symbols
            assert src_dict.pad() == tgt_dict.pad()
            assert src_dict.eos() == tgt_dict.eos()
            assert src_dict.unk() == tgt_dict.unk()
        self.src = src
        self.tgt = tgt
        self.src_sizes = np.array(src_sizes)
        self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.max_len = max_len
        self._tril_matrix = torch.tril(torch.ones((max_len, max_len), dtype=torch.long))
        self.shuffle = shuffle
        self.remove_eos_from_source = remove_eos_from_source
        self.append_eos_to_target = append_eos_to_target
        # BUGFIX: `supports_prefetch` / `prefetch()` read `self.align_dataset`,
        # but the assignment was commented out, so prefetching raised
        # AttributeError. Restore the assignment.
        self.align_dataset = align_dataset
        self.append_bos = append_bos
        self.s2s_special_token = s2s_special_token
        self.pos_shift = pos_shift
        self.max_pred = max_pred
        self.mask_source_words = mask_source_words
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_prob = mask_prob  # masking probability
        self.num_qkv = num_qkv
    def __getitem__(self, index):
        """Build one packed, segment-labelled and (optionally) masked example."""
        src_item = self.src[index]
        tgt_item = self.tgt[index] if self.tgt is not None else None
        # truncate jointly so that CLS + src + SEP + tgt + SEP fits in max_len
        src_item, tgt_item, _, _ = truncate_seq(src_item, tgt_item, self.max_len - 3,
                                                  self.max_source_positions, self.max_target_positions)
        # Append EOS to end of tgt sentence if it does not have an EOS
        # and remove EOS from end of src sentence if it exists.
        # This is useful when we use existing datasets for opposite directions
        # i.e., when we want to use tgt_dataset as src_dataset and vice versa
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
        if self.append_bos:
            bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
            if self.tgt and self.tgt[index][0] != bos:
                tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
            bos = self.src_dict.bos()
            if self.src[index][-1] != bos:
                src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]
        if self.pos_shift:
            # BUGFIX: the BOS index must be wrapped in a list —
            # torch.LongTensor(int) allocates an *uninitialized* tensor of
            # that length instead of a one-element tensor.
            tgt_item = torch.cat(
                [torch.LongTensor([self.tgt_dict.index(constants.S2S_BOS)]), tgt_item])
        # Pack with special tokens.
        if self.s2s_special_token:
            item = torch.cat([src_item, torch.LongTensor([self.src_dict.index(constants.S2S_SEP)]), tgt_item,
                              torch.LongTensor([self.src_dict.index(constants.SEP)])])
        else:
            # <CLS> + S1 + <SEP> + S2 + <SEP>
            item = torch.cat([
                torch.LongTensor([self.src_dict.index(constants.CLS)]),
                src_item,
                torch.LongTensor([self.src_dict.index(constants.S_SEP)]),
                tgt_item,
                torch.LongTensor([self.src_dict.index(constants.S_SEP)]),
            ])
        # segment ids: 4 for the source part (incl. CLS + SEP), 5 for the target part
        segment_ids = [4] * (len(src_item) + 2) + [5] * (len(tgt_item) + 1)
        if self.pos_shift:  # pos_shift is set to True only when fine-tuning
            n_pred = min(self.max_pred, len(tgt_item))
            masked_pos = [len(src_item) + 1 + i for i in range(len(tgt_item))]
            masked_weights = [1] * n_pred
            masked_ids = tgt_item.tolist()[1:] + [self.src_dict.index(constants.SEP)]
        else:
            # For masked Language Models
            # the number of prediction is sometimes less than max_pred when sequence is short
            effective_length = len(tgt_item)
            if self.mask_source_words:
                effective_length += len(src_item)
            n_pred = min(self.max_pred, max(1, int(round(effective_length * self.mask_prob))))
            # candidate positions of masked tokens
            cand_pos = []
            special_pos = set()
            for i, tk in enumerate(item.tolist()):
                # only mask tokens_b (target sequence)
                # we will mask [SEP] as an ending symbol
                if (i >= len(src_item) + 2) and (tk != self.tgt_dict.index(constants.S2S_BOS)):
                    cand_pos.append(i)
                elif self.mask_source_words and (i < len(src_item) + 1) and (tk != self.src_dict.index(constants.CLS)) \
                        and (not self.src_dict.symbols[tk].startswith('<SEP')):
                    cand_pos.append(i)
                else:
                    special_pos.add(i)
            shuffle(cand_pos)
            masked_pos = cand_pos[:n_pred]
            masked_ids = [item.tolist()[pos] for pos in masked_pos]
            for pos in masked_pos:
                if random.random() < 0.8:  # 80%: replace with [MASK]
                    item[pos] = self.src_dict.index(constants.T_MASK)
                elif random.random() < 0.5:  # 10%: replace with a random token
                    item[pos] = randint(0, len(self.src_dict) - 1)
                # remaining ~10%: keep the original token
            # when n_pred < max_pred, we only calculate loss within n_pred
            masked_weights = [1] * len(masked_ids)
        # the item tensor already holds token ids
        input_ids = item.tolist()
        # Zero Padding up to the fixed max_len
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([self.tgt_dict.pad_index] * n_pad)
        segment_ids.extend([self.tgt_dict.pad_index] * n_pad)
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([self.src_dict.pad_index] * n_pad)
            if masked_pos is not None:
                masked_pos.extend([0] * n_pad)
            if masked_weights is not None:
                masked_weights.extend([0] * n_pad)
        example = {
            'src_tokens': input_ids,  # list
            'segment_labels': segment_ids,  # list
            'masked_ids': masked_ids,  # list
            'masked_weights': masked_weights,  # list
        }
        return example
    def __len__(self):
        return len(self.src)
    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.
        Args:
            samples (List[dict]): samples to collate
        Returns:
            dict: a mini-batch with `net_input` (the model inputs),
            `target` (masked token ids), `ntokens`, `nsentences`
            and `sample_size`.
        """
        return collate(
            samples, src_dict=self.src_dict, tgt_dict=self.tgt_dict,
            left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
        )
    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        # ROBUSTNESS: guard tgt_sizes being None, consistent with num_tokens()
        return self.src_sizes[index] + (self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        if self.tgt_sizes is not None:
            # stable sort: by target length first, then by source length
            indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]
        return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]
    @property
    def supports_prefetch(self):
        return (
            getattr(self.src, 'supports_prefetch', False)
            and (getattr(self.tgt, 'supports_prefetch', False) or self.tgt is None)
        )
    def prefetch(self, indices):
        self.src.prefetch(indices)
        if self.tgt is not None:
            self.tgt.prefetch(indices)
        if self.align_dataset is not None:
            self.align_dataset.prefetch(indices)
| [
"torch.ones",
"torch.LongTensor",
"torch.ByteTensor",
"random.shuffle",
"numpy.argsort",
"random.random",
"numpy.array",
"ncc.data.tools.truncate.truncate_seq"
] | [((2536, 2592), 'torch.LongTensor', 'torch.LongTensor', (["[s['segment_labels'] for s in samples]"], {}), "([s['segment_labels'] for s in samples])\n", (2552, 2592), False, 'import torch\n'), ((2934, 2986), 'torch.LongTensor', 'torch.LongTensor', (["[s['masked_ids'] for s in samples]"], {}), "([s['masked_ids'] for s in samples])\n", (2950, 2986), False, 'import torch\n'), ((3080, 3136), 'torch.LongTensor', 'torch.LongTensor', (["[s['masked_weights'] for s in samples]"], {}), "([s['masked_weights'] for s in samples])\n", (3096, 3136), False, 'import torch\n'), ((1449, 1474), 'torch.LongTensor', 'torch.LongTensor', (['sep_pos'], {}), '(sep_pos)\n', (1465, 1474), False, 'import torch\n'), ((1645, 1683), 'torch.LongTensor', 'torch.LongTensor', (["sample['src_tokens']"], {}), "(sample['src_tokens'])\n", (1661, 1683), False, 'import torch\n'), ((6375, 6394), 'numpy.array', 'np.array', (['src_sizes'], {}), '(src_sizes)\n', (6383, 6394), True, 'import numpy as np\n'), ((7981, 8090), 'ncc.data.tools.truncate.truncate_seq', 'truncate_seq', (['src_item', 'tgt_item', '(self.max_len - 3)', 'self.max_source_positions', 'self.max_target_positions'], {}), '(src_item, tgt_item, self.max_len - 3, self.\n max_source_positions, self.max_target_positions)\n', (7993, 8090), False, 'from ncc.data.tools.truncate import truncate_seq\n'), ((1040, 1073), 'torch.LongTensor', 'torch.LongTensor', (['bsz', 'max_seqlen'], {}), '(bsz, max_seqlen)\n', (1056, 1073), False, 'import torch\n'), ((1108, 1140), 'torch.ByteTensor', 'torch.ByteTensor', (['bsz', 'max_nsent'], {}), '(bsz, max_nsent)\n', (1124, 1140), False, 'import torch\n'), ((1170, 1202), 'torch.LongTensor', 'torch.LongTensor', (['bsz', 'max_nsent'], {}), '(bsz, max_nsent)\n', (1186, 1202), False, 'import torch\n'), ((6420, 6439), 'numpy.array', 'np.array', (['tgt_sizes'], {}), '(tgt_sizes)\n', (6428, 6439), True, 'import numpy as np\n'), ((6819, 6867), 'torch.ones', 'torch.ones', (['(max_len, max_len)'], {'dtype': 'torch.long'}), 
'((max_len, max_len), dtype=torch.long)\n', (6829, 6867), False, 'import torch\n'), ((11959, 11976), 'random.shuffle', 'shuffle', (['cand_pos'], {}), '(cand_pos)\n', (11966, 11976), False, 'from random import randint, shuffle, choice\n'), ((17322, 17375), 'numpy.argsort', 'np.argsort', (['self.src_sizes[indices]'], {'kind': '"""mergesort"""'}), "(self.src_sizes[indices], kind='mergesort')\n", (17332, 17375), True, 'import numpy as np\n'), ((17244, 17297), 'numpy.argsort', 'np.argsort', (['self.tgt_sizes[indices]'], {'kind': '"""mergesort"""'}), "(self.tgt_sizes[indices], kind='mergesort')\n", (17254, 17297), True, 'import numpy as np\n'), ((12143, 12158), 'random.random', 'random.random', ([], {}), '()\n', (12156, 12158), False, 'import random\n'), ((8771, 8794), 'torch.LongTensor', 'torch.LongTensor', (['[eos]'], {}), '([eos])\n', (8787, 8794), False, 'import torch\n'), ((8999, 9022), 'torch.LongTensor', 'torch.LongTensor', (['[bos]'], {}), '([bos])\n', (9015, 9022), False, 'import torch\n'), ((9162, 9185), 'torch.LongTensor', 'torch.LongTensor', (['[bos]'], {}), '([bos])\n', (9178, 9185), False, 'import torch\n'), ((12281, 12296), 'random.random', 'random.random', ([], {}), '()\n', (12294, 12296), False, 'import random\n')] |
"""
Задача 4:
Найти в массиве те элементы, значение которых меньше среднего арифметического, взятого от
всех элементов массива.
"""
from random import randint
import numpy as np
def main():
    """Read matrix dimensions, build a random matrix and report every
    element that is below the arithmetic mean."""
    try:
        rows = int(input("Введите кол-во строк в матрице -> "))
        cols = int(input("Введите кол-во столбцов в матрице -> "))
    except ValueError:
        print("Некорректный ввод данных!")
        return
    # Fill the matrix with random integers in [-100, 100]
    matrix = np.matrix([[randint(-100, 100) for _ in range(cols)] for _ in range(rows)])
    print("Сгенерированная матрица:\n", matrix)
    average = np.mean(matrix)
    print("Среднее арифметическое: {}".format(average))
    for element in np.nditer(matrix):
        if element < average:
            print("Элемент {} меньше сред. арифметического".format(element))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.nditer",
"numpy.mean",
"random.randint"
] | [((575, 590), 'numpy.mean', 'np.mean', (['matrix'], {}), '(matrix)\n', (582, 590), True, 'import numpy as np\n'), ((657, 674), 'numpy.nditer', 'np.nditer', (['matrix'], {}), '(matrix)\n', (666, 674), True, 'import numpy as np\n'), ((458, 476), 'random.randint', 'randint', (['(-100)', '(100)'], {}), '(-100, 100)\n', (465, 476), False, 'from random import randint\n')] |
import numpy as np
from numbers import Number
from pytest import raises, warns
from hypothesis import given, strategies, unlimited
from hypothesis import settings as hyp_settings
from hypothesis import HealthCheck
from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, \
GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel
from kernelmethods.utils import check_callable
from kernelmethods.base import KernelMatrix, KernelFromCallable, BaseKernelFunction
from kernelmethods.operations import is_positive_semidefinite
# Dimensionality used when no explicit feature dim is requested.
default_feature_dim = 10
# Sampling ranges for randomized test parameters.
range_feature_dim = [10, 50]
range_num_samples = [50, 100]
range_polynomial_degree = [2, 10] # degree=1 is tested in LinearKernel()
# Fix the RNG so generated test data (and failures) are reproducible.
np.random.seed(42)
# choosing skip_input_checks=False will speed up test runs
# default values for parameters
num_tests_psd_kernel = 3
def gen_random_array(dim):
    """Return a 1-D array of `dim` uniform random floats in [0, 1).

    Kept as a helper so the precision and dtype of generated floats can be
    controlled in a single place.
    """
    # TODO input sparse arrays for test
    sample = np.random.rand(dim)
    return sample
def gen_random_sample(num_samples, sample_dim):
    """Return a (num_samples, sample_dim) array of uniform random floats.

    Kept as a helper so the precision and dtype of generated floats can be
    controlled in a single place.
    """
    # TODO input sparse arrays for test
    sample = np.random.rand(num_samples, sample_dim)
    return sample
def _test_for_all_kernels(kernel, sample_dim, check_PSDness=True):
    """Common tests that all kernels must pass.

    Parameters
    ----------
    kernel : callable
        Kernel function; must accept two 1-D arrays and return a number.
    sample_dim : int
        Dimensionality of the random vectors used for evaluation.
    check_PSDness : bool
        When True, also verify the kernel produces a PSD kernel matrix
        via ``kernel.is_psd()``.
    """
    x = gen_random_array(sample_dim)
    y = gen_random_array(sample_dim)

    try:
        result = kernel(x, y)
    except Exception as exc:
        # Chain the original exception so the root cause is not lost.
        raise RuntimeError('{} unable to calculate!\n'
                           ' on x {}\n y{}'.format(kernel, x, y)) from exc

    if not isinstance(result, Number):
        raise ValueError('result {} of type {} is not a number!\n'
                         'x={}\ny={}\nkernel={}\n'
                         ''.format(result, type(result), x, y, kernel))

    # kernels must be symmetric: k(x, y) == k(y, x)
    if kernel(y, x) != result:
        raise ValueError('{} is not symmetric!'
                         'x={}\n y={}\n kernel={}\n'
                         ''.format(kernel.name, x, y, kernel))

    if check_PSDness:
        # ensuring it produces a PSD KM
        kernel.is_psd()
def test_kernel_design():
    """
    Every kernel must
    1. have a name defined
    2. be callable with two samples
    3. reject non-numeric input with ValueError
    """
    for kernel in DEFINED_KERNEL_FUNCS:
        # must be callable with 2 args
        check_callable(kernel, min_num_args=2)

        if not hasattr(kernel, 'name'):
            raise TypeError('{} does not have name attribute!'.format(kernel))

        # only numeric data is accepted and other dtypes must raise an error.
        # BUG FIX: this loop previously ran *after* the kernel loop and hence
        # exercised only the last kernel; it now checks every kernel.
        for non_num in ['string',
                        [object, object]]:
            with raises(ValueError):
                _ = kernel(non_num, non_num)
def _test_func_is_valid_kernel(kernel, sample_dim, num_samples):
    """A func is a valid kernel if the kernel matrix generated by it is PSD.
    Not including this in tests for all kernels to allow for non-PSD kernels in the future
    """
    km = KernelMatrix(kernel, name='TestKM')
    km.attach_to(gen_random_sample(num_samples, sample_dim))
    if not is_positive_semidefinite(km.full, verbose=True):
        raise ValueError('{} is not PSD'.format(str(km)))
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(*range_feature_dim),
       strategies.integers(*range_num_samples),
       strategies.integers(*range_polynomial_degree),
       strategies.floats(min_value=0, max_value=1e3,
                         allow_nan=False, allow_infinity=False))
def test_polynomial_kernel(sample_dim, num_samples,
                           poly_degree, poly_intercept):
    """Polynomial kernel: generic sanity checks plus PSD-ness of its kernel matrix."""
    kern = PolyKernel(b=poly_intercept, degree=poly_degree, skip_input_checks=False)
    _test_for_all_kernels(kern, sample_dim)
    _test_func_is_valid_kernel(kern, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(*range_feature_dim),
       strategies.integers(*range_num_samples),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_gaussian_kernel(sample_dim, num_samples, sigma):
    """Gaussian kernel: generic sanity checks plus PSD-ness of its kernel matrix."""
    kern = GaussianKernel(sigma=sigma, skip_input_checks=False)
    _test_for_all_kernels(kern, sample_dim)
    _test_func_is_valid_kernel(kern, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(*range_feature_dim),
       strategies.integers(*range_num_samples))
def test_linear_kernel(sample_dim, num_samples):
    """Linear kernel: generic sanity checks plus PSD-ness of its kernel matrix."""
    kern = LinearKernel(skip_input_checks=False)
    _test_for_all_kernels(kern, sample_dim)
    _test_func_is_valid_kernel(kern, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(*range_feature_dim),
       strategies.integers(*range_num_samples),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_laplacian_kernel(sample_dim, num_samples, gamma):
    """Laplacian kernel: generic sanity checks plus PSD-ness of its kernel matrix."""
    kern = LaplacianKernel(gamma=gamma, skip_input_checks=False)
    _test_for_all_kernels(kern, sample_dim)
    _test_func_is_valid_kernel(kern, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(*range_feature_dim),
       strategies.integers(*range_num_samples),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_sigmoid_kernel(sample_dim, num_samples, gamma, offset):
    """Sigmoid kernel: generic sanity checks only."""
    kern = SigmoidKernel(gamma=gamma, offset=offset, skip_input_checks=False)
    # sigmoid is not always PSD
    _test_for_all_kernels(kern, sample_dim, check_PSDness=False)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              timeout=unlimited, suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_chi2_kernel(sample_dim, num_samples, gamma):
    """Tests specific for Chi2 kernel."""  # docstring previously said "Laplacian" (copy-paste)
    chi2 = Chi2Kernel(gamma=gamma, skip_input_checks=False)
    _test_for_all_kernels(chi2, sample_dim)
    _test_func_is_valid_kernel(chi2, sample_dim, num_samples)
| [
"kernelmethods.base.KernelMatrix",
"numpy.random.seed",
"kernelmethods.utils.check_callable",
"kernelmethods.operations.is_positive_semidefinite",
"kernelmethods.numeric_kernels.LaplacianKernel",
"kernelmethods.numeric_kernels.SigmoidKernel",
"kernelmethods.numeric_kernels.LinearKernel",
"pytest.raise... | [((723, 741), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (737, 741), True, 'import numpy as np\n'), ((997, 1016), 'numpy.random.rand', 'np.random.rand', (['dim'], {}), '(dim)\n', (1011, 1016), True, 'import numpy as np\n'), ((1175, 1214), 'numpy.random.rand', 'np.random.rand', (['num_samples', 'sample_dim'], {}), '(num_samples, sample_dim)\n', (1189, 1214), True, 'import numpy as np\n'), ((3014, 3049), 'kernelmethods.base.KernelMatrix', 'KernelMatrix', (['kernel'], {'name': '"""TestKM"""'}), "(kernel, name='TestKM')\n", (3026, 3049), False, 'from kernelmethods.base import KernelMatrix, KernelFromCallable, BaseKernelFunction\n'), ((3124, 3171), 'kernelmethods.operations.is_positive_semidefinite', 'is_positive_semidefinite', (['KM.full'], {'verbose': '(True)'}), '(KM.full, verbose=True)\n', (3148, 3171), False, 'from kernelmethods.operations import is_positive_semidefinite\n'), ((3904, 3977), 'kernelmethods.numeric_kernels.PolyKernel', 'PolyKernel', ([], {'degree': 'poly_degree', 'b': 'poly_intercept', 'skip_input_checks': '(False)'}), '(degree=poly_degree, b=poly_intercept, skip_input_checks=False)\n', (3914, 3977), False, 'from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel\n'), ((3396, 3459), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (3415, 3459), False, 'from hypothesis import given, strategies, unlimited\n'), ((3468, 3531), 'hypothesis.strategies.integers', 'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (3487, 3531), False, 'from hypothesis import given, strategies, unlimited\n'), ((3540, 3615), 'hypothesis.strategies.integers', 'strategies.integers', (['range_polynomial_degree[0]', 'range_polynomial_degree[1]'], {}), 
'(range_polynomial_degree[0], range_polynomial_degree[1])\n', (3559, 3615), False, 'from hypothesis import given, strategies, unlimited\n'), ((3624, 3715), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000.0, allow_nan=False,\n allow_infinity=False)\n', (3641, 3715), False, 'from hypothesis import given, strategies, unlimited\n'), ((4606, 4658), 'kernelmethods.numeric_kernels.GaussianKernel', 'GaussianKernel', ([], {'sigma': 'sigma', 'skip_input_checks': '(False)'}), '(sigma=sigma, skip_input_checks=False)\n', (4620, 4658), False, 'from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel\n'), ((4231, 4294), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (4250, 4294), False, 'from hypothesis import given, strategies, unlimited\n'), ((4303, 4366), 'hypothesis.strategies.integers', 'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (4322, 4366), False, 'from hypothesis import given, strategies, unlimited\n'), ((4375, 4469), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000000.0, allow_nan=False,\n allow_infinity=False)\n', (4392, 4469), False, 'from hypothesis import given, strategies, unlimited\n'), ((5163, 5200), 'kernelmethods.numeric_kernels.LinearKernel', 'LinearKernel', ([], {'skip_input_checks': '(False)'}), '(skip_input_checks=False)\n', (5175, 5200), False, 'from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, 
Chi2Kernel\n'), ((4919, 4982), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (4938, 4982), False, 'from hypothesis import given, strategies, unlimited\n'), ((4991, 5054), 'hypothesis.strategies.integers', 'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (5010, 5054), False, 'from hypothesis import given, strategies, unlimited\n'), ((5836, 5889), 'kernelmethods.numeric_kernels.LaplacianKernel', 'LaplacianKernel', ([], {'gamma': 'gamma', 'skip_input_checks': '(False)'}), '(gamma=gamma, skip_input_checks=False)\n', (5851, 5889), False, 'from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel\n'), ((5458, 5521), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (5477, 5521), False, 'from hypothesis import given, strategies, unlimited\n'), ((5530, 5593), 'hypothesis.strategies.integers', 'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (5549, 5593), False, 'from hypothesis import given, strategies, unlimited\n'), ((5602, 5696), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000000.0, allow_nan=False,\n allow_infinity=False)\n', (5619, 5696), False, 'from hypothesis import given, strategies, unlimited\n'), ((6659, 6725), 'kernelmethods.numeric_kernels.SigmoidKernel', 'SigmoidKernel', ([], {'gamma': 'gamma', 'offset': 'offset', 'skip_input_checks': '(False)'}), '(gamma=gamma, offset=offset, skip_input_checks=False)\n', (6672, 6725), False, 'from 
kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel\n'), ((6153, 6216), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (6172, 6216), False, 'from hypothesis import given, strategies, unlimited\n'), ((6225, 6288), 'hypothesis.strategies.integers', 'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (6244, 6288), False, 'from hypothesis import given, strategies, unlimited\n'), ((6297, 6391), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000000.0, allow_nan=False,\n allow_infinity=False)\n', (6314, 6391), False, 'from hypothesis import given, strategies, unlimited\n'), ((6415, 6509), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000000.0, allow_nan=False,\n allow_infinity=False)\n', (6432, 6509), False, 'from hypothesis import given, strategies, unlimited\n'), ((7341, 7389), 'kernelmethods.numeric_kernels.Chi2Kernel', 'Chi2Kernel', ([], {'gamma': 'gamma', 'skip_input_checks': '(False)'}), '(gamma=gamma, skip_input_checks=False)\n', (7351, 7389), False, 'from kernelmethods.numeric_kernels import DEFINED_KERNEL_FUNCS, PolyKernel, GaussianKernel, LinearKernel, LaplacianKernel, SigmoidKernel, Chi2Kernel\n'), ((6973, 7036), 'hypothesis.strategies.integers', 'strategies.integers', (['range_feature_dim[0]', 'range_feature_dim[1]'], {}), '(range_feature_dim[0], range_feature_dim[1])\n', (6992, 7036), False, 'from hypothesis import given, strategies, unlimited\n'), ((7045, 7108), 'hypothesis.strategies.integers', 
'strategies.integers', (['range_num_samples[0]', 'range_num_samples[1]'], {}), '(range_num_samples[0], range_num_samples[1])\n', (7064, 7108), False, 'from hypothesis import given, strategies, unlimited\n'), ((7117, 7211), 'hypothesis.strategies.floats', 'strategies.floats', ([], {'min_value': '(0)', 'max_value': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0, max_value=1000000.0, allow_nan=False,\n allow_infinity=False)\n', (7134, 7211), False, 'from hypothesis import given, strategies, unlimited\n'), ((2362, 2400), 'kernelmethods.utils.check_callable', 'check_callable', (['kernel'], {'min_num_args': '(2)'}), '(kernel, min_num_args=2)\n', (2376, 2400), False, 'from kernelmethods.utils import check_callable\n'), ((3370, 3387), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (3385, 3387), False, 'from hypothesis import HealthCheck\n'), ((4205, 4222), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (4220, 4222), False, 'from hypothesis import HealthCheck\n'), ((4893, 4910), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (4908, 4910), False, 'from hypothesis import HealthCheck\n'), ((5432, 5449), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (5447, 5449), False, 'from hypothesis import HealthCheck\n'), ((6127, 6144), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (6142, 6144), False, 'from hypothesis import HealthCheck\n'), ((6947, 6964), 'hypothesis.HealthCheck.all', 'HealthCheck.all', ([], {}), '()\n', (6962, 6964), False, 'from hypothesis import HealthCheck\n'), ((2694, 2712), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2700, 2712), False, 'from pytest import raises, warns\n')] |
"""
Date: Oct 2018
Author: <NAME>
This script outputs lists of sample names for the training, development and test sets.
These lists are then used to create batches for training, validation and testing.
The speakers / session names in each subset have been hard-coded.
The sample names are listed and, in the case of the training data, randomised.
"""
import os
import sys
import random
import pandas as pd
import numpy as np
from ultrasync.create_experiment_data_utils import get_sync_file_names, split_name
# Fix both RNG seeds so the training-data shuffle below is reproducible.
random.seed(2018)
np.random.seed(2018)
def split_val_and_test(df):
    """Split `df` into two consecutive halves and return them as a pair.

    For an odd number of rows the first half is one row shorter.
    """
    midpoint = len(df) // 2
    return df.iloc[:midpoint, :], df.iloc[midpoint:, :]
def get_train_val_test_splits(df_info):
    """Partition `df_info` into 3 training, 5 validation and 5 test subsets.

    The held-out speakers/sessions for each dataset (upx, uxssd, uxtd) are
    hard-coded below; the percentages in the comments are the approximate
    share of each dataset that the subset represents.

    Returns a list of 13 DataFrames in the order
    [train_1, train_2, train_3, val_1..val_5, test_1..test_5].
    """
    # upx
    # the training data excludes speakers 01F and 15M and session BL3
    df_train_1 = df_info[(df_info.dataset == "upx") &
                         (df_info.speaker != '01F') & (df_info.speaker != '15M') &
                         (df_info.session != 'BL3')]
    # there are two validation sets: one with all the data of one held out speaker (01F)
    # and the other with a held out session from first half of the remaining speakers (excluding 01F, of course)
    # ~ 11% of upx
    df_val_1 = df_info[(df_info.dataset == "upx") & (df_info.speaker == '01F')] # 6% of the data
    df_val_2 = df_info[(df_info.dataset == "upx") & (df_info.session == 'BL3') &
                       df_info.speaker.isin(["02F", "03F", "04M", "05M", "06M", "07M", "08M", "09M", "10M"])] # 5%
    # there are two test sets: one with all the data of one held out speaker (15M)
    # and the other with a held out session from second half of the remaining speakers (excluding 15M, of course)
    # ~ 9% of upx
    df_test_1 = df_info[(df_info.dataset == "upx") & (df_info.speaker == '15M')] # 5% of the data
    df_test_2 = df_info[(df_info.dataset == "upx") & (df_info.session == 'BL3') &
                        df_info.speaker.isin(["11M", "12M", "13M", "14M", "16M", "17M", "18F", "19M", "20M"])] # 4%
    # uxssd
    # the training data excludes speakers 01M and 07F and session Mid
    df_train_2 = df_info[(df_info.dataset == "uxssd") &
                         (df_info.speaker != '01M') & (df_info.speaker != '07F') &
                         (df_info.session != 'Mid')]
    # there are two validation sets: one with all the data of one held out speaker (01M)
    # and the other with a held out session from first half of the remaining speakers (excluding 01M, of course)
    # 11%
    df_val_3 = df_info[(df_info.dataset == "uxssd") & (df_info.speaker == '01M')]
    df_val_4 = df_info[(df_info.dataset == "uxssd") & (df_info.session == 'Mid') &
                       df_info.speaker.isin(['02M', '03F', '04M'])]
    # there are two test sets: one with all the data of one held out speaker (07F)
    # and the other with a held out session from second half of the remaining speakers (excluding 15M, of course)
    # 11%
    df_test_3 = df_info[(df_info.dataset == "uxssd") & (df_info.speaker == '07F')]
    df_test_4 = df_info[(df_info.dataset == "uxssd") & (df_info.session == 'Mid') &
                        df_info.speaker.isin(['05M', '06M', '08M'])]
    # uxtd
    # we hold out some speakers which make up 20% of the data
    uxtd_val_speakers = ["07F", "08M", "12M", "13F", "26F"] # val speakers 10% of uxtd
    uxtd_test_speakers = ["30F", "38M", "43F", "45M", "47M", "52F", "53F", "55M"] # test speakers 10% of uxtd
    # 80 %
    df_train_3 = df_info[(df_info.dataset == "uxtd") &
                         (~df_info.speaker.isin(uxtd_val_speakers)) &
                         (~df_info.speaker.isin(uxtd_test_speakers))]
    # 10%
    df_val_5 = df_info[(df_info.dataset == "uxtd") & (df_info.speaker.isin(uxtd_val_speakers))]
    # 10%
    df_test_5 = df_info[(df_info.dataset == "uxtd") & (df_info.speaker.isin(uxtd_test_speakers))]
    splits = [df_train_1, df_train_2, df_train_3,
              df_val_1, df_val_2, df_val_3, df_val_4, df_val_5,
              df_test_1, df_test_2, df_test_3, df_test_4, df_test_5]
    return splits
def main():
    """Build and save the train/val/test split listings.

    argv[1]: directory containing the synchronised sample files.
    argv[2]: experiment output directory; CSVs are written to <argv[2]>/docs.
    """
    path = sys.argv[1]  # '/disk/scratch_big/../SyncDataSmall../'
    dest_path = sys.argv[2]  # '/disk/scratch_big/../experiments/sync_10/'

    files = get_sync_file_names(path)
    df_info = pd.DataFrame(data=[split_name(i) for i in files])
    col_order = ['filename', 'dataset', 'speaker', 'session', 'utterance', 'chunk']
    df_info = df_info[col_order]
    df_info = df_info.sort_values(by="filename")

    docs = os.path.join(dest_path, 'docs')
    # exist_ok avoids the exists()/makedirs() check-then-act race
    os.makedirs(docs, exist_ok=True)
    df_info.to_csv(os.path.join(docs, 'file_names.csv'), index=False)

    # order matches get_train_val_test_splits' return value
    splits = get_train_val_test_splits(df_info)
    split_names = ["df_train_1", "df_train_2", "df_train_3",
                   "df_val_1", "df_val_2", "df_val_3", "df_val_4", "df_val_5",
                   "df_test_1", "df_test_2", "df_test_3", "df_test_4", "df_test_5"]
    # save the splits (one loop instead of 13 copy-pasted to_csv calls)
    for split_name_, df_split in zip(split_names, splits):
        df_split.to_csv(os.path.join(docs, split_name_ + ".csv"), index=False)

    df_train = pd.concat(splits[:3])
    # shuffle training data at the utterance-stem level; the _neg/_pos pair of
    # each stem is appended together so the pair stays adjacent after shuffling
    stem = list(set(df_train.filename.apply(lambda x: '_'.join(x.split("_")[:-1]))))
    np.random.shuffle(stem)
    shuffled_files = []
    for s in stem:
        shuffled_files.append(s + '_neg')
        shuffled_files.append(s + '_pos')
    # assumes every stem has exactly one _neg and one _pos sample
    assert len(shuffled_files) == len(df_train)
    df_train_shuffled = pd.DataFrame(data=[split_name(i) for i in shuffled_files])
    df_train_shuffled.to_csv(os.path.join(docs, "df_train_shuffled.csv"), index=False)


if __name__ == "__main__":
    main()
| [
"numpy.random.seed",
"os.makedirs",
"ultrasync.create_experiment_data_utils.split_name",
"os.path.exists",
"random.seed",
"ultrasync.create_experiment_data_utils.get_sync_file_names",
"os.path.join",
"pandas.concat",
"numpy.random.shuffle"
] | [((519, 536), 'random.seed', 'random.seed', (['(2018)'], {}), '(2018)\n', (530, 536), False, 'import random\n'), ((537, 557), 'numpy.random.seed', 'np.random.seed', (['(2018)'], {}), '(2018)\n', (551, 557), True, 'import numpy as np\n'), ((4259, 4284), 'ultrasync.create_experiment_data_utils.get_sync_file_names', 'get_sync_file_names', (['path'], {}), '(path)\n', (4278, 4284), False, 'from ultrasync.create_experiment_data_utils import get_sync_file_names, split_name\n'), ((4527, 4558), 'os.path.join', 'os.path.join', (['dest_path', '"""docs"""'], {}), "(dest_path, 'docs')\n", (4539, 4558), False, 'import os\n'), ((6039, 6086), 'pandas.concat', 'pd.concat', (['[df_train_1, df_train_2, df_train_3]'], {}), '([df_train_1, df_train_2, df_train_3])\n', (6048, 6086), True, 'import pandas as pd\n'), ((6206, 6229), 'numpy.random.shuffle', 'np.random.shuffle', (['stem'], {}), '(stem)\n', (6223, 6229), True, 'import numpy as np\n'), ((4570, 4590), 'os.path.exists', 'os.path.exists', (['docs'], {}), '(docs)\n', (4584, 4590), False, 'import os\n'), ((4600, 4617), 'os.makedirs', 'os.makedirs', (['docs'], {}), '(docs)\n', (4611, 4617), False, 'import os\n'), ((4652, 4688), 'os.path.join', 'os.path.join', (['docs', '"""file_names.csv"""'], {}), "(docs, 'file_names.csv')\n", (4664, 4688), False, 'import os\n'), ((4956, 4992), 'os.path.join', 'os.path.join', (['docs', '"""df_train_1.csv"""'], {}), "(docs, 'df_train_1.csv')\n", (4968, 4992), False, 'import os\n'), ((5043, 5079), 'os.path.join', 'os.path.join', (['docs', '"""df_train_2.csv"""'], {}), "(docs, 'df_train_2.csv')\n", (5055, 5079), False, 'import os\n'), ((5130, 5166), 'os.path.join', 'os.path.join', (['docs', '"""df_train_3.csv"""'], {}), "(docs, 'df_train_3.csv')\n", (5142, 5166), False, 'import os\n'), ((5216, 5250), 'os.path.join', 'os.path.join', (['docs', '"""df_val_1.csv"""'], {}), "(docs, 'df_val_1.csv')\n", (5228, 5250), False, 'import os\n'), ((5299, 5333), 'os.path.join', 'os.path.join', (['docs', 
'"""df_val_2.csv"""'], {}), "(docs, 'df_val_2.csv')\n", (5311, 5333), False, 'import os\n'), ((5382, 5416), 'os.path.join', 'os.path.join', (['docs', '"""df_val_3.csv"""'], {}), "(docs, 'df_val_3.csv')\n", (5394, 5416), False, 'import os\n'), ((5465, 5499), 'os.path.join', 'os.path.join', (['docs', '"""df_val_4.csv"""'], {}), "(docs, 'df_val_4.csv')\n", (5477, 5499), False, 'import os\n'), ((5548, 5582), 'os.path.join', 'os.path.join', (['docs', '"""df_val_5.csv"""'], {}), "(docs, 'df_val_5.csv')\n", (5560, 5582), False, 'import os\n'), ((5633, 5668), 'os.path.join', 'os.path.join', (['docs', '"""df_test_1.csv"""'], {}), "(docs, 'df_test_1.csv')\n", (5645, 5668), False, 'import os\n'), ((5718, 5753), 'os.path.join', 'os.path.join', (['docs', '"""df_test_2.csv"""'], {}), "(docs, 'df_test_2.csv')\n", (5730, 5753), False, 'import os\n'), ((5803, 5838), 'os.path.join', 'os.path.join', (['docs', '"""df_test_3.csv"""'], {}), "(docs, 'df_test_3.csv')\n", (5815, 5838), False, 'import os\n'), ((5888, 5923), 'os.path.join', 'os.path.join', (['docs', '"""df_test_4.csv"""'], {}), "(docs, 'df_test_4.csv')\n", (5900, 5923), False, 'import os\n'), ((5973, 6008), 'os.path.join', 'os.path.join', (['docs', '"""df_test_5.csv"""'], {}), "(docs, 'df_test_5.csv')\n", (5985, 6008), False, 'import os\n'), ((6534, 6577), 'os.path.join', 'os.path.join', (['docs', '"""df_train_shuffled.csv"""'], {}), "(docs, 'df_train_shuffled.csv')\n", (6546, 6577), False, 'import os\n'), ((4318, 4331), 'ultrasync.create_experiment_data_utils.split_name', 'split_name', (['i'], {}), '(i)\n', (4328, 4331), False, 'from ultrasync.create_experiment_data_utils import get_sync_file_names, split_name\n'), ((6450, 6463), 'ultrasync.create_experiment_data_utils.split_name', 'split_name', (['i'], {}), '(i)\n', (6460, 6463), False, 'from ultrasync.create_experiment_data_utils import get_sync_file_names, split_name\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.