code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# from matplotlib.animation import FuncAnimation
# from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import fsolve
#################Definition of the Four Bar Linkage
# The values are placeholder. Please change the values below.
# Please leave theta4=0 and N=360
# Link lengths of the four-bar linkage (all in the same length unit).
a = 19.5 # The crank
b = 45.0 # The coupler
c = 60.0 # The rocker
d = 40.0 # The fixed link
e = 90.0 # The wing (output lever attached at the rocker pivot)
theta4 = 0 # The orientation of the fixed link
N = 360 # Number of simulation points
#######################################################
# Initializing
##########################################
ox = 0 # coordinates of the start point
oy = -d
link_length = np.array([[a], [b], [c], [d]])
s = min(link_length) # shortest link (one-element array, since rows are compared)
l = max(link_length) # longest link (one-element array)
t1 = np.linspace(0, 360 * np.pi / 180, N) # crank rotation range
# Grashof criterion: the shortest link plus the longest link must not exceed
# the sum of the two remaining links, otherwise no link can fully rotate.
# (The original test used the crank length `a` instead of the shortest link
# `s`, which is only correct when the crank happens to be the shortest link;
# since the header invites changing the link values, use `s` explicitly.)
if s + l > (a + b + c + d) - s - l:
    print("Grashoff criterion is not satisfied")
else:
    print("Grashoff criterion is satisfied")
# Pre-allocate one slot per simulated crank angle for every joint coordinate,
# for the two unknown angles (theta2, theta3), and for the wing-tip speed.
x1, x2, x3, x4, x5, x6 = (np.zeros(t1.shape[0]) for _ in range(6))
y1, y2, y3, y4, y5, y6 = (np.zeros(t1.shape[0]) for _ in range(6))
t2 = np.zeros(t1.shape[0])
t3 = np.zeros(t1.shape[0])
v_tip = np.zeros(t1.shape[0])
# Figure and axes shared by the static plot and the animation below.
fig, ax = plt.subplots()
def plot_initialize():
    """Reset axis limits and aspect ratio; used as the animation init_func."""
    axes = plt.gca()
    axes.set_xlim(-105, 105)
    axes.set_ylim(-105, 105)
    axes.set_aspect('equal', adjustable='box')
# Define the set of nonlinear equations that need to be solved
def func(x, a, b, c, d, theta):
    """Loop-closure equations of the four-bar linkage.

    x      -- [theta2, theta3], the two unknown link angles
    a..d   -- link lengths (crank, coupler, rocker, fixed link)
    theta  -- the driven crank angle
    Returns the (real, imaginary) residuals; both are zero when the
    linkage closes, which is what fsolve drives to.
    """
    t2, t3 = x
    real_part = a * np.cos(theta) + b * np.cos(t2) + c * np.cos(t3)  # Eq1
    imag_part = a * np.sin(theta) + b * np.sin(t2) + c * np.sin(t3) - d  # Eq2
    return (real_part, imag_part)
def vfunc(x, a, b, c, d, theta, x1, x2, w1):
    """Velocity loop-closure equations (time derivative of func).

    x      -- [omega2, omega3], the two unknown angular velocities
    x1, x2 -- the solved position angles theta2, theta3
    w1     -- the known crank angular velocity
    Returns the (real, imaginary) velocity residuals for fsolve.
    """
    w2, w3 = x
    real_part = a * w1 * np.cos(theta) + b * w2 * np.cos(x1) + c * w3 * np.cos(x2)  # Eq1
    imag_part = a * w1 * np.sin(theta) + b * w2 * np.sin(x1) + c * w3 * np.sin(x2)  # Eq2
    return (real_part, imag_part)
w = 900 * 2 * np.pi / 60 # input velocity of the crank (900 rpm -> rad/s)
y0 = [0, 0]
i = 0
fr = 0  # number of crank angles for which fsolve converged (frames to animate)
for theta1 in t1: # for the range of input crank equations
    if i > 1:
        # warm-start from the previous solution so fsolve tracks one branch
        x0 = [t2[i - 1], t3[i - 1]]
        # theta0=[0,0]#theta2 and theta3 initial guesses are assigned to the previous iteration
    else:
        x0 = [np.pi / 2, np.pi]
    sol = fsolve(func, x0, args=(a, b, c, d, theta1), full_output=True) # nonlinear solver that solves Eq1 and Eq2
    exit_flag = sol[2] # if exit_flag==1, then the solution has reached and the algorithm is successful
    theta2 = sol[0][0]
    theta3 = sol[0][1]
    t2[i] = theta2
    t3[i] = theta3
    # Finding the tip velocity
    sol2 = fsolve(vfunc, y0, args=(a, b, c, d, theta1, theta2, theta3, w),
                  full_output=True) # nonlinear solver that solves Eq1 and Eq2
    exit_flag2 = sol2[2] # if exit_flag==1, then the solution has reached and the algorithm is successful
    v1 = sol2[0][0]
    v2 = sol2[0][1]
    v_tip[i] = abs(v2 * e)  # wing-tip speed = |omega3| * wing length
    if exit_flag == 1: # evaluating the x and y coordinates of the solved problem
        x1[fr] = ox
        y1[fr] = oy
        x2[fr] = x1[fr] + a * np.cos(theta1)
        y2[fr] = y1[fr] + a * np.sin(theta1)
        x3[fr] = x2[fr] + b * np.cos(theta2)
        y3[fr] = y2[fr] + b * np.sin(theta2)
        x4[fr] = x1[fr]
        y4[fr] = y1[fr] + d
        x5[fr] = x4[fr] + e * np.cos(theta3)
        y5[fr] = y4[fr] + e * np.sin(theta3)
        x6[fr] = x4[fr] - e * np.cos(theta3)
        y6[fr] = y4[fr] - e * np.sin(theta3)
        fr = fr + 1
    # plt.plot([x1,x2,x3,x4,x1],[y1,y2,y3,y4,y1])
    i = i + 1
    if i == 1:
        # NOTE(review): on the first iteration `fr` has already been
        # incremented, so this line artist is seeded from as-yet-unfilled
        # (zero) slots; the animation overwrites it anyway — confirm intent.
        line, = ax.plot([x1[fr], x2[fr], x3[fr], x5[fr], x6[fr], x4[fr], x1[fr]],
                        [y1[fr], y2[fr], y3[fr], y5[fr], y6[fr], y4[fr], y1[fr]], 'r')
# Range of angular motion of the wing
theta3_range = (max(t3) - min(t3)) * 180 / np.pi
print('range = ')
print(theta3_range)
tip_vel = max(v_tip)
print('tip_vel=')
print(tip_vel)
def animation_frame(p):
    """Update the linkage polyline for animation frame index p."""
    xs = [x1[p], x2[p], x3[p], x5[p], x6[p], x4[p], x1[p]]
    ys = [y1[p], y2[p], y3[p], y5[p], y6[p], y4[p], y1[p]]
    line.set_data(xs, ys)
    return line
# Animate only the fr frames for which fsolve converged.
ani = animation.FuncAnimation(fig, func=animation_frame, init_func=plot_initialize, frames=np.arange(0, fr),
                              interval=100, repeat=True)
plt.show()
# def func(E,V_0):
# s = sqrt(c_sqr * (1 - E / V_0))
# f = s / tan(s) + sqrt(c_sqr - s**2)
# f = E**2 -V_0
# return f
# VV=4.
# guess = 9
# sol=fsolve(func, guess, args=(VV),full_output=True)
| [
"scipy.optimize.fsolve",
"matplotlib.pyplot.gca",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((756, 786), 'numpy.array', 'np.array', (['[[a], [b], [c], [d]]'], {}), '([[a], [b], [c], [d]])\n', (764, 786), True, 'import numpy as np\n'), ((834, 870), 'numpy.linspace', 'np.linspace', (['(0)', '(360 * np.pi / 180)', 'N'], {}), '(0, 360 * np.pi / 180, N)\n', (845, 870), True, 'import numpy as np\n'), ((1110, 1131), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1118, 1131), True, 'import numpy as np\n'), ((1139, 1160), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1147, 1160), True, 'import numpy as np\n'), ((1168, 1189), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1176, 1189), True, 'import numpy as np\n'), ((1197, 1218), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1205, 1218), True, 'import numpy as np\n'), ((1226, 1247), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1234, 1247), True, 'import numpy as np\n'), ((1255, 1276), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1263, 1276), True, 'import numpy as np\n'), ((1284, 1305), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1292, 1305), True, 'import numpy as np\n'), ((1313, 1334), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1321, 1334), True, 'import numpy as np\n'), ((1342, 1363), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1350, 1363), True, 'import numpy as np\n'), ((1371, 1392), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1379, 1392), True, 'import numpy as np\n'), ((1400, 1421), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1408, 1421), True, 'import numpy as np\n'), ((1429, 1450), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1437, 1450), True, 'import numpy as np\n'), ((1458, 1479), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1466, 1479), True, 'import numpy as np\n'), ((1487, 
1508), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1495, 1508), True, 'import numpy as np\n'), ((1519, 1540), 'numpy.zeros', 'np.zeros', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (1527, 1540), True, 'import numpy as np\n'), ((1604, 1618), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1616, 1618), True, 'import matplotlib.pyplot as plt\n'), ((4655, 4665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4663, 4665), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1667), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-105)', '(105)'], {}), '(-105, 105)\n', (1656, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1691), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-105)', '(105)'], {}), '(-105, 105)\n', (1680, 1691), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2740), 'scipy.optimize.fsolve', 'fsolve', (['func', 'x0'], {'args': '(a, b, c, d, theta1)', 'full_output': '(True)'}), '(func, x0, args=(a, b, c, d, theta1), full_output=True)\n', (2685, 2740), False, 'from scipy.optimize import fsolve\n'), ((3017, 3103), 'scipy.optimize.fsolve', 'fsolve', (['vfunc', 'y0'], {'args': '(a, b, c, d, theta1, theta2, theta3, w)', 'full_output': '(True)'}), '(vfunc, y0, args=(a, b, c, d, theta1, theta2, theta3, w), full_output\n =True)\n', (3023, 3103), False, 'from scipy.optimize import fsolve\n'), ((4579, 4595), 'numpy.arange', 'np.arange', (['(0)', 'fr'], {}), '(0, fr)\n', (4588, 4595), True, 'import numpy as np\n'), ((1696, 1705), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1703, 1705), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1945), 'numpy.cos', 'np.cos', (['theta3'], {}), '(theta3)\n', (1937, 1945), True, 'import numpy as np\n'), ((2209, 2219), 'numpy.cos', 'np.cos', (['x2'], {}), '(x2)\n', (2215, 2219), True, 'import numpy as np\n'), ((2300, 2310), 'numpy.sin', 'np.sin', (['x2'], {}), '(x2)\n', (2306, 2310), True, 'import numpy as np\n'), ((1890, 1903), 'numpy.cos', 'np.cos', (['theta'], 
{}), '(theta)\n', (1896, 1903), True, 'import numpy as np\n'), ((1910, 1924), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (1916, 1924), True, 'import numpy as np\n'), ((2007, 2021), 'numpy.sin', 'np.sin', (['theta3'], {}), '(theta3)\n', (2013, 2021), True, 'import numpy as np\n'), ((2154, 2167), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2160, 2167), True, 'import numpy as np\n'), ((2183, 2193), 'numpy.cos', 'np.cos', (['x1'], {}), '(x1)\n', (2189, 2193), True, 'import numpy as np\n'), ((2245, 2258), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2251, 2258), True, 'import numpy as np\n'), ((2274, 2284), 'numpy.sin', 'np.sin', (['x1'], {}), '(x1)\n', (2280, 2284), True, 'import numpy as np\n'), ((3489, 3503), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3495, 3503), True, 'import numpy as np\n'), ((3534, 3548), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (3540, 3548), True, 'import numpy as np\n'), ((3579, 3593), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3585, 3593), True, 'import numpy as np\n'), ((3624, 3638), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (3630, 3638), True, 'import numpy as np\n'), ((3721, 3735), 'numpy.cos', 'np.cos', (['theta3'], {}), '(theta3)\n', (3727, 3735), True, 'import numpy as np\n'), ((3766, 3780), 'numpy.sin', 'np.sin', (['theta3'], {}), '(theta3)\n', (3772, 3780), True, 'import numpy as np\n'), ((3811, 3825), 'numpy.cos', 'np.cos', (['theta3'], {}), '(theta3)\n', (3817, 3825), True, 'import numpy as np\n'), ((3856, 3870), 'numpy.sin', 'np.sin', (['theta3'], {}), '(theta3)\n', (3862, 3870), True, 'import numpy as np\n'), ((1966, 1979), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1972, 1979), True, 'import numpy as np\n'), ((1986, 2000), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (1992, 2000), True, 'import numpy as np\n')] |
from queue import Queue
import numpy as np
import pandas as pd
from .problem import Problem
class Solver:
    """Greedy grid solver: repeatedly BFS from the current position to the
    nearest remaining food cell and record the moves taken."""
    def __init__(self):
        pass
    def solve(self, problem: Problem):
        # Initialization: copy the grid, locate the start cell 'S' (turned
        # into a plain floor '.') and count the food cells 'o'.
        cells = problem.cells.copy()
        H, W = cells.shape
        sy, sx = -1, -1
        n_foods = 0
        for y in range(H):
            for x in range(W):
                if cells[y, x] == 'S':
                    sy, sx = y, x
                    cells[y, x] = '.'
                if cells[y, x] == 'o':
                    n_foods += 1
        y, x = sy, sx
        INF = 100000
        drs = [(-1, 0), (0, -1), (1, 0), (0, 1)]  # up, left, down, right
        commands = []
        # solve: one BFS per food item
        for d in range(n_foods):
            print(f"solving.. {d}")
            # Breadth-first search from the current (y, x) until a food cell
            # is found.  NOTE: the loop variable `d` is deliberately reused
            # as the BFS distance; after `break`, (y, x) is the food cell and
            # `d` holds its distance.
            dists = np.full((H, W), INF)
            q = Queue()
            q.put((y, x, 0))
            while not q.empty():
                y, x, d = q.get()
                if dists[y, x] != INF:
                    continue
                dists[y, x] = d
                if cells[y, x] == 'o':
                    cells[y, x] = '.'
                    break
                for di, (dy, dx) in enumerate(drs):
                    yy, xx = y + dy, x + dx
                    if 0 <= yy < H and 0 <= xx < W and \
                            cells[yy, xx] != '#' and dists[yy, xx] == INF:
                        q.put((yy, xx, d + 1))
            # Reconstruct the path by walking back along strictly decreasing
            # BFS distances from the food cell toward the start.
            cmds = []
            yr, xr = y, x
            for d in range(dists[yr, xr] - 1, -1, -1):
                for di, (dy, dx) in enumerate(drs):
                    yy, xx = yr + dy, xr + dx
                    if (0 <= yy < H and 0 <= xx < W and
                            cells[yy, xx] != '#' and dists[yy, xx] == d):
                        cmds.append(di)
                        yr, xr = yy, xx
                        break
            # Moves were collected food-to-start; reverse them and flip each
            # direction ((di + 2) % 4 is the opposite of di in `drs`).
            cmds = reversed(cmds)
            cmds = [(di + 2) % 4 for di in cmds]
            commands += cmds
        return commands, None
if __name__ == "__main__":
    # Manual entry point.  NOTE(review): Problem() is called with no
    # arguments — confirm it constructs a default grid.
    problem = Problem()
    solver = Solver()
    solver.solve(problem)
| [
"numpy.full",
"queue.Queue"
] | [((797, 817), 'numpy.full', 'np.full', (['(H, W)', 'INF'], {}), '((H, W), INF)\n', (804, 817), True, 'import numpy as np\n'), ((834, 841), 'queue.Queue', 'Queue', ([], {}), '()\n', (839, 841), False, 'from queue import Queue\n')] |
#! /usr/bin/env python
"""
Reads Darknet config and weights and creates Keras model with TF backend.
"""
import argparse
import configparser
import io
import os
from collections import defaultdict
import math
import numpy as np
from keras import backend as K
from keras.layers import (Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model as plot
# Global switch consumed by rounding() below.
useRounding = False
'''
toggle to use rounding algorithms
'''
# Overrides the default above: rounding layers ARE inserted into the model.
useRounding = True
# problem with model with rounding
'''
def roundingAlgo(x):
# first one that works with model_1 & model_2
# problem - this rounding function is slow: model_2 = 3 hours / epoch
# comparison, model_0 = 20 mins / epoch
# in addition, off by half with integer inputs (lower than actual value, e.g. floor(2) ≈ 1.5, floor(2.01) ≈ 2)
# source: https://en.wikipedia.org/wiki/Floor_and_ceiling_functions#Continuity_and_series_expansions
if True:
result = x - 0.5
for p in range(1, 7):
result = result + K.sin(x * p * 2 * math.pi) / (p * math.pi)
return result
# '''
'''
def roundingAlgo(x):
# second one that works with model_2
# problem - this rounding function is slower than first working algo: model_2 = 4,2 hours / epoch
# comparison, model_0 = 20 mins / epoch
# source: self
return x - x % 1
# '''
# '''
def roundingAlgo(x):
    """Differentiable approximation of floor(x), usable inside Keras layers."""
    # simplification of the first algo loop by simplifying the expression for range(1,7)
    # problem - rounding function is still slow = 2,5 hours / epoch
    # all non-speed problem of first algo still applies
    result = x - 0.5
    resultCos = K.cos(2 * math.pi * x)
    return result + K.sin(2 * math.pi * x) * (1 + resultCos) * (13 + 2 * resultCos - 18 * K.pow(resultCos, 2) - 32 * K.pow(resultCos, 3) + 80 * K.pow(resultCos, 4)) / 15
# '''
'''
def roundingAlgo(x):
# made to fool the engine to have a gradient
return 0 * x + K.round(x)
# '''
# check https://github.com/keras-team/keras/issues/2218
# check https://github.com/keras-team/keras/issues/2221
# https://www.tensorflow.org/api_docs/python/tf/custom_gradient
class RoundClampQ7_12(Layer):
    """Non-trainable quantization layer: rounds activations to signed Q7.12
    fixed point (scale 2**12 = 4096) and saturates (clamps) to the 20-bit
    signed range [-524288, 524287]."""
    def __init__(self, **kwargs):
        super(RoundClampQ7_12, self).__init__(**kwargs)
        self.trainable = False
    def build(self, input_shape):
        super(RoundClampQ7_12, self).build(input_shape)
    def call(self, X):
        # scale up, round, clamp (saturating arithmetic), scale back down
        return K.clip(roundingAlgo(X * 4096), -524288, 524287) / 4096.0
    def get_config(self):
        # record the class name so the layer round-trips through save/load
        config = {"name": self.__class__.__name__}
        base_config = super(RoundClampQ7_12, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RoundOverflowQ7_12(Layer):
    """Non-trainable quantization layer: rounds activations to signed Q7.12
    fixed point (scale 2**12 = 4096) with wrap-around (modular) overflow over
    the 20-bit signed range, mimicking integer overflow in hardware."""
    def __init__(self, **kwargs):
        super(RoundOverflowQ7_12, self).__init__(**kwargs)
        self.trainable = False
    def build(self, input_shape):
        super(RoundOverflowQ7_12, self).build(input_shape)
    def call(self, X):
        # scale up, round, wrap modulo 2**20 into [-524288, 524287], scale down
        return (((roundingAlgo(X * 4096) + 524288) % 1048576) - 524288) / 4096.0
    def get_config(self):
        # record the class name so the layer round-trips through save/load
        config = {"name": self.__class__.__name__}
        base_config = super(RoundOverflowQ7_12, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RoundClampQ3_4(Layer):
    """Non-trainable quantization layer: rounds activations to signed Q3.4
    fixed point (scale 2**4 = 16) and saturates (clamps) to the 8-bit signed
    range [-128, 127]."""
    def __init__(self, **kwargs):
        super(RoundClampQ3_4, self).__init__(**kwargs)
        self.trainable = False
    def build(self, input_shape):
        super(RoundClampQ3_4, self).build(input_shape)
    def call(self, X):
        # scale up, round, clamp (saturating arithmetic), scale back down
        return K.clip(roundingAlgo(X * 16), -128, 127) / 16.0
    def get_config(self):
        # record the class name so the layer round-trips through save/load
        config = {"name": self.__class__.__name__}
        base_config = super(RoundClampQ3_4, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RoundOverflowQ3_4(Layer):
    """Non-trainable quantization layer: rounds activations to signed Q3.4
    fixed point (scale 2**4 = 16) with wrap-around (modular) overflow over the
    8-bit signed range, mimicking integer overflow in hardware."""
    def __init__(self, **kwargs):
        super(RoundOverflowQ3_4, self).__init__(**kwargs)
        self.trainable = False
    def build(self, input_shape):
        super(RoundOverflowQ3_4, self).build(input_shape)
    def call(self, X):
        # scale up, round, wrap modulo 256 into [-128, 127], scale back down
        return (((roundingAlgo(X * 16) + 128) % 256) - 128) / 16.0
    def get_config(self):
        # record the class name so the layer round-trips through save/load
        config = {"name": self.__class__.__name__}
        base_config = super(RoundOverflowQ3_4, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def rounding(currentLayer):
    """Optionally wrap currentLayer in the selected fixed-point rounding layer.

    Returns currentLayer untouched when rounding is globally disabled via
    `useRounding`, or when no rounding variant is selected below.
    """
    # pick one of: RoundClampQ7_12, RoundClampQ3_4,
    #              RoundOverflowQ7_12, RoundOverflowQ3_4
    roundingFunction = RoundOverflowQ3_4()
    if useRounding and roundingFunction is not None:
        return roundingFunction(currentLayer)
    return currentLayer
# Command-line interface: Darknet .cfg and .weights in, Keras .h5 out,
# with optional model plot and weights-only export.
parser = argparse.ArgumentParser(description='Darknet To Keras Converter.')
parser.add_argument('config_path', help='Path to Darknet cfg file.')
parser.add_argument('weights_path', help='Path to Darknet weights file.')
parser.add_argument('output_path', help='Path to output Keras model file.')
parser.add_argument(
    '-p',
    '--plot_model',
    help='Plot generated Keras model and save as image.',
    action='store_true')
parser.add_argument(
    '-w',
    '--weights_only',
    help='Save as Keras weights file instead of model file.',
    action='store_true')
def unique_config_sections(config_file):
    """Convert all config sections to have unique names.

    Darknet cfg files repeat section headers (e.g. several [convolutional]);
    configparser requires unique names, so each occurrence gets a numeric
    suffix ([convolutional_0], [convolutional_1], ...).  Returns a rewound
    StringIO ready to be handed to ConfigParser.read_file.
    """
    counters = defaultdict(int)
    rewritten = io.StringIO()
    with open(config_file) as src:
        for raw_line in src:
            if raw_line.startswith('['):
                name = raw_line.strip().strip('[]')
                suffixed = name + '_' + str(counters[name])
                counters[name] += 1
                raw_line = raw_line.replace(name, suffixed)
            rewritten.write(raw_line)
    rewritten.seek(0)
    return rewritten
# %%
def _main(args):
    """Convert a Darknet model (cfg + weights) into a Keras .h5 model.

    Parses the cfg sequentially, builds the matching Keras graph, and reads
    the binary weights file in lock-step (bias, optional batch-norm stats,
    then conv kernels for every convolutional section).
    """
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)
    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]
    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    # Darknet weight files start with a version header; newer versions store
    # the "seen images" counter as int64 instead of int32.
    major, minor, revision = np.ndarray(
        shape=(3, ), dtype='int32', buffer=weights_file.read(12))
    if (major*10+minor)>=2 and major<1000 and minor<1000:
        seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)
    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)
    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []
    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    model_name = "model_4"
    convolutional_counter = 0
    batchnorm_counter = 0
    # Per-layer naming tables (None entries keep Keras auto-generated names).
    conv_index=[
        "layer0_branch",
        "layer1_branch",
        "layer2_branch",
        "layer3_branch",
        "layer4_branch",
        "layer5_branch0",
        "layer6_branch0",
        "layer7_branch0",
        "layer8_branch01",
        None,
        "layer8_branch00",
        "layer9_branch1",
        None
    ]
    bn_index=[
        "layer0_branch",
        "layer1_branch",
        "layer2_branch",
        "layer3_branch",
        "layer4_branch",
        "layer5_branch0",
        "layer6_branch0",
        "layer7_branch0",
        "layer8_branch01",
        "layer8_branch00",
        "layer9_branch1",
        None,
        None
    ]
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]
            padding = 'same' if pad == 1 and stride == 1 else 'valid'
            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)
            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)
            print('conv2d', 'bn'
            if batch_normalize else '  ', activation, weights_shape)
            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters
            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters
                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]
            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size
            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]
            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))
            # Create Conv2D layer
            if stride>1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1,0),(1,0)))(prev_layer)
            conv_name = None
            if conv_index[convolutional_counter] is not None:
                conv_name = "Conv2D_" + model_name + "_" + conv_index[convolutional_counter]
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                name=conv_name,
                activation=act_fn,
                padding=padding))(prev_layer)
            bn_name = None
            if bn_index[batchnorm_counter] is not None:
                bn_name = "BatchNorm_" + model_name + "_" + bn_index[batchnorm_counter]
            if batch_normalize:
                # rounding() quantizes the activations between conv and BN
                conv_layer = (BatchNormalization(weights=bn_weight_list, name=bn_name))(rounding(conv_layer))
                batchnorm_counter = batchnorm_counter + 1
            prev_layer = rounding(conv_layer)
            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = rounding(act_layer)
                all_layers.append(act_layer)
            convolutional_counter = convolutional_counter + 1
        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer
        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]
        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]
        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]
        elif section.startswith('yolo'):
            out_index.append(len(all_layers)-1)
            all_layers.append(None)
            prev_layer = all_layers[-1]
        elif section.startswith('net'):
            pass
        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))
    # Create and save model.
    if len(out_index)==0: out_index.append(len(all_layers)-1)
    model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])
    with open(output_path[:-2] + "txt", "wt") as summaryText:
        model.summary(print_fn=lambda x: summaryText.write(x + "\n"), line_length=200)
    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))
    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
if __name__ == '__main__':
    # CLI entry point: parse args and run the converter.
    _main(parser.parse_args())
| [
"numpy.product",
"configparser.ConfigParser",
"keras.backend.sin",
"argparse.ArgumentParser",
"keras.backend.pow",
"keras.models.Model",
"keras.layers.advanced_activations.LeakyReLU",
"io.StringIO",
"keras.layers.ZeroPadding2D",
"os.path.expanduser",
"keras.layers.Add",
"keras.layers.MaxPoolin... | [((4940, 5006), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Darknet To Keras Converter."""'}), "(description='Darknet To Keras Converter.')\n", (4963, 5006), False, 'import argparse\n'), ((1845, 1867), 'keras.backend.cos', 'K.cos', (['(2 * math.pi * x)'], {}), '(2 * math.pi * x)\n', (1850, 1867), True, 'from keras import backend as K\n'), ((5710, 5726), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5721, 5726), False, 'from collections import defaultdict\n'), ((5747, 5760), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5758, 5760), False, 'import io\n'), ((6215, 6251), 'os.path.expanduser', 'os.path.expanduser', (['args.config_path'], {}), '(args.config_path)\n', (6233, 6251), False, 'import os\n'), ((6271, 6308), 'os.path.expanduser', 'os.path.expanduser', (['args.weights_path'], {}), '(args.weights_path)\n', (6289, 6308), False, 'import os\n'), ((6526, 6562), 'os.path.expanduser', 'os.path.expanduser', (['args.output_path'], {}), '(args.output_path)\n', (6544, 6562), False, 'import os\n'), ((7339, 7366), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (7364, 7366), False, 'import configparser\n'), ((7466, 7494), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), '(shape=(None, None, 3))\n', (7471, 7494), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n'), ((14735, 14804), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': '[all_layers[i] for i in out_index]'}), '(inputs=input_layer, outputs=[all_layers[i] for i in out_index])\n', (14740, 14804), False, 'from keras.models import Model\n'), ((6685, 6714), 'os.path.splitext', 'os.path.splitext', (['output_path'], {}), '(output_path)\n', (6701, 6714), False, 'import os\n'), ((9198, 9221), 'keras.backend.int_shape', 'K.int_shape', (['prev_layer'], {}), '(prev_layer)\n', (9209, 9221), True, 'from 
keras import backend as K\n'), ((9392, 9417), 'numpy.product', 'np.product', (['weights_shape'], {}), '(weights_shape)\n', (9402, 9417), True, 'import numpy as np\n'), ((10666, 10706), 'numpy.transpose', 'np.transpose', (['conv_weights', '[2, 3, 1, 0]'], {}), '(conv_weights, [2, 3, 1, 0])\n', (10678, 10706), True, 'import numpy as np\n'), ((1888, 1910), 'keras.backend.sin', 'K.sin', (['(2 * math.pi * x)'], {}), '(2 * math.pi * x)\n', (1893, 1910), True, 'from keras import backend as K\n'), ((11344, 11375), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['((1, 0), (1, 0))'], {}), '(((1, 0), (1, 0)))\n', (11357, 11375), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n'), ((12168, 12224), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'weights': 'bn_weight_list', 'name': 'bn_name'}), '(weights=bn_weight_list, name=bn_name)\n', (12186, 12224), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2012, 2031), 'keras.backend.pow', 'K.pow', (['resultCos', '(4)'], {}), '(resultCos, 4)\n', (2017, 2031), True, 'from keras import backend as K\n'), ((11719, 11735), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (11721, 11735), False, 'from keras.regularizers import l2\n'), ((12506, 12526), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (12515, 12526), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((12994, 13007), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (13005, 13007), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n'), ((1985, 2004), 'keras.backend.pow', 'K.pow', (['resultCos', '(3)'], {}), '(resultCos, 3)\n', (1990, 2004), True, 'from keras import backend as K\n'), ((13486, 13564), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(size, size)', 
'strides': '(stride, stride)', 'padding': '"""same"""'}), "(pool_size=(size, size), strides=(stride, stride), padding='same')\n", (13498, 13564), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n'), ((1958, 1977), 'keras.backend.pow', 'K.pow', (['resultCos', '(2)'], {}), '(resultCos, 2)\n', (1963, 1977), True, 'from keras import backend as K\n'), ((13946, 13951), 'keras.layers.Add', 'Add', ([], {}), '()\n', (13949, 13951), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n'), ((14217, 14237), 'keras.layers.UpSampling2D', 'UpSampling2D', (['stride'], {}), '(stride)\n', (14229, 14237), False, 'from keras.layers import Conv2D, Input, ZeroPadding2D, Add, UpSampling2D, MaxPooling2D, Concatenate, Layer\n')] |
# -*- coding: utf-8 -*-
import numpy as np
def check_images(fusioned, original):
    """Validate that the two images are comparable and normalize their shape.

    Returns (mode, fusioned, original) where mode is 'mtom' (band-to-band,
    grayscale inputs promoted to a single-band third axis) or 'mtop'
    (multi-band versus single-band, inputs returned unchanged).
    """
    assert len(fusioned) == len(original), "Supplied images have different sizes " + \
        str(fusioned.shape) + " and " + str(original.shape)
    if len(fusioned.shape) != len(original.shape):
        return 'mtop', fusioned, original
    if len(fusioned.shape) == 2:
        fusioned = fusioned[:,:,np.newaxis]
        original = original[:,:,np.newaxis]
    else:
        assert fusioned.shape[2] == original.shape[2], "Supplied images have different number of bands "
    return 'mtom', fusioned, original
def mse(fusioned, original):
    """Calculate the per-band mean squared error (MSE).

    :param fusioned: first (fusioned) input image.
    :param original: second (reference) input image.
    :returns: np.ndarray -- one MSE value per band.
    """
    mode, fusioned, original = check_images(fusioned, original)
    band_errors = []
    for band in range(fusioned.shape[2]):
        # In 'mtop' mode the single-band reference is compared to every band.
        reference = original[:,:,band] if mode == 'mtom' else original
        diff = fusioned[:,:,band].astype(np.float64) - reference.astype(np.float64)
        band_errors.append(np.mean(diff ** 2))
    return np.array(band_errors)
def rmse(fusioned, original):
    """Per-band root mean squared error: element-wise sqrt of mse()."""
    per_band_mse = mse(fusioned, original)
    return np.sqrt(per_band_mse)
def bias(fusioned, original):
    """Per-band relative bias: 1 - mean(fusioned band) / mean(reference)."""
    mode, fusioned, original = check_images(fusioned, original)
    band_bias = []
    for band in range(fusioned.shape[2]):
        # In 'mtop' mode the single-band reference is compared to every band.
        reference = original[:,:,band] if mode == 'mtom' else original
        ratio = np.mean(fusioned[:,:,band].astype(np.float64)) / np.mean(reference.astype(np.float64))
        band_bias.append(1 - ratio)
    return band_bias
def correlation_coeff(fusioned, original):
    """Pearson correlation coefficient per band of the fusioned image.

    Returns a plain list with one coefficient per band, taken from the
    off-diagonal entry of ``np.corrcoef``.
    """
    array_corrcoef = []
    mode, fusioned, original = check_images(fusioned, original)
    for band in range(fusioned.shape[2]):
        fused_flat = fusioned[:, :, band].astype(np.float64).flat
        if mode == 'mtom':
            ref_flat = original[:, :, band].astype(np.float64).flat
        else:
            # ranks differ: correlate every band against the whole reference
            ref_flat = original.astype(np.float64).flat
        corr_matrix = np.corrcoef(fused_flat, ref_flat)
        array_corrcoef.append(corr_matrix[0][1])
    return array_corrcoef
| [
"numpy.array"
] | [((1333, 1352), 'numpy.array', 'np.array', (['array_mse'], {}), '(array_mse)\n', (1341, 1352), True, 'import numpy as np\n')] |
import re
import pandas as pd
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns; sns.set()
from scipy.spatial.distance import squareform
from scipy.spatial.distance import pdist, euclidean
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime, timedelta
from io import StringIO, BytesIO
from app.models import Country, CountryStatus
import base64
import plotly.figure_factory as ff
data_dir = 'data/'
def get_all_places(level='countries'):
    """Return all place names known to the app (level kept for API compatibility)."""
    names_df = Country.all_countries_names_as_df()
    return list(names_df['Name'])
def get_all_countries_response():
    """Return the country names present in the official-response CSV."""
    response_df = pd.read_csv(data_dir + 'all_countries_response.csv')
    return list(response_df['Country'])
def get_df_similar_places(place, level = 'countries'):
    """Rank places whose death curve can be aligned with *place*'s.

    For every place whose outbreak started at least ``days_ahead`` days
    before *place*'s first death, slide *place*'s death series along the
    candidate's series, keep the offset with the smallest euclidean
    distance, and turn that distance into a 0..1 similarity.

    :returns: DataFrame indexed by Name with columns gap (day offset),
              dist (euclidean distance; -1 marks *place* itself) and
              Similarity.
    """
    df_orig = Country.all_countries_as_df()
    # one row per place, one column per day, cumulative deaths as values
    df_orig_piv_day = df_orig.pivot(index='Name', columns='Day', values='TotalDeaths')
    df_orig_piv_day = df_orig_piv_day.fillna(0)
    sr_place = df_orig_piv_day.loc[place,]
    # first day with a reported death for the base place
    place_start = (sr_place > 0).idxmax()
    days_ahead = 14 #if level == 'countries' else 5
    # keep only places that already had deaths `days_ahead` days earlier
    df_places_ahead = df_orig_piv_day[df_orig_piv_day.loc[:, max(place_start - days_ahead,0)] > 0.0]
    df_places_rate_norm = df_orig_piv_day.loc[df_places_ahead.index, :]
    df_places_rate_norm = df_places_rate_norm.append(df_orig_piv_day.loc[place,])
    # reverse order to keep base place on top
    df_places_rate_norm = df_places_rate_norm.iloc[::-1]
    sr_place = df_orig_piv_day.loc[place,]
    sr_place = df_orig_piv_day.loc[place,]
    place_start = (sr_place > 0).idxmax()
    # the base place's death series from its first death onwards
    sr_place_compare = sr_place.loc[place_start:].dropna()
    df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
    # the base place is its own best match: gap 0, sentinel dist -1
    df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
                                         ignore_index=True)
    for other_place in df_places_rate_norm.index[1:]:
        sr_other_place = df_places_rate_norm.loc[other_place,].fillna(0)
        min_dist = np.inf
        min_pos = 0
        # slide the base series along the candidate and keep the best offset
        for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
            sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
            dist = euclidean(sr_place_compare, sr_other_place_compare)
            if (dist < min_dist):
                min_dist = dist
                min_pos = i
        day_place2 = sr_other_place.index[min_pos]
        gap = day_place2 - place_start
        df_places_gap = df_places_gap.append(
            pd.Series([other_place, gap, min_dist], index=df_places_gap.columns),
            ignore_index=True)
    df_places_gap = df_places_gap.set_index('Name')
    similar_places = df_places_gap.sort_values('dist')
    # worst case: distance to an all-zero curve; used to normalise to 0..1
    dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
    similar_places['Similarity'] = similar_places['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
    return similar_places
# get similar places based on alighment of death curve
def get_similar_places(place, level = 'countries'):
    """Top-7 places (excluding *place* itself) ranked by death-curve similarity.

    Returns a list of (Name, gap, dist, Similarity) tuples.
    """
    ranked = get_df_similar_places(place, level=level)
    top_rows = ranked[1:8].reset_index().to_numpy()
    return [tuple(row) for row in top_rows]
#get similar places based on socioeconomic features
def get_similar_places_socio(place, level = 'countries'):
    """Rank places by socio-economic similarity to *place*.

    Min-max scales each socio-economic variable, computes pairwise
    euclidean distances and converts the distance to *place* into a 0..1
    similarity. Returns the 10 closest places as (Name, similarity) tuples,
    or [] when *place* is not in the dataset.
    """
    df_socio_stats_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
    # unknown place: nothing to compare against
    if not len(df_socio_stats_orig.query('Name == "{}"'.format(place))): return []
    df_socio_stats_orig_piv = df_socio_stats_orig.pivot(index='Name', columns='variable')
    # missing indicators are filled with the column mean before scaling
    df_socio_stats_orig_piv = df_socio_stats_orig_piv.fillna(df_socio_stats_orig_piv.mean())
    scaler = MinMaxScaler() # feature_range=(-1, 1)
    df_socio_stats_orig_piv_norm = pd.DataFrame(scaler.fit_transform(df_socio_stats_orig_piv),
                                                columns=df_socio_stats_orig_piv.columns,
                                                index=df_socio_stats_orig_piv.index)
    # full pairwise distance matrix between places
    df_dist = pd.DataFrame(squareform(pdist(df_socio_stats_orig_piv_norm)), index=df_socio_stats_orig_piv_norm.index,
                           columns=df_socio_stats_orig_piv_norm.index)
    df_sim = df_dist.loc[:, place].to_frame(name='dist')
    df_sim['similarity'] = 1 - (df_sim['dist'] / df_sim['dist'].max())
    df_sim = df_sim.sort_values('similarity', ascending=False).drop('dist', axis=1)
    # [1:11] skips place itself (similarity 1.0) and keeps the 10 nearest
    tuples = [tuple(x) for x in df_sim[1:11].reset_index().to_numpy()]
    return tuples
def get_places_by_variable(type = 'socio', level = 'countries', variable = 'Population', ascending = False):
    """Top-10 places ranked by one socio-economic or live variable.

    :param type: 'socio' reads the socio-economic CSV, anything else the live stats CSV.
    :returns: list of (Name, value) tuples sorted by *variable*.
    """
    if type == 'socio':
        stats = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
    else:
        stats = pd.read_csv(data_dir + 'live_stats_{}.csv'.format(level))
    # one row per place, the chosen variable as a column
    wide = stats[stats['variable'] == variable].pivot(index='Name', columns='variable', values='value').reset_index()
    ranked = wide[['Name', variable]].sort_values(variable, ascending=ascending).head(10)
    return [tuple(row) for row in ranked.reset_index(drop=True).to_numpy()]
def get_fig_compare_rates(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
    """PNG buffer comparing cases vs. deaths for two places with aligned death curves."""
    comparison_df = get_place_comparison_df(place, place2, level=level, priority=priority)
    return make_chart_comparison(comparison_df, level=level, scale=scale, y=y, mode=mode)
def get_html_compare_response(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
    """Build an HTML gantt-style chart comparing two places' official responses.

    Loads the official response CSV, keeps the two requested places and
    renders one bar per day coloured by stringency, with a normalised
    log-deaths curve overlaid for each place.

    :returns: HTML fragment produced by make_chart_response_comparison.
    """
    data_dir = 'data/'
    df_orig = pd.read_csv(data_dir + 'response/official_response_countries.csv', parse_dates=['Date'])
    # policy-indicator columns plus the death count become nullable integers
    cols = list(df_orig.columns[df_orig.dtypes.eq('float64')][:15]) + ['ConfirmedDeaths']
    df_orig[cols] = df_orig[cols].astype(pd.Int64Dtype())
    countries = [place, place2]
    df_orig = df_orig[df_orig['Name'].isin(countries)]
    df_gantt = df_orig[['Name', 'Date', 'StringencyIndexForDisplay', 'ConfirmedDeaths']].rename(
        columns={'Date': 'Start', 'Name': 'Task'})
    df_gantt['StringencyIndexForDisplay'] = df_gantt['StringencyIndexForDisplay'].fillna(0)
    # each day is one gantt bar of width one day
    df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
    # hover text listing each containment measure for the day
    df_gantt['Description'] = df_orig.apply(lambda
        x: "Stringency Index: {StringencyIndexForDisplay}<br>Confirmed Deaths: {ConfirmedDeaths}<br>School closing: {C1_School closing}<br>Workplace closing: {C2_Workplace closing}<br>Cancel public events: {C3_Cancel public events}<br>Restrictions on gatherings: {C4_Restrictions on gatherings}<br>Close public transport: {C5_Close public transport}<br>Stay at home requirements: {C6_Stay at home requirements}<br>Restrictions on internal movement: {C7_Restrictions on internal movement}<br>International travel controls: {C8_International travel controls}".format(
        **x), axis=1)
    # log scale for deaths; log(0) -> -inf is clamped back to 0 below
    df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
    df_gantt = df_gantt.replace([-np.inf], 0)
    # normalise deaths into [-0.35, 0.35] so the curve fits inside a gantt row
    df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
            df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
    # .copy() so the per-place offset below writes to a real frame, not a view
    # (the original triggered pandas' SettingWithCopyWarning here)
    df_gantt_c1 = df_gantt[df_gantt['Task'] == place].copy()
    df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
    df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
    fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode)
    return fig
def get_html_compare_response_econ(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
    """Build an HTML gantt-style chart comparing two places' economic support.

    Same layout as get_html_compare_response but coloured by the economic
    support index; this CSV already carries a Description column for hover text.

    :returns: HTML fragment produced by make_chart_response_comparison.
    """
    data_dir = 'data/'
    df_orig = pd.read_csv(data_dir + 'response/official_response_economic_countries.csv', parse_dates=['Date'])
    countries = [place, place2]
    df_orig = df_orig[df_orig['Name'].isin(countries)]
    df_gantt = df_orig[['Name', 'Date', 'EconomicSupportIndexForDisplay', 'ConfirmedDeaths', 'Description']].rename(
        columns={'Date': 'Start', 'Name': 'Task'})
    df_gantt['EconomicSupportIndexForDisplay'] = df_gantt['EconomicSupportIndexForDisplay'].fillna(0)
    # each day is one gantt bar of width one day
    df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
    # log scale for deaths; log(0) -> -inf is clamped back to 0 below
    df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
    df_gantt = df_gantt.replace([-np.inf], 0)
    # normalise deaths into [-0.35, 0.35] so the curve fits inside a gantt row
    df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
            df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
    # .copy() so the per-place offset below writes to a real frame, not a view
    # (the original triggered pandas' SettingWithCopyWarning here)
    df_gantt_c1 = df_gantt[df_gantt['Task'] == place].copy()
    df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
    df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
    fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode, var='EconomicSupportIndexForDisplay')
    return fig
def get_fig_compare_doubling_rates(place, place2, level = 'countries'):
    """PNG buffer comparing weekly growth rates for two aligned places."""
    comparison_df = get_place_comparison_df(place, place2, level=level)
    return make_chart_comparison_growth(comparison_df, level=level)
def get_fig_response(country):
    """PNG buffer charting NO2 pollution vs. confirmed deaths for *country*."""
    df_orig_response = pd.read_csv(data_dir + 'pollution_countries_raw.csv', parse_dates=['Date'])
    df_orig_cases = pd.read_csv(data_dir + 'total_cases_countries_normalized.csv', parse_dates=['Date']).rename(
        columns={'Name': 'Country'})
    df_orig = pd.merge(df_orig_response, df_orig_cases, how='left')
    df_to_show = df_orig[df_orig['Country'] == country][['Country', 'City', 'Date', 'no2', 'TotalDeaths']].sort_values('Date')
    deaths_start = 10
    # first row with at least `deaths_start` cumulative deaths
    start_deaths = (df_to_show['TotalDeaths'] >= deaths_start).idxmax()
    # baseline pollution: average NO2 before the outbreak took hold
    avg_before_deaths = df_to_show.loc[:start_deaths, 'no2'].mean()
    # show up to 60 rows of pre-outbreak context
    start_display = max(start_deaths - 60, 0)
    df_to_show = df_to_show.loc[start_display:, ]
    # smooth the noisy daily NO2 readings with a 5-sample rolling mean
    df_to_show['no2'] = df_to_show[['no2']].rolling(5).mean()
    fig = make_chart_response(country, deaths_start, avg_before_deaths, df_to_show)
    return fig
def get_places_gap_df(df_orig, place, place2, priority = 'now'):
    """Compute the day offset (gap) that best aligns place2's death curve to place's.

    :param df_orig: long-format frame with at least Name, Day and TotalDeaths.
    :param priority: 'now' aligns by curve-shape similarity; anything else
                     aligns on the day each place passed 10 deaths.
    :returns: DataFrame indexed by Name with gap, dist and Similarity columns
              (dist is -1 for *place* itself).
    """
    df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
    # the base place is its own reference: gap 0, sentinel dist -1
    df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
                                         ignore_index=True)
    df_orig = df_orig.set_index('Name')
    if not ((df_orig.loc[place,'TotalDeaths'].max()>0) and (df_orig.loc[place2,'TotalDeaths'].max()>0)):
        # one of the places has 0 deaths
        min_dist = 0 # means nothing here
        dist_max = 1 # means nothing here
        gap = 0
    elif priority != 'now':
        # must align based on beginning of deaths
        day_place = (df_orig.loc[place,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
        day_place2 = (df_orig.loc[place2,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
        min_dist = 0 # means nothing here
        dist_max = 1 # means nothing here
        gap = day_place2 - day_place
    else:
        # similarity alignment
        df_orig_piv_day = df_orig.reset_index().pivot(index='Name', columns='Day', values='TotalDeaths')
        sr_place = df_orig_piv_day.loc[place,]
        place_start = (sr_place > 0).idxmax()
        sr_place_compare = sr_place.loc[place_start:].dropna()
        sr_other_place = df_orig_piv_day.loc[place2,].fillna(0)
        min_dist = np.inf
        min_pos = 0
        # slide place's death series along place2's and keep the best offset
        for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
            sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
            dist = euclidean(sr_place_compare, sr_other_place_compare)
            if (dist < min_dist):
                min_dist = dist
                min_pos = i
        # worst case: distance to an all-zero curve; used to normalise Similarity
        dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
        day_place2 = sr_other_place.index[min_pos]
        # gap = min_pos - place_start
        gap = day_place2 - place_start
    df_places_gap = df_places_gap.append(
        pd.Series([place2, gap, min_dist], index=df_places_gap.columns),
        ignore_index=True)
    df_places_gap = df_places_gap.set_index('Name')#.sort_values('dist')
    df_places_gap['Similarity'] = df_places_gap['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
    return df_places_gap
def get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2):
    """Join each place's gap onto the cases frame and add an aligned day axis.

    DayAdj shifts every row's Day by that place's gap (minus one) so the
    two places' death curves line up on a shared axis. The place/place2
    parameters are kept for interface compatibility.
    """
    joined = df_orig.set_index('Name').join(df_places_gap)
    joined['DayAdj'] = (joined['Day'] - joined['gap'] - 1).astype(int)
    return joined
def get_place_comparison_df(place, place2, level = 'countries', priority = 'now'):
    """Return both places' case/death series with an aligned DayAdj axis.

    The place with the shorter death history becomes the alignment base;
    rows before that place's first confirmed case are dropped.
    """
    df_orig = Country.all_countries_as_df()
    # to force place order
    df_orig_c1 = df_orig[df_orig['Name'] == place]
    df_orig_c2 = df_orig[df_orig['Name'] == place2]
    len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
    len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
    # place has to be the one with smallest number of values for Deaths
    if (len_c1 > len_c2):
        place, place2 = place2, place
        df_orig = pd.concat([df_orig_c2, df_orig_c1])
    else:
        df_orig = pd.concat([df_orig_c1, df_orig_c2])
    df_countries_gap = get_places_gap_df(df_orig, place, place2, priority)
    df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_countries_gap, place, place2)
    # drop everything before the base place's first confirmed case
    place_start_cases = (df_orig.set_index('Name').loc[place,].set_index('Day')['Total'] > 0).idxmax()
    df_total_cases_top = df_total_cases_top[df_total_cases_top['DayAdj'] >= place_start_cases]
    return df_total_cases_top.reset_index()
def make_chart_comparison(df_places_to_show, level='countries', scale='log', y='total', mode='static'):
    """Render the cases-vs-deaths comparison chart to a PNG buffer.

    Cases are drawn as a scatter on the left axis and deaths as lines on a
    twin right axis; both share the aligned DayAdj x-axis. `mode` is kept
    for interface compatibility.
    """
    week = mdates.WeekdayLocator(interval=2)  # tick every two weeks
    months = mdates.MonthLocator()  # every month
    month_fmt = mdates.DateFormatter('%b-%d')
    var_y_suffix = '' if y == 'total' else 'Per100k'
    label_y_scale = ' (log)' if scale == 'log' else ''
    label_y_y = '' if y == 'total' else ' per 100k'
    # get last date from dataframe
    date = df_places_to_show['Date'].max()  # datetime.today().strftime('%Y-%m-%d')
    gap = int(df_places_to_show['gap'].min())
    y_lim = df_places_to_show['Total' + var_y_suffix].max() #* 1.2
    # Generate the figure **without using pyplot**.
    fig = Figure(figsize=(8, 5))
    ax = fig.subplots()
    places_to_show = df_places_to_show['Name'].unique()[:2]
    place_name = 'Country' if level == 'countries' else 'City'
    df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
    ax.set_title('{} Comparison - COVID-19 Cases vs. Deaths - {}'.format(place_name, date), fontsize=14)
    sns.scatterplot(x="DayAdj", y='Total' + var_y_suffix, hue=place_name, lw=6, alpha=0.8, data=df_places_to_show,
                    ax=ax)
    ax.xaxis.set_major_locator(months)
    ax.xaxis.set_major_formatter(month_fmt)
    ax.legend(loc='upper left', title="Confirmed cases", frameon=True)
    ax.set(ylabel='Total confirmed cases{}{}'.format(label_y_y, label_y_scale),
           xlabel="Date for {} ({}'s data shifted {} days to align death curves)".format(places_to_show[0],
                                                                                        places_to_show[1], gap))
    ax.set_ylim(0.5, y_lim) if scale == 'log' else ax.set_ylim(-5, y_lim)
    # deaths go on a twin right axis sharing the same x-axis
    ax2 = ax.twinx()
    if scale == 'log':
        ax.set_yscale('log')
        ax2.set_yscale('log')
        ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
        ax2.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax2.grid(False)
    sns.lineplot(x="DayAdj", y='TotalDeaths' + var_y_suffix, hue=place_name, alpha=0.7, lw=6, ax=ax2,
                 data=df_places_to_show)
    ax2.legend(loc='lower right', title="Deaths", frameon=True)
    ax2.set(ylabel='Total deaths{}{}'.format(label_y_y, label_y_scale))
    ax2.set_ylim(0.5, y_lim) if scale == 'log' else ax2.set_ylim(-5, y_lim)
    logo = plt.imread('./static/img/new_logo_site.png')
    ax.figure.figimage(logo, 95, 70, alpha=.35, zorder=1)
    fig.tight_layout()
    # Save it to a temporary buffer.
    buf = BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return buf
def make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level='countries', scale='log', y='total', mode='static', var='StringencyIndexForDisplay'):
    """Render a plotly gantt chart comparing two places' response over time.

    Each place gets one row of day-bars coloured by *var*, with its
    pre-normalised DeathsNorm curve drawn on top. Returns an HTML fragment
    (plotly.js is expected to be loaded by the surrounding page).
    """
    # to force place order
    df_gantt = pd.concat([df_gantt_c1, df_gantt_c2])
    fig = ff.create_gantt(df_gantt, colors=['#93e4c1', '#333F44'], index_col=var,
                          show_colorbar=False, bar_width=0.2, showgrid_x=True, showgrid_y=True, group_tasks=True,
                          title='Comparing response',
                          height=350
                          )
    # overlay the normalised death curves for both places
    fig.add_scatter(x=df_gantt_c1['Start'], y=df_gantt_c1['DeathsNorm'], hoverinfo='skip',
                    line=dict(color='rgb(222, 132, 82)', width=4))
    fig.add_scatter(x=df_gantt_c2['Start'], y=df_gantt_c2['DeathsNorm'], hoverinfo='skip',
                    line=dict(color='rgb(222, 132, 82)', width=4))
    fig.update_layout(
        xaxis=dict(
            showline=True,
            showgrid=False,
            showticklabels=True,
            linecolor='rgb(204, 204, 204)',
            linewidth=2,
            ticks='outside',
            tickfont=dict(
                family='Arial',
                size=12,
                color='rgb(82, 82, 82)',
            ),
            type="date"
        ),
        yaxis=dict(
            showgrid=False,
            zeroline=False,
            showline=False,
            showticklabels=True,
            autorange=True,
        ),
        autosize=False,
        margin=dict(
            autoexpand=False,
            l=100,
            r=20,
            t=110,
        ),
        showlegend=False,
        plot_bgcolor='white'
    )
    # single x-axis caption placed below the plot area
    annotations = []
    annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.13,
                            xanchor='center', yanchor='top',
                            text='Date',
                            font=dict(family='Arial',
                                      size=12,
                                      color='rgb(150,150,150)'),
                            showarrow=False))
    fig.update_layout(annotations=annotations)
    # fig.write_html("gantt.html")
    # fig.show()
    html = fig.to_html(full_html=False, include_plotlyjs=False, )
    return html
def make_chart_comparison_growth(df_places_to_show, level='countries'):
    """Render two stacked charts (weekly growth of cases and of deaths) to a PNG buffer."""
    # get last date from dataframe
    date = df_places_to_show['Date'].max()  # datetime.today().strftime('%Y-%m-%d')
    gap = int(df_places_to_show['gap'].min())
    # Generate the figure **without using pyplot**.
    fig = Figure(figsize=(8, 6))
    axs = fig.subplots(nrows=2)
    place_name = 'Country' if level == 'countries' else 'City'
    axs[0].set_title('{} Comparison - COVID-19 Weekly Growth (%) - {}'.format(place_name, date), fontsize=14)
    places_to_show = df_places_to_show['Name'].unique()[:2]
    df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
    sns.lineplot(x="DayAdj", y='WeeklyGrowth', hue=place_name, lw = 6, alpha = 0.8, ax=axs[0], data=df_places_to_show)
    axs[0].set(ylabel='Weekly growth of cases', xlabel='')
    axs[0].set_ylim(0, 500)
    sns.lineplot(x="DayAdj", y='WeeklyGrowthDeaths', hue=place_name, alpha = 0.7, lw = 6, ax=axs[1], data=df_places_to_show)
    axs[1].set(ylabel='Weekly growth of deaths', xlabel="Day ({}'s data shifted {} days for the death curves to align)".format(places_to_show[1], gap))
    axs[1].set_ylim(0, 500)
    # Save it to a temporary buffer.
    buf = BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return buf
def make_chart_response(country, deaths_start, avg_before_deaths, df_to_show):
    """Render confirmed deaths vs. daily NO2 pollution around the quarantine date to a PNG buffer."""
    city = df_to_show['City'].iloc[0]
    df_quar = pd.read_csv(data_dir + 'all_countries_response.csv', parse_dates = ['Quarantine'])
    quarantine = df_quar[df_quar['Country'] == country]['Quarantine'].iloc[0]
    week = mdates.WeekdayLocator(interval=2)  # tick every two weeks
    months = mdates.MonthLocator()  # every month
    month_fmt = mdates.DateFormatter('%b-%d')
    y_lim = df_to_show['TotalDeaths'].max() * 1.2
    y2_lim = df_to_show['no2'].max() * 1.8
    # Generate the figure **without using pyplot**.
    fig = Figure(figsize=(10, 5))
    ax = fig.subplots()
    ax.set_title('Assessing quarantine implementation - ' + country, fontsize=16, loc='left')
    # only mark the quarantine when an official date is known
    if not pd.isnull(quarantine): ax.axvline(x=quarantine, color='k', linestyle='--', lw=3, label='Official quarantine')
    ax.scatter(df_to_show['Date'], df_to_show['TotalDeaths'], color='black', alpha = 0.7, label = 'Confirmed deaths')
    ax.xaxis.set_major_locator(week)
    ax.xaxis.set_major_formatter(month_fmt)
    ax.set_yscale('log')
    ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax.set_ylim(1, y_lim)
    ax.set(ylabel='Confirmed deaths')
    # pollution goes on a twin right axis sharing the same dates
    ax2 = ax.twinx()
    sns.lineplot(x="Date", y='no2', alpha = 0.7, lw = 6, label = 'Daily $\mathrm{{NO}}_2$ pollution *', ax=ax2, data=df_to_show)
    sns.lineplot(x="Date", y=avg_before_deaths, alpha = 0.7, lw = 6, label = 'Average pollution **', ax=ax2, data=df_to_show)
    ax2.grid(False)
    ax2.xaxis.set_major_locator(week)
    ax2.xaxis.set_major_formatter(month_fmt)
    ax2.set_ylim(1, y2_lim)
    ax2.set(ylabel='$\mathrm{{NO}}_2$ pollution')
    # ask matplotlib for the plotted objects and their labels
    lines, labels = ax.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines + lines2, labels + labels2, loc='upper left')
    annotation = """* Median of $\mathrm{{NO}}_2$ measurements in the most affected city ({city}), 5 days rolling average over time series\n** Average daily $\mathrm{{NO}}_2$ measurements from the begining of 2020 until the first day after {deaths_start} deaths""".format(city=city, deaths_start = deaths_start)
    ax.annotate(annotation, (0,0), (0, -30), xycoords='axes fraction', textcoords='offset points', va='top')
    logo = plt.imread('./static/img/new_logo_site.png')
    ax.figure.figimage(logo, 100, 110, alpha=.35, zorder=1)
    fig.tight_layout()
    # Save it to a temporary buffer.
    buf = BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return buf
def get_timeline_list(place, place2, level = 'countries'):
    """Merged event timeline for two places, aligned on DayAdj.

    Combines first-case / first-death / today milestones with situation-report
    highlights and returns one dict per aligned day for the template.
    """
    # df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
    df_orig = Country.all_countries_as_df()
    # to force place order
    df_orig_c1 = df_orig[df_orig['Name'] == place]
    df_orig_c2 = df_orig[df_orig['Name'] == place2]
    len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
    len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
    # place has to be the one with smallest number of values for Deaths
    if (len_c1 > len_c2):
        place, place2 = place2, place
        df_orig = pd.concat([df_orig_c2, df_orig_c1])
    else:
        df_orig = pd.concat([df_orig_c1, df_orig_c2])
    df_places_gap = get_places_gap_df(df_orig, place, place2)
    df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2)
    places = [place, place2]
    df_places_to_show = df_total_cases_top.loc[places, :]
    places_to_show = list(df_places_to_show.index.unique())
    df_events_owd = pd.DataFrame({'Date': [], 'Name': [], 'Desc': [], 'FullText': [], 'Highlight': []})
    today = df_places_to_show['Date'].max()
    # milestone events per place: first case, first death, and "today"
    for c in places_to_show:
        df_place = df_places_to_show.loc[c,]
        # df_events_owd = df_events_owd.append(pd.DataFrame({'Date':['2019-12-31'], 'Name': [c], 'Desc':['Begining of epidemic'], 'FullText':['First day of data tracking.']}))
        df_events_owd = df_events_owd.append(
            pd.Series([(df_place.set_index('Date')['Total'] > 0).idxmax(), c, '1st Confirmed Case', '', 1],
                      index=df_events_owd.columns), ignore_index=True)
        df_events_owd = df_events_owd.append(
            pd.Series([(df_place.set_index('Date')['TotalDeaths'] > 0).idxmax(), c, '1st Death', '', 5],
                      index=df_events_owd.columns), ignore_index=True)
        msg = """{} is approximately {} days behind {}'s epidemic progression.
    This is an estimate based on matching their death growth curves.""".format(place, abs(
            df_places_gap.loc[place2, 'gap']), place2)
        df_events_owd = df_events_owd.append(pd.Series([today, c, 'Today', msg, 1], index=df_events_owd.columns),
                                             ignore_index=True)
    df_events_owd['Source'] = 'Our World in Data'
    # Adding data from Situation Reports
    if level == 'countries':
        df_events_sr = pd.read_csv(data_dir + 'situation_reports_countries_highlight.csv')
    else:
        df_events_sr = pd.DataFrame({'Name':[]})
    df_events_sr = df_events_sr[df_events_sr['Name'].isin([place, place2])]
    df_events = pd.concat([df_events_owd, df_events_sr], sort=True)
    # Groups events that happen on the same day
    df_events_grouped = pd.DataFrame(df_events.groupby(['Date', 'Name'])['Desc'].apply(lambda x: "\n".join(x)))
    df_events_grouped['FullText'] = df_events.groupby(['Date', 'Name'])['FullText'].apply(lambda x: "\n".join(x))
    df_events_grouped['Source'] = df_events.groupby(['Date', 'Name'])['Source'].apply(lambda x: "\n".join(x))
    df_events_grouped['Highlight'] = df_events.groupby(['Date', 'Name'])['Highlight'].max()
    # attach the aligned day to every event, then join events onto the series
    df_events_adj = pd.merge(df_events_grouped, df_places_to_show[['Date', 'DayAdj']].reset_index(), how='left',
                             on=['Date', 'Name'])
    df_events_adj['Highlight'] = df_events_adj['Highlight'].astype(int)
    df_places_events = pd.merge(df_events_adj[['Name', 'DayAdj', 'Desc', 'FullText', 'Highlight', 'Source']],
                                df_places_to_show.reset_index(), how='outer', on=['DayAdj', 'Name'])
    df_places_events = df_places_events.set_index('Name')
    # one row per aligned day with place-1 columns plain and place-2 columns suffixed '2'
    df_places_events_merged = pd.merge(df_places_events.loc[place, :].reset_index(),
                                       df_places_events.loc[place2, :].reset_index(), on='DayAdj', how='outer',
                                       suffixes=('', '2'))
    df_places_events_merged = df_places_events_merged.set_index('DayAdj').sort_index()
    # trim to the window between the first event and the last reported death
    start_events = min(df_places_events_merged['Desc'].first_valid_index(),
                       df_places_events_merged['Desc2'].first_valid_index())
    end_events = max(df_places_events_merged['TotalDeaths'].last_valid_index(),
                     df_places_events_merged['TotalDeaths2'].last_valid_index())
    df_places_events_trimed = df_places_events_merged.loc[start_events:end_events]
    df_places_events_trimed = df_places_events_trimed[
        ['Name', 'Date', 'Desc', 'FullText', 'Highlight', 'Source', 'Total', 'TotalDeaths', 'GrowthRate',
         'GrowthRateDeaths', 'DaysToDouble', 'DaysToDoubleDeaths', 'Date2', 'Name2', 'Desc2', 'FullText2',
         'Highlight2', 'Source2', 'Total2', 'TotalDeaths2', 'GrowthRate2', 'GrowthRateDeaths2', 'DaysToDouble2',
         'DaysToDoubleDeaths2', ]]
    # Fill place name for 1st place
    df_places_events_trimed['Name'] = df_places_events_trimed['Name'].ffill()
    # Fill place name for 2nd place
    df_places_events_trimed['Name2'] = df_places_events_trimed['Name2'].ffill()
    # Fill TotalDeath
    # df_places_events_trimed['TotalDeaths'] = df_places_events_trimed['TotalDeaths'].ffill()
    # df_places_events_trimed['TotalDeaths2'] = df_places_events_trimed['TotalDeaths2'].ffill()
    # Fill dates for 1st place
    # sr_days = pd.to_datetime(df_places_events_trimed['Date'].ffill())
    # sr_adj_days = df_places_events_trimed.groupby(df_places_events_trimed['Date'].notnull().cumsum()).cumcount()
    # df_places_events_trimed['Date'] = (sr_days + pd.to_timedelta(sr_adj_days, unit='d')).dt.strftime('%Y-%m-%d')
    # Fill dates for 2nd place
    # sr_days = pd.to_datetime(df_places_events_trimed['Date2'].ffill())
    # sr_adj_days = df_places_events_trimed.groupby(df_places_events_trimed['Date2'].notnull().cumsum()).cumcount()
    # df_places_events_trimed['Date2'] = (sr_days + pd.to_timedelta(sr_adj_days, unit='d')).dt.strftime('%Y-%m-%d')
    df_places_events_trimed = df_places_events_trimed.fillna('').replace({'NaT': ''})
    return df_places_events_trimed.to_dict('records')
def fix_variable_names(series):
    """Turn CamelCase variable codes into human-readable display labels.

    Splits CamelCase into words, expands a few well-known codes and
    capitalizes the result (e.g. 'TotalPer100k' -> 'Total per 100k').
    Patterns are raw strings: the original non-raw replacement strings
    relied on invalid escape sequences, which newer Python versions warn
    about.

    :param series: pandas Series of variable names.
    :returns: new Series of display labels (input is not modified).
    """
    new_names = series.apply(lambda x: re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", x))
    new_names = new_names.apply(lambda x: re.sub(r"^Total$", "Total Confirmed Cases", x))
    new_names = new_names.apply(lambda x: re.sub(r"Per100k", "per 100k", x))
    new_names = new_names.apply(lambda x: re.sub(r"^Weekly Growth$", "Weekly Growth (%)", x))
    new_names = new_names.apply(lambda x: re.sub(r"^Weekly Growth Deaths$", "Weekly Growth Deaths (%)", x))
    new_names = new_names.str.capitalize()
    return new_names
def get_place_live_stats(place, level = 'countries'):
    """Live statistics for one place as a list of record dicts.

    When the place has no rows, the full variable list is merged in so the
    template still renders every variable with empty values.
    """
    all_stats = pd.read_csv(data_dir + 'live_stats_{}.csv'.format(level))
    variables = fix_variable_names(pd.Series(all_stats['variable'].unique(), name='variable'))
    place_stats = all_stats[all_stats['Name'] == place]
    if place_stats.empty:
        place_stats = pd.merge(variables, place_stats, how='left')
    place_stats.variable = fix_variable_names(place_stats['variable'])
    return place_stats[['variable', 'value', 'change']].to_dict('records')
def get_place_socio_stats(place, level = 'countries'):
    """Socio-economic statistics for one place as a list of record dicts."""
    df_socio_stats_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level))
    variables = fix_variable_names(pd.Series(df_socio_stats_orig['variable'].unique(), name='variable'))
    df_socio_stats_place = df_socio_stats_orig[df_socio_stats_orig['Name'] == place]
    # unknown place: keep the variable list with empty values for the template
    if not len(df_socio_stats_place):
        df_socio_stats_place = pd.merge(variables, df_socio_stats_place, how='left')
    df_socio_stats_place.variable = fix_variable_names(df_socio_stats_place['variable'])
return df_socio_stats_place[['variable', 'value', 'score']].to_dict('records') | [
"pandas.read_csv",
"plotly.figure_factory.create_gantt",
"pandas.Int64Dtype",
"numpy.log",
"io.BytesIO",
"seaborn.scatterplot",
"datetime.timedelta",
"seaborn.set",
"app.models.Country.all_countries_as_df",
"matplotlib.dates.WeekdayLocator",
"pandas.DataFrame",
"app.models.Country.all_countrie... | [((255, 264), 'seaborn.set', 'sns.set', ([], {}), '()\n', (262, 264), True, 'import seaborn as sns\n'), ((732, 767), 'app.models.Country.all_countries_names_as_df', 'Country.all_countries_names_as_df', ([], {}), '()\n', (765, 767), False, 'from app.models import Country, CountryStatus\n'), ((854, 906), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'all_countries_response.csv')"], {}), "(data_dir + 'all_countries_response.csv')\n", (865, 906), True, 'import pandas as pd\n'), ((1372, 1401), 'app.models.Country.all_countries_as_df', 'Country.all_countries_as_df', ([], {}), '()\n', (1399, 1401), False, 'from app.models import Country, CountryStatus\n'), ((2538, 2587), 'pandas.DataFrame', 'pd.DataFrame', (["{'Name': [], 'gap': [], 'dist': []}"], {}), "({'Name': [], 'gap': [], 'dist': []})\n", (2550, 2587), True, 'import pandas as pd\n'), ((4633, 4647), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4645, 4647), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((6750, 6842), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'response/official_response_countries.csv')"], {'parse_dates': "['Date']"}), "(data_dir + 'response/official_response_countries.csv',\n parse_dates=['Date'])\n", (6761, 6842), True, 'import pandas as pd\n'), ((8095, 8130), 'numpy.log', 'np.log', (["df_gantt['ConfirmedDeaths']"], {}), "(df_gantt['ConfirmedDeaths'])\n", (8101, 8130), True, 'import numpy as np\n'), ((8965, 9066), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'response/official_response_economic_countries.csv')"], {'parse_dates': "['Date']"}), "(data_dir + 'response/official_response_economic_countries.csv',\n parse_dates=['Date'])\n", (8976, 9066), True, 'import pandas as pd\n'), ((9674, 9709), 'numpy.log', 'np.log', (["df_gantt['ConfirmedDeaths']"], {}), "(df_gantt['ConfirmedDeaths'])\n", (9680, 9709), True, 'import numpy as np\n'), ((10588, 10663), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 
'pollution_countries_raw.csv')"], {'parse_dates': "['Date']"}), "(data_dir + 'pollution_countries_raw.csv', parse_dates=['Date'])\n", (10599, 10663), True, 'import pandas as pd\n'), ((10829, 10882), 'pandas.merge', 'pd.merge', (['df_orig_response', 'df_orig_cases'], {'how': '"""left"""'}), "(df_orig_response, df_orig_cases, how='left')\n", (10837, 10882), True, 'import pandas as pd\n'), ((11522, 11571), 'pandas.DataFrame', 'pd.DataFrame', (["{'Name': [], 'gap': [], 'dist': []}"], {}), "({'Name': [], 'gap': [], 'dist': []})\n", (11534, 11571), True, 'import pandas as pd\n'), ((14432, 14461), 'app.models.Country.all_countries_as_df', 'Country.all_countries_as_df', ([], {}), '()\n', (14459, 14461), False, 'from app.models import Country, CountryStatus\n'), ((15504, 15537), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'interval': '(2)'}), '(interval=2)\n', (15525, 15537), True, 'import matplotlib.dates as mdates\n'), ((15565, 15586), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (15584, 15586), True, 'import matplotlib.dates as mdates\n'), ((15618, 15647), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b-%d"""'], {}), "('%b-%d')\n", (15638, 15647), True, 'import matplotlib.dates as mdates\n'), ((16107, 16129), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (16113, 16129), False, 'from matplotlib.figure import Figure\n'), ((16470, 16591), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""DayAdj"""', 'y': "('Total' + var_y_suffix)", 'hue': 'place_name', 'lw': '(6)', 'alpha': '(0.8)', 'data': 'df_places_to_show', 'ax': 'ax'}), "(x='DayAdj', y='Total' + var_y_suffix, hue=place_name, lw=6,\n alpha=0.8, data=df_places_to_show, ax=ax)\n", (16485, 16591), True, 'import seaborn as sns\n'), ((17446, 17571), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""DayAdj"""', 'y': "('TotalDeaths' + var_y_suffix)", 'hue': 'place_name', 'alpha': '(0.7)', 'lw': '(6)', 'ax': 
'ax2', 'data': 'df_places_to_show'}), "(x='DayAdj', y='TotalDeaths' + var_y_suffix, hue=place_name,\n alpha=0.7, lw=6, ax=ax2, data=df_places_to_show)\n", (17458, 17571), True, 'import seaborn as sns\n'), ((17812, 17856), 'matplotlib.pyplot.imread', 'plt.imread', (['"""./static/img/new_logo_site.png"""'], {}), "('./static/img/new_logo_site.png')\n", (17822, 17856), True, 'import matplotlib.pyplot as plt\n'), ((18007, 18016), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (18014, 18016), False, 'from io import StringIO, BytesIO\n'), ((18279, 18316), 'pandas.concat', 'pd.concat', (['[df_gantt_c1, df_gantt_c2]'], {}), '([df_gantt_c1, df_gantt_c2])\n', (18288, 18316), True, 'import pandas as pd\n'), ((18328, 18535), 'plotly.figure_factory.create_gantt', 'ff.create_gantt', (['df_gantt'], {'colors': "['#93e4c1', '#333F44']", 'index_col': 'var', 'show_colorbar': '(False)', 'bar_width': '(0.2)', 'showgrid_x': '(True)', 'showgrid_y': '(True)', 'group_tasks': '(True)', 'title': '"""Comparing response"""', 'height': '(350)'}), "(df_gantt, colors=['#93e4c1', '#333F44'], index_col=var,\n show_colorbar=False, bar_width=0.2, showgrid_x=True, showgrid_y=True,\n group_tasks=True, title='Comparing response', height=350)\n", (18343, 18535), True, 'import plotly.figure_factory as ff\n'), ((20630, 20652), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (20636, 20652), False, 'from matplotlib.figure import Figure\n'), ((21008, 21122), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""DayAdj"""', 'y': '"""WeeklyGrowth"""', 'hue': 'place_name', 'lw': '(6)', 'alpha': '(0.8)', 'ax': 'axs[0]', 'data': 'df_places_to_show'}), "(x='DayAdj', y='WeeklyGrowth', hue=place_name, lw=6, alpha=0.8,\n ax=axs[0], data=df_places_to_show)\n", (21020, 21122), True, 'import seaborn as sns\n'), ((21217, 21337), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""DayAdj"""', 'y': '"""WeeklyGrowthDeaths"""', 'hue': 'place_name', 'alpha': '(0.7)', 'lw': '(6)', 'ax': 'axs[1]', 
'data': 'df_places_to_show'}), "(x='DayAdj', y='WeeklyGrowthDeaths', hue=place_name, alpha=0.7,\n lw=6, ax=axs[1], data=df_places_to_show)\n", (21229, 21337), True, 'import seaborn as sns\n'), ((21567, 21576), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (21574, 21576), False, 'from io import StringIO, BytesIO\n'), ((21775, 21860), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'all_countries_response.csv')"], {'parse_dates': "['Quarantine']"}), "(data_dir + 'all_countries_response.csv', parse_dates=['Quarantine']\n )\n", (21786, 21860), True, 'import pandas as pd\n'), ((21948, 21981), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'interval': '(2)'}), '(interval=2)\n', (21969, 21981), True, 'import matplotlib.dates as mdates\n'), ((22010, 22031), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (22029, 22031), True, 'import matplotlib.dates as mdates\n'), ((22063, 22092), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b-%d"""'], {}), "('%b-%d')\n", (22083, 22092), True, 'import matplotlib.dates as mdates\n'), ((22250, 22273), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (22256, 22273), False, 'from matplotlib.figure import Figure\n'), ((22921, 23045), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': '"""no2"""', 'alpha': '(0.7)', 'lw': '(6)', 'label': '"""Daily $\\\\mathrm{{NO}}_2$ pollution *"""', 'ax': 'ax2', 'data': 'df_to_show'}), "(x='Date', y='no2', alpha=0.7, lw=6, label=\n 'Daily $\\\\mathrm{{NO}}_2$ pollution *', ax=ax2, data=df_to_show)\n", (22933, 23045), True, 'import seaborn as sns\n'), ((23051, 23171), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': 'avg_before_deaths', 'alpha': '(0.7)', 'lw': '(6)', 'label': '"""Average pollution **"""', 'ax': 'ax2', 'data': 'df_to_show'}), "(x='Date', y=avg_before_deaths, alpha=0.7, lw=6, label=\n 'Average pollution **', ax=ax2, data=df_to_show)\n", (23063, 23171), True, 
'import seaborn as sns\n'), ((24025, 24069), 'matplotlib.pyplot.imread', 'plt.imread', (['"""./static/img/new_logo_site.png"""'], {}), "('./static/img/new_logo_site.png')\n", (24035, 24069), True, 'import matplotlib.pyplot as plt\n'), ((24201, 24210), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (24208, 24210), False, 'from io import StringIO, BytesIO\n'), ((24437, 24466), 'app.models.Country.all_countries_as_df', 'Country.all_countries_as_df', ([], {}), '()\n', (24464, 24466), False, 'from app.models import Country, CountryStatus\n'), ((25302, 25389), 'pandas.DataFrame', 'pd.DataFrame', (["{'Date': [], 'Name': [], 'Desc': [], 'FullText': [], 'Highlight': []}"], {}), "({'Date': [], 'Name': [], 'Desc': [], 'FullText': [],\n 'Highlight': []})\n", (25314, 25389), True, 'import pandas as pd\n'), ((26926, 26977), 'pandas.concat', 'pd.concat', (['[df_events_owd, df_events_sr]'], {'sort': '(True)'}), '([df_events_owd, df_events_sr], sort=True)\n', (26935, 26977), True, 'import pandas as pd\n'), ((2630, 2686), 'pandas.Series', 'pd.Series', (['[place, 0.0, -1]'], {'index': 'df_places_gap.columns'}), '([place, 0.0, -1], index=df_places_gap.columns)\n', (2639, 2686), True, 'import pandas as pd\n'), ((6972, 6987), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (6985, 6987), True, 'import pandas as pd\n'), ((7364, 7381), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (7373, 7381), False, 'from datetime import datetime, timedelta\n'), ((9622, 9639), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9631, 9639), False, 'from datetime import datetime, timedelta\n'), ((11613, 11669), 'pandas.Series', 'pd.Series', (['[place, 0.0, -1]'], {'index': 'df_places_gap.columns'}), '([place, 0.0, -1], index=df_places_gap.columns)\n', (11622, 11669), True, 'import pandas as pd\n'), ((13403, 13466), 'pandas.Series', 'pd.Series', (['[place2, gap, min_dist]'], {'index': 'df_places_gap.columns'}), '([place2, gap, min_dist], 
index=df_places_gap.columns)\n', (13412, 13466), True, 'import pandas as pd\n'), ((14870, 14905), 'pandas.concat', 'pd.concat', (['[df_orig_c2, df_orig_c1]'], {}), '([df_orig_c2, df_orig_c1])\n', (14879, 14905), True, 'import pandas as pd\n'), ((14934, 14969), 'pandas.concat', 'pd.concat', (['[df_orig_c1, df_orig_c2]'], {}), '([df_orig_c1, df_orig_c2])\n', (14943, 14969), True, 'import pandas as pd\n'), ((22407, 22428), 'pandas.isnull', 'pd.isnull', (['quarantine'], {}), '(quarantine)\n', (22416, 22428), True, 'import pandas as pd\n'), ((24875, 24910), 'pandas.concat', 'pd.concat', (['[df_orig_c2, df_orig_c1]'], {}), '([df_orig_c2, df_orig_c1])\n', (24884, 24910), True, 'import pandas as pd\n'), ((24939, 24974), 'pandas.concat', 'pd.concat', (['[df_orig_c1, df_orig_c2]'], {}), '([df_orig_c1, df_orig_c2])\n', (24948, 24974), True, 'import pandas as pd\n'), ((26705, 26772), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'situation_reports_countries_highlight.csv')"], {}), "(data_dir + 'situation_reports_countries_highlight.csv')\n", (26716, 26772), True, 'import pandas as pd\n'), ((26806, 26832), 'pandas.DataFrame', 'pd.DataFrame', (["{'Name': []}"], {}), "({'Name': []})\n", (26818, 26832), True, 'import pandas as pd\n'), ((31331, 31383), 'pandas.merge', 'pd.merge', (['variables', 'df_live_stats_place'], {'how': '"""left"""'}), "(variables, df_live_stats_place, how='left')\n", (31339, 31383), True, 'import pandas as pd\n'), ((31959, 32012), 'pandas.merge', 'pd.merge', (['variables', 'df_socio_stats_place'], {'how': '"""left"""'}), "(variables, df_socio_stats_place, how='left')\n", (31967, 32012), True, 'import pandas as pd\n'), ((3108, 3159), 'scipy.spatial.distance.euclidean', 'euclidean', (['sr_place_compare', 'sr_other_place_compare'], {}), '(sr_place_compare, sr_other_place_compare)\n', (3117, 3159), False, 'from scipy.spatial.distance import pdist, euclidean\n'), ((3403, 3471), 'pandas.Series', 'pd.Series', (['[other_place, gap, min_dist]'], {'index': 
'df_places_gap.columns'}), '([other_place, gap, min_dist], index=df_places_gap.columns)\n', (3412, 3471), True, 'import pandas as pd\n'), ((4982, 5017), 'scipy.spatial.distance.pdist', 'pdist', (['df_socio_stats_orig_piv_norm'], {}), '(df_socio_stats_orig_piv_norm)\n', (4987, 5017), False, 'from scipy.spatial.distance import pdist, euclidean\n'), ((10685, 10774), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'total_cases_countries_normalized.csv')"], {'parse_dates': "['Date']"}), "(data_dir + 'total_cases_countries_normalized.csv', parse_dates=\n ['Date'])\n", (10696, 10774), True, 'import pandas as pd\n'), ((26427, 26494), 'pandas.Series', 'pd.Series', (["[today, c, 'Today', msg, 1]"], {'index': 'df_events_owd.columns'}), "([today, c, 'Today', msg, 1], index=df_events_owd.columns)\n", (26436, 26494), True, 'import pandas as pd\n'), ((30469, 30513), 're.sub', 're.sub', (['"""([a-z])([A-Z])"""', '"""\\\\g<1> \\\\g<2>"""', 'x'], {}), "('([a-z])([A-Z])', '\\\\g<1> \\\\g<2>', x)\n", (30475, 30513), False, 'import re\n'), ((30554, 30599), 're.sub', 're.sub', (['"""^Total$"""', '"""Total Confirmed Cases"""', 'x'], {}), "('^Total$', 'Total Confirmed Cases', x)\n", (30560, 30599), False, 'import re\n'), ((30642, 30674), 're.sub', 're.sub', (['"""Per100k"""', '"""per 100k"""', 'x'], {}), "('Per100k', 'per 100k', x)\n", (30648, 30674), False, 'import re\n'), ((30717, 30766), 're.sub', 're.sub', (['"""^Weekly Growth$"""', '"""Weekly Growth (%)"""', 'x'], {}), "('^Weekly Growth$', 'Weekly Growth (%)', x)\n", (30723, 30766), False, 'import re\n'), ((30809, 30872), 're.sub', 're.sub', (['"""^Weekly Growth Deaths$"""', '"""Weekly Growth Deaths (%)"""', 'x'], {}), "('^Weekly Growth Deaths$', 'Weekly Growth Deaths (%)', x)\n", (30815, 30872), False, 'import re\n'), ((12996, 13047), 'scipy.spatial.distance.euclidean', 'euclidean', (['sr_place_compare', 'sr_other_place_compare'], {}), '(sr_place_compare, sr_other_place_compare)\n', (13005, 13047), False, 'from 
scipy.spatial.distance import pdist, euclidean\n')] |
import numpy as np
import copy
import scipy.sparse as spsparse
def closestNumber(n, m):
    """Return the multiple of m closest to n.

    Two candidate multiples are considered: the one obtained by truncating
    n / m, and the next one in the direction of n's sign relative to m.
    Ties resolve in favour of the second candidate.
    """
    q = int(n / m)
    first = m * q
    second = m * (q + 1) if n * m > 0 else m * (q - 1)
    return first if abs(n - first) < abs(n - second) else second
def make_init_points(bbox, rank, size, axis, h0, dim):
    """Create a structured grid in parallel of the entire domain.

    The bounding box is split along ``axis`` into ``size`` equal slabs and
    this processor (``rank``) builds the grid points for its own slab only.

    Parameters
    ----------
    bbox : ndarray of shape (dim, 2)
        [lo, hi] bounds of the full domain per dimension.
    rank : int
        0-based index of this processor's slab.
    size : int
        Total number of slabs/processors.
    axis : int
        Dimension along which the domain is partitioned.
    h0 : float
        Grid spacing.
    dim : int
        Number of spatial dimensions.

    Returns
    -------
    points : ndarray of shape (npoints, dim)
        Coordinates of the structured grid points owned by this rank.
    """
    _bbox = copy.deepcopy(bbox)
    # Direct indexing replaces the original `for i in range(dim): if i == axis`
    # scan; the guard preserves the old no-op behavior for out-of-range axis.
    if 0 <= axis < dim:
        new_lims = np.linspace(_bbox[axis, 0], _bbox[axis, 1], size + 1)
        _bbox[axis, :] = new_lims[rank : rank + 2]
        if rank != 0:
            # Starting point must be the last grid point of the previous
            # slab + h0, so neighbouring ranks do not duplicate points.
            prev_lims = new_lims[rank - 1 : rank - 1 + 2]
            tmp = np.mgrid[slice(prev_lims[0], prev_lims[1] + h0, h0)]
            _bbox[axis, 0] = tmp[-1] + h0
    # Build the structured grid (renamed loop vars: the original shadowed the
    # builtins `min` and `max`).
    points = np.mgrid[tuple(slice(lo, hi + h0, h0) for lo, hi in _bbox)].astype(
        float
    )
    return points.reshape(dim, -1).T
def __setdiff_rows(A, B, return_index=False):
    """
    Similar to MATLAB's setdiff(A, B, 'rows'): return the rows of A that do
    not occur in B (ordered by their byte representation, as np.setdiff1d
    sorts its result).

    return_index=True (returning I with C = A[I, :]) is not implemented and
    raises NotImplementedError.
    """
    A = np.require(A, requirements="C")
    B = np.require(B, requirements="C")
    assert A.ndim == 2, "array must be 2-dim'l"
    assert B.ndim == 2, "array must be 2-dim'l"
    assert A.shape[1] == B.shape[1], "arrays must have the same number of columns"
    assert A.dtype == B.dtype, "arrays must have the same data type"
    # np.setdiff1d only handles 1-D input, so each row is reinterpreted as a
    # single fixed-width byte string before taking the set difference.
    ncolumns = A.shape[1]
    row_dtype = np.dtype((np.character, A.dtype.itemsize * ncolumns))
    diff_rows = np.setdiff1d(A.view(row_dtype), B.view(row_dtype))
    C = diff_rows.view(A.dtype).reshape((-1, ncolumns), order="C")
    if return_index:
        raise NotImplementedError
    return C
def unique_rows(ar):
    """Return the unique rows of the 2-D array *ar*.

    Each row is viewed as one fixed-width byte string so np.unique can
    deduplicate rows in a single pass; the surviving rows come back ordered
    by that byte representation.
    """
    bytes_per_row = ar.itemsize * ar.shape[1]
    rows_as_strings = ar.view("|S%d" % bytes_per_row)
    _, first_occurrence = np.unique(rows_as_strings, return_index=True)
    return ar[first_occurrence]
def dense(Ix, J, S, shape=None, dtype=None):
    """
    Similar to MATLAB's SPARSE(I, J, S, ...), but instead returning a
    dense array.

    Usage
    -----
    >>> shape = (m, n)
    >>> A = dense(I, J, S, shape, dtype)

    J and S may each be a scalar, in which case they are broadcast to the
    shape of Ix (J as int indices, S as float values).
    """
    if np.isscalar(J):
        J = np.full(Ix.shape, J, dtype=int)
    if np.isscalar(S):
        S = np.full(Ix.shape, S, dtype=float)
    # Flatten everything and let scipy assemble the matrix, then densify.
    return spsparse.coo_matrix((S.flat, (Ix.flat, J.flat)), shape, dtype).toarray()
| [
"numpy.isscalar",
"numpy.unique",
"numpy.require",
"numpy.linspace",
"numpy.empty",
"scipy.sparse.coo_matrix",
"copy.deepcopy",
"numpy.dtype"
] | [((656, 675), 'copy.deepcopy', 'copy.deepcopy', (['bbox'], {}), '(bbox)\n', (669, 675), False, 'import copy\n'), ((1524, 1555), 'numpy.require', 'np.require', (['A'], {'requirements': '"""C"""'}), "(A, requirements='C')\n", (1534, 1555), True, 'import numpy as np\n'), ((1564, 1595), 'numpy.require', 'np.require', (['B'], {'requirements': '"""C"""'}), "(B, requirements='C')\n", (1574, 1595), True, 'import numpy as np\n'), ((2116, 2172), 'numpy.dtype', 'np.dtype', (['(np.character, orig_dtype.itemsize * ncolumns)'], {}), '((np.character, orig_dtype.itemsize * ncolumns))\n', (2124, 2172), True, 'import numpy as np\n'), ((2504, 2545), 'numpy.unique', 'np.unique', (['ar_row_view'], {'return_index': '(True)'}), '(ar_row_view, return_index=True)\n', (2513, 2545), True, 'import numpy as np\n'), ((2894, 2908), 'numpy.isscalar', 'np.isscalar', (['J'], {}), '(J)\n', (2905, 2908), True, 'import numpy as np\n'), ((2991, 3005), 'numpy.isscalar', 'np.isscalar', (['S'], {}), '(S)\n', (3002, 3005), True, 'import numpy as np\n'), ((2936, 2965), 'numpy.empty', 'np.empty', (['Ix.shape'], {'dtype': 'int'}), '(Ix.shape, dtype=int)\n', (2944, 2965), True, 'import numpy as np\n'), ((3033, 3051), 'numpy.empty', 'np.empty', (['Ix.shape'], {}), '(Ix.shape)\n', (3041, 3051), True, 'import numpy as np\n'), ((747, 794), 'numpy.linspace', 'np.linspace', (['_bbox[i, 0]', '_bbox[i, 1]', '(size + 1)'], {}), '(_bbox[i, 0], _bbox[i, 1], size + 1)\n', (758, 794), True, 'import numpy as np\n'), ((3178, 3225), 'scipy.sparse.coo_matrix', 'spsparse.coo_matrix', (['(S, (II, J))', 'shape', 'dtype'], {}), '((S, (II, J)), shape, dtype)\n', (3197, 3225), True, 'import scipy.sparse as spsparse\n')] |
""" Run basic Metropolis-Hastings sampling. """
from logging import Logger
import numpy as np
import os
import time
from typing_extensions import Literal
# noinspection PyPackageRequirements
from tap import Tap
from tqdm import tqdm
from conformation.funnel_sampler import funnel_pdf, funnel_sample
from conformation.gmm_sampler import gmm_pdf, gmm_sample
class Args(Tap):
    """
    System arguments.

    Command-line options parsed by Tap. The type annotations drive the
    argument parsing, so they are part of the interface.
    """
    num_samples: int = 1000  # Number of MCMC proposal steps to run
    proposal_std: float = 0.1  # Std of the isotropic Gaussian MCMC proposal
    target_distribution: Literal["funnel", "gmm"] = "funnel"  # Target distribution for MCMC sampling
    num_funnel_x_vars: int = 9  # Number of x variables for the funnel target (ignored for "gmm")
    subsample_frequency: int = 100  # Keep every Nth chain state (chain thinning)
    log_frequency: int = 1000  # Log acceptance statistics every N steps
    save_dir: str = None  # Directory where samples.npy is written (default None — presumably must be supplied on the command line; TODO confirm)
def basic_metropolis(args: Args, logger: Logger) -> None:
    """
    Perform Metropolis-Hastings sampling.

    Runs ``args.num_samples`` steps of a random-walk Metropolis chain with an
    isotropic Gaussian proposal (std ``args.proposal_std``), keeps every
    ``args.subsample_frequency``-th state, and writes the kept states to
    ``<args.save_dir>/samples.npy``.

    :param args: System parameters.
    :param logger: System logger.
    :return: None.
    """
    # Set up logger (`info` is unused here)
    debug, info = logger.debug, logger.info
    print(args)
    debug("Starting MCMC search...")
    # Specify the target distribution pdf function and its direct sampler
    # (the sampler is only used to draw the chain's starting point).
    if args.target_distribution == "funnel":
        target_pdf = funnel_pdf
        target_sample = funnel_sample
    elif args.target_distribution == "gmm":
        target_pdf = gmm_pdf
        target_sample = gmm_sample
    # Samples list (holds the initial draw plus the thinned chain states)
    samples = []
    # Generate an initial sample from the base space
    if args.target_distribution == "funnel":
        # noinspection PyUnboundLocalVariable
        current_sample = target_sample(args.num_funnel_x_vars)
    elif args.target_distribution == "gmm":
        # noinspection PyUnboundLocalVariable
        # NOTE(review): the GMM means are hard-coded here — presumably they
        # must match the target used by gmm_pdf; confirm against
        # conformation.gmm_sampler.
        current_sample = target_sample([(0, 0), (0, 10), (10, 0), (10, 10)])
    # noinspection PyUnboundLocalVariable
    current_probability = target_pdf(current_sample)
    samples.append(current_sample)
    debug(f'Running MC steps...')
    num_accepted = 0
    start_time = time.time()
    for step in tqdm(range(args.num_samples)):
        # Generate an isotropic proposal in the base space
        proposed_sample = current_sample + np.random.normal(0, args.proposal_std, current_sample.shape[0])
        proposed_probability = target_pdf(proposed_sample)
        # Apply Metropolis-Hastings acceptance criterion: accept with
        # probability min(1, p(proposed) / p(current)).
        prob_ratio = proposed_probability / current_probability
        mu = np.random.uniform(0, 1)
        if mu <= prob_ratio:
            current_sample = proposed_sample
            current_probability = proposed_probability
            num_accepted += 1
        # Thin the chain: record only every subsample_frequency-th state.
        if step % args.subsample_frequency == 0:
            samples.append(current_sample)
        if step % args.log_frequency == 0:
            # Acceptance rate so far, guarding the step-0 division.
            if num_accepted == 0:
                acceptance_percentage = 0.0
            else:
                acceptance_percentage = float(num_accepted) / float(step + 1) * 100.0
            debug(f'Steps completed: {step}, acceptance percentage: {acceptance_percentage}')
    end_time = time.time()
    debug(f'Total Time (s): {end_time - start_time}')
    debug(f'% Moves Accepted: {num_accepted / args.num_samples}')
    # Save samples
    samples = np.array(samples)
    np.save(os.path.join(args.save_dir, "samples.npy"), samples)
| [
"numpy.random.normal",
"os.path.join",
"numpy.array",
"numpy.random.uniform",
"time.time"
] | [((2124, 2135), 'time.time', 'time.time', ([], {}), '()\n', (2133, 2135), False, 'import time\n'), ((3179, 3190), 'time.time', 'time.time', ([], {}), '()\n', (3188, 3190), False, 'import time\n'), ((3350, 3367), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (3358, 3367), True, 'import numpy as np\n'), ((2551, 2574), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2568, 2574), True, 'import numpy as np\n'), ((3381, 3423), 'os.path.join', 'os.path.join', (['args.save_dir', '"""samples.npy"""'], {}), "(args.save_dir, 'samples.npy')\n", (3393, 3423), False, 'import os\n'), ((2288, 2351), 'numpy.random.normal', 'np.random.normal', (['(0)', 'args.proposal_std', 'current_sample.shape[0]'], {}), '(0, args.proposal_std, current_sample.shape[0])\n', (2304, 2351), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
from pyorbital import tlefile, orbital
from pyorbital.tlefile import Tle
#def read_all_tles_from_file(platform, tles_file):
# platform = platform.strip().upper()
# tles = []
# fp = open(tles_file)
# for l0 in fp:
# l1, l2 = fp.next(), fp.next()
# if l0.strip()[0:2] == '0 ' and l0.strip()[2:] == platform:
# tles.append(Tle(platform, line1=l1, line2=l2))
# fp.close()
# return tles
#read_all_tles_from_file('METOP-A', 'current.tle')
#time_slot = datetime(2015, 01, 28, 00, 01, 3)
#print time_slot
#print 'check tles'
#print read_tle_from_file_db('METOP-A', 'current.tle',time_slot)
#tle cache
import requests
# gets tle from https://www.space-track.org
# tle from time_range_end -3 days .. time_range_end
def get_tle_spacetrack(time_range_end, noradids, login, password):
    """Fetch 3LE TLE records from space-track.org.

    Queries TLEs with epochs in the window [time_range_end - 3 days,
    time_range_end] for the given NORAD catalogue ids, ordered by epoch.
    Raises IOError when the query does not return HTTP 200.
    """
    base_url = "https://www.space-track.org"
    window_start = time_range_end - datetime.timedelta(seconds=3600*24*3)
    range_text = window_start.strftime('%Y-%m-%d') + "--" + time_range_end.strftime('%Y-%m-%d')
    credentials = {'identity': login, 'password': password}
    login_response = requests.post('%s/auth/login' % base_url, data=credentials)
    tle_url = '%s/basicspacedata/query/class/tle/EPOCH/%s/NORAD_CAT_ID/%s/orderby/EPOCH ASC/format/3le' % (base_url, range_text, noradids)
    tle_response = requests.get(tle_url, cookies=login_response.cookies)
    if tle_response.status_code != 200:
        raise IOError('Spacetrack login error')
    return tle_response.text
def read_tle_from_file_db(platform, tles_file, time_slot):
    """Return the TLE for *platform* whose epoch best matches *time_slot*.

    Scans a multi-TLE file (2-line or 3-line records, assumed well-formed)
    and returns the last TLE whose epoch is <= time_slot; if the first
    matching TLE is already newer than time_slot, None is returned.

    Fixes vs. original: `next(fp)` instead of the Python-2-only `fp.next()`,
    and a `with` block so the file is closed even on the early return.
    """
    import re
    platform = platform.strip().upper()
    tle = None
    last_tle = None
    with open(tles_file) as fp:
        for l0 in fp:
            l1, l2 = next(fp), next(fp)
            if l0.strip() == platform or (l0.strip()[0:2] == '0 ' and l0.strip()[2:] == platform):  # accept 2-line and 3-line ("0 NAME") formats
                l1 = re.sub(r'\+(\d|\.)', ' \\1', l1)  # hack for old tle files from 2012
                l2 = re.sub(r'\+(\d|\.)', ' \\1', l2)  # hack for old tle files from 2012
                tle = Tle(platform, line1=l1, line2=l2)  # read every tle and find the most suitable by tle.epoch
                if tle.epoch > time_slot:
                    return last_tle
                last_tle = tle
    if tle is None:
        return last_tle
    return tle
def get_avhrr_nadir_ll(tle, time_slot):
    """Return the (lon, lat) of the AVHRR nadir pixel (1023) at *time_slot*."""
    from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt
    from pyorbital.geoloc_instrument_definitions import avhrr
    # A single scan line reduced to the nadir pixel only.
    geometry = avhrr(1, np.array([1023]), decimate=0)
    scan_times = geometry.times(time_slot)
    positions = compute_pixels((tle.line1, tle.line2), geometry, scan_times)
    lon, lat, _alt = get_lonlatalt(positions, scan_times)
    return (lon, lat)
def get_scan_avhrr_area(tle, time_slot, slots_count=1):
    """Return the (lon, lat) outline of the AVHRR swath starting at *time_slot*.

    The outline is assembled from decimated swath-edge pixels plus
    cross-track samples of the first and last scan lines, ordered so the
    points form a closed polygon ring.

    NOTE(review): relies on Python 2 semantics in two places —
    ``pos_arr_ss[0].size/2`` assumes integer division, and ``map(...)`` is
    expected to return a list; both change under Python 3. Confirm the
    runtime before porting.
    """
    from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt
    from pyorbital.geoloc_instrument_definitions import avhrr
    #first point used to normalize
    scan_geom = avhrr(1, np.array([1023]), decimate=0)
    s_times = scan_geom.times(time_slot)
    pixels_pos = compute_pixels((tle.line1, tle.line2), scan_geom, s_times)
    nadir_first_point = get_lonlatalt(pixels_pos, s_times)
    # Swath-edge pixels (0 and 2047) of decimated scan lines over the
    # requested number of granule slots.
    scan_geom = avhrr((36 * slots_count)+1, np.array([0, 2047]), decimate=10)
    s_times = scan_geom.times(time_slot)
    pixels_pos = compute_pixels((tle.line1, tle.line2), scan_geom, s_times)
    pos_arr_edge = get_lonlatalt(pixels_pos, s_times)
    # Cross-track samples of the first and last scan lines (ring end caps).
    scan_geom = avhrr(2, np.arange(24, 2048, 40), decimate=36*10*slots_count )
    s_times = scan_geom.times(time_slot)
    pixels_pos = compute_pixels((tle.line1, tle.line2), scan_geom, s_times)
    pos_arr_ss = get_lonlatalt(pixels_pos, s_times)
    # Stack lon/lat into (n, 2) point lists.
    lines_edge = np.array([pos_arr_edge[0], pos_arr_edge[1]]).T
    lines_ss = np.array([pos_arr_ss[0], pos_arr_ss[1]]).T
    # Concatenate edge and end-cap points into one ring — appears to walk one
    # edge down, across, the other edge up, and back; verify against output.
    points_arr = np.concatenate( (lines_edge[0::2], lines_ss[pos_arr_ss[0].size/2:], lines_edge[-1::-2], lines_ss[:pos_arr_ss[0].size/2][::-1]))
    # normalize area aka dirty hack: when the granule starts west of -120°,
    # shift longitudes > 120° by -360° so the polygon does not wrap the
    # antimeridian.
    if nadir_first_point[0][0] < -120.0:
        points_arr = map(lambda xy: xy if xy[0] < 120.0 else (xy[0] - 360.0, xy[1] ), points_arr)
    return points_arr
#time_slot = datetime(2015, 01, 28, 00, 01, 3)
#points_arr = get_scan_avhrr_area('METOP-A', time_slot)
from shapely.geometry import mapping,Polygon
import fiona
from fiona.crs import from_string
def write_shp(filename, schema, features, crs_string):
    """Write *features* to an ESRI Shapefile using the given schema and CRS."""
    crs_ = from_string(crs_string)
    with fiona.open(filename, 'w', 'ESRI Shapefile', schema=schema, crs=crs_) as sink:
        for feature in features:
            sink.write(feature)
#schema = {
# 'geometry': 'Polygon','properties': {'platform': 'str','time_slot': 'str','in_balt': 'int',},
#}
#features = [
#{
# 'geometry': mapping(Polygon(balt_arr)),
# 'properties': {'platform': 'balt', 'time_slot': "", 'in_balt': 1},
# },
# ]
#write_shp("out.shp", schema, features)
# platform is TLE platform
# aoi_polygon is shapely plygon
# aoi_polygon_proj is projection in meters of aoi to compute area
# time_range_start datetime
# time_range_end datetime
from shapely.geometry import mapping, Polygon
from shapely.ops import transform
from shapely.geos import TopologicalError
from functools import partial
import pyproj
from geopy.distance import great_circle
def generate_avhrr_platform_passes_over_aoi(platform_tle, aoi_polygon, aoi_polygon_proj_string, time_range_start, time_range_end, max_distance_km):
    """Predict the satellite passes whose AVHRR swath covers the AOI.

    Steps through the time range in 60-second granules; a granule belongs to
    a pass when its swath polygon intersects the AOI. Consecutive
    intersecting granules are grouped into one pass record:
    ``{'time_slot': first granule start, 'slots': granule count,
    'aoi_cover': accumulated intersection area as % of AOI area}``.

    ``aoi_polygon`` must be supplied in the projected (metric) CRS described
    by ``aoi_polygon_proj_string``; areas are converted from m^2 to km^2.
    NOTE(review): 'aoi_cover' sums per-granule intersections, so overlapping
    granules may double-count area — confirm whether values > 100% are
    expected downstream.
    """
    ll_proj = pyproj.Proj(init='epsg:4326')
    aoi_polygon_proj = pyproj.Proj(aoi_polygon_proj_string)
    # Coordinate transforms between lon/lat and the metric AOI projection.
    ll2aoi_partial = partial(pyproj.transform,ll_proj,aoi_polygon_proj)
    aoi2ll_partial = partial(pyproj.transform,aoi_polygon_proj,ll_proj)
    aoi_area = aoi_polygon.area / 1000000  # m^2 -> km^2
    aoi_centroid_ll = transform(aoi2ll_partial, aoi_polygon.centroid)
    aoi_timeslots = []
    current_pass = []
    granules_intersect_area = 0.0
    time_slot = time_range_start
    last_intersects = False
    while True:
        in_aoi = False
        # first, compute distance between aoi centroid and start of granule nadir;
        # the expensive swath-polygon test runs only when the satellite is near.
        nadir_ll = get_avhrr_nadir_ll(platform_tle, time_slot)
        distance = great_circle(nadir_ll, (aoi_centroid_ll.x,aoi_centroid_ll.y)).km
        if distance < max_distance_km:
            #print "%s %.0f" % (time_slot.strftime('%Y-%m-%d %H:%M:%S UTC'), distance)
            granule_polygon = transform(ll2aoi_partial, Polygon(get_scan_avhrr_area(platform_tle, time_slot)))
            in_aoi = aoi_polygon.intersects(granule_polygon)
            if in_aoi == True:
                intersection_proj = aoi_polygon.intersection(granule_polygon)
                granules_intersect_area += intersection_proj.area / 1000000.0 # m^2 -> km^2
                current_pass.append(time_slot)
        #print "%s %d" % (time_slot.strftime('%Y-%m-%d %H:%M:%S UTC'), in_aoi)
        #print "%s,%.0f,%d" % (time_slot.strftime('%Y-%m-%d %H:%M:%S'), distance, in_aoi)
        if last_intersects == True and in_aoi == False: # pass ends -> close the pass record
            int_percent = (granules_intersect_area / aoi_area * 100.0)
            if len(current_pass) > 0 and int_percent > 0.0:
                aoi_timeslots.append({ 'time_slot': current_pass[0], 'slots': len(current_pass), 'aoi_cover': int_percent})
            current_pass = []
            granules_intersect_area = 0.0
        # Stop only once past the end of the range AND outside any pass, so a
        # pass straddling time_range_end is still completed.
        if last_intersects == False and in_aoi == False and time_slot > time_range_end:
            break
        last_intersects = in_aoi
        time_slot = time_slot + datetime.timedelta(seconds=60)
    return aoi_timeslots
# gets granule pass over aoi from predicted aoi_timeslots list
# if granule is not over aoi return None
def get_pass_for_granule(granule_time_slot_start, granule_time_slot_end, aoi_timeslots):
    """Return the predicted AOI pass matching a granule interval, or None.

    A granule matches a pass when it straddles the pass start, or when the
    granule itself starts inside the pass window (slots * 60 seconds).
    """
    for candidate in aoi_timeslots:
        pass_start = candidate['time_slot']
        pass_end = pass_start + datetime.timedelta(seconds=candidate['slots'] * 60)
        # Granule straddles the pass start.
        if granule_time_slot_start < pass_start < granule_time_slot_end:
            return candidate
        # Granule begins inside the pass window.
        if pass_start <= granule_time_slot_start < pass_end:
            return candidate
    return None
# write sattelite passes as shapefile
def save_passes_as_shp(filemane, aoi_polygon, aoi_polygon_proj_string, aoi_timeslots, tles):
    """Write the predicted satellite passes (plus the AOI itself) to a shapefile.

    aoi_timeslots maps platform name -> list of pass records; the AOI polygon
    is appended as a final feature with platform "aoi" and cover -999.
    """
    from shapely.geometry import mapping, Polygon
    proj = pyproj.Proj(aoi_polygon_proj_string)
    schema = {
        'geometry': 'Polygon',
        'properties': {'platform': 'str', 'time_slot': 'str', 'slots': 'int', 'cover': 'float'},
    }
    features = []
    for platform, passes in aoi_timeslots.items():
        for aoit in passes:
            # Rebuild the swath footprint for this pass and project it.
            footprint = Polygon(get_scan_avhrr_area(tles[platform], aoit['time_slot'], aoit['slots']))
            features.append({
                'geometry': mapping(transform(proj, footprint)),
                'properties': {
                    'platform': platform,
                    'time_slot': aoit['time_slot'].strftime('%Y-%m-%d %H:%M:%S UTC'),
                    'slots': aoit['slots'],
                    'cover': aoit['aoi_cover'],
                },
            })
    features.append({
        'geometry': mapping(aoi_polygon),
        'properties': {'platform': "aoi", 'time_slot': "-", 'slots': 0, 'cover': -999},
    })
    write_shp(filemane, schema, features, aoi_polygon_proj_string)
| [
"pyorbital.geoloc.get_lonlatalt",
"requests.post",
"shapely.ops.transform",
"pyorbital.tlefile.Tle",
"requests.get",
"fiona.crs.from_string",
"shapely.geometry.mapping",
"numpy.array",
"re.sub",
"functools.partial",
"geopy.distance.great_circle",
"numpy.concatenate",
"pyproj.Proj",
"fiona.... | [((1103, 1158), 'requests.post', 'requests.post', (["('%s/auth/login' % base_url)"], {'data': 'payload'}), "('%s/auth/login' % base_url, data=payload)\n", (1116, 1158), False, 'import requests\n'), ((1303, 1344), 'requests.get', 'requests.get', (['tle_url'], {'cookies': 'r1.cookies'}), '(tle_url, cookies=r1.cookies)\n', (1315, 1344), False, 'import requests\n'), ((2476, 2534), 'pyorbital.geoloc.compute_pixels', 'compute_pixels', (['(tle.line1, tle.line2)', 'scan_geom', 's_times'], {}), '((tle.line1, tle.line2), scan_geom, s_times)\n', (2490, 2534), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((2556, 2590), 'pyorbital.geoloc.get_lonlatalt', 'get_lonlatalt', (['pixels_pos', 's_times'], {}), '(pixels_pos, s_times)\n', (2569, 2590), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((2952, 3010), 'pyorbital.geoloc.compute_pixels', 'compute_pixels', (['(tle.line1, tle.line2)', 'scan_geom', 's_times'], {}), '((tle.line1, tle.line2), scan_geom, s_times)\n', (2966, 3010), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3032, 3066), 'pyorbital.geoloc.get_lonlatalt', 'get_lonlatalt', (['pixels_pos', 's_times'], {}), '(pixels_pos, s_times)\n', (3045, 3066), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3195, 3253), 'pyorbital.geoloc.compute_pixels', 'compute_pixels', (['(tle.line1, tle.line2)', 'scan_geom', 's_times'], {}), '((tle.line1, tle.line2), scan_geom, s_times)\n', (3209, 3253), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3270, 3304), 'pyorbital.geoloc.get_lonlatalt', 'get_lonlatalt', (['pixels_pos', 's_times'], {}), '(pixels_pos, s_times)\n', (3283, 3304), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3435, 3493), 'pyorbital.geoloc.compute_pixels', 'compute_pixels', (['(tle.line1, tle.line2)', 'scan_geom', 
's_times'], {}), '((tle.line1, tle.line2), scan_geom, s_times)\n', (3449, 3493), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3508, 3542), 'pyorbital.geoloc.get_lonlatalt', 'get_lonlatalt', (['pixels_pos', 's_times'], {}), '(pixels_pos, s_times)\n', (3521, 3542), False, 'from pyorbital.geoloc import ScanGeometry, compute_pixels, get_lonlatalt\n'), ((3677, 3811), 'numpy.concatenate', 'np.concatenate', (['(lines_edge[0::2], lines_ss[pos_arr_ss[0].size / 2:], lines_edge[-1::-2],\n lines_ss[:pos_arr_ss[0].size / 2][::-1])'], {}), '((lines_edge[0::2], lines_ss[pos_arr_ss[0].size / 2:],\n lines_edge[-1::-2], lines_ss[:pos_arr_ss[0].size / 2][::-1]))\n', (3691, 3811), True, 'import numpy as np\n'), ((4250, 4273), 'fiona.crs.from_string', 'from_string', (['crs_string'], {}), '(crs_string)\n', (4261, 4273), False, 'from fiona.crs import from_string\n'), ((5251, 5280), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (5262, 5280), False, 'import pyproj\n'), ((5301, 5337), 'pyproj.Proj', 'pyproj.Proj', (['aoi_polygon_proj_string'], {}), '(aoi_polygon_proj_string)\n', (5312, 5337), False, 'import pyproj\n'), ((5357, 5409), 'functools.partial', 'partial', (['pyproj.transform', 'll_proj', 'aoi_polygon_proj'], {}), '(pyproj.transform, ll_proj, aoi_polygon_proj)\n', (5364, 5409), False, 'from functools import partial\n'), ((5426, 5478), 'functools.partial', 'partial', (['pyproj.transform', 'aoi_polygon_proj', 'll_proj'], {}), '(pyproj.transform, aoi_polygon_proj, ll_proj)\n', (5433, 5478), False, 'from functools import partial\n'), ((5538, 5585), 'shapely.ops.transform', 'transform', (['aoi2ll_partial', 'aoi_polygon.centroid'], {}), '(aoi2ll_partial, aoi_polygon.centroid)\n', (5547, 5585), False, 'from shapely.ops import transform\n'), ((7845, 7881), 'pyproj.Proj', 'pyproj.Proj', (['aoi_polygon_proj_string'], {}), '(aoi_polygon_proj_string)\n', (7856, 7881), False, 'import pyproj\n'), ((2394, 2410), 
'numpy.array', 'np.array', (['[1023]'], {}), '([1023])\n', (2402, 2410), True, 'import numpy as np\n'), ((2870, 2886), 'numpy.array', 'np.array', (['[1023]'], {}), '([1023])\n', (2878, 2886), True, 'import numpy as np\n'), ((3109, 3128), 'numpy.array', 'np.array', (['[0, 2047]'], {}), '([0, 2047])\n', (3117, 3128), True, 'import numpy as np\n'), ((3329, 3352), 'numpy.arange', 'np.arange', (['(24)', '(2048)', '(40)'], {}), '(24, 2048, 40)\n', (3338, 3352), True, 'import numpy as np\n'), ((3559, 3603), 'numpy.array', 'np.array', (['[pos_arr_edge[0], pos_arr_edge[1]]'], {}), '([pos_arr_edge[0], pos_arr_edge[1]])\n', (3567, 3603), True, 'import numpy as np\n'), ((3618, 3658), 'numpy.array', 'np.array', (['[pos_arr_ss[0], pos_arr_ss[1]]'], {}), '([pos_arr_ss[0], pos_arr_ss[1]])\n', (3626, 3658), True, 'import numpy as np\n'), ((4280, 4348), 'fiona.open', 'fiona.open', (['filename', '"""w"""', '"""ESRI Shapefile"""'], {'schema': 'schema', 'crs': 'crs_'}), "(filename, 'w', 'ESRI Shapefile', schema=schema, crs=crs_)\n", (4290, 4348), False, 'import fiona\n'), ((8459, 8479), 'shapely.geometry.mapping', 'mapping', (['aoi_polygon'], {}), '(aoi_polygon)\n', (8466, 8479), False, 'from shapely.geometry import mapping, Polygon\n'), ((1803, 1837), 're.sub', 're.sub', (['"""\\\\+(\\\\d|\\\\.)"""', '""" \\\\1"""', 'l1'], {}), "('\\\\+(\\\\d|\\\\.)', ' \\\\1', l1)\n", (1809, 1837), False, 'import re\n'), ((1879, 1913), 're.sub', 're.sub', (['"""\\\\+(\\\\d|\\\\.)"""', '""" \\\\1"""', 'l2'], {}), "('\\\\+(\\\\d|\\\\.)', ' \\\\1', l2)\n", (1885, 1913), False, 'import re\n'), ((1956, 1989), 'pyorbital.tlefile.Tle', 'Tle', (['platform'], {'line1': 'l1', 'line2': 'l2'}), '(platform, line1=l1, line2=l2)\n', (1959, 1989), False, 'from pyorbital.tlefile import Tle\n'), ((5893, 5955), 'geopy.distance.great_circle', 'great_circle', (['nadir_ll', '(aoi_centroid_ll.x, aoi_centroid_ll.y)'], {}), '(nadir_ll, (aoi_centroid_ll.x, aoi_centroid_ll.y))\n', (5905, 5955), False, 'from geopy.distance 
import great_circle\n'), ((7075, 7105), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(60)'}), '(seconds=60)\n', (7093, 7105), False, 'import datetime\n'), ((8241, 8254), 'shapely.geometry.mapping', 'mapping', (['poly'], {}), '(poly)\n', (8248, 8254), False, 'from shapely.geometry import mapping, Polygon\n'), ((7565, 7611), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': "(aoit['slots'] * 60)"}), "(seconds=aoit['slots'] * 60)\n", (7583, 7611), False, 'import datetime\n'), ((991, 1032), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * 24 * 3)'}), '(seconds=3600 * 24 * 3)\n', (1009, 1032), False, 'import datetime\n')] |
import torch as T
import numpy as np
from numpy import linalg as LA
from torch.optim import Optimizer, SGD
import copy
class online_rna(SGD):
    """SGD optimizer extended with Regularized Nonlinear Acceleration (RNA).

    Running averages of the parameters and gradients are periodically
    pushed into bounded histories (``store``); ``accelerate`` then
    overwrites the parameters with a weighted combination of the last
    ``K`` stored snapshots, the weights coming from a regularized linear
    system (``compute_c_rna``).
    """
    def __init__(self,params, lr,momentum=0,dampening=0,weight_decay=0,nesterov=False,K=10,reg_acc=1e-5,acceleration_type='online',do_average=False):
        """Create the optimizer.

        K: number of snapshots kept in the history buffers.
        reg_acc: Tikhonov regularization used when solving for the
            extrapolation weights.
        acceleration_type: 'online', 'offline' or 'none' — see accelerate().
        do_average: if True, plain averaging replaces the RNA weights.
        """
        self.params = list(params)
        super(online_rna, self).__init__(self.params, lr, momentum, dampening, weight_decay, nesterov)
        self.K = K
        self.reg_acc = reg_acc
        self.do_average = do_average
        self.acceleration_type = acceleration_type
        # Per-group buffers: running averages plus bounded histories of
        # the last K stored snapshots.
        for group in self.param_groups:
            group['running_avg_model'] = dict()
            group['running_avg_grad'] = dict()
            group['avg_model_hist'] = dict()
            group['avg_grad_hist'] = dict()
            group['avg_counter'] = dict()
        self.reset_buffers()
        if(acceleration_type == 'offline'):
            # Offline mode additionally keeps whole state_dict snapshots.
            self.dict_hist = list()
    def reset_buffers(self):
        """Empty the per-parameter snapshot histories (and the state_dict history in offline mode)."""
        for group in self.param_groups:
            avg_model_hist = group['avg_model_hist']
            avg_grad_hist = group['avg_grad_hist']
            for param in group['params']:
                avg_model_hist[param] = []
                avg_grad_hist[param] = []
        self.reset_running_avg()
        if(self.acceleration_type == 'offline'):
            self.dict_hist = list()
    def reset_running_avg(self):
        """Restart the running averages of parameters and gradients."""
        for group in self.param_groups:
            avg_counter = group['avg_counter']
            running_avg_model = group['running_avg_model']
            running_avg_grad = group['running_avg_grad']
            for param in group['params']:
                avg_counter[param] = 0
                running_avg_model[param] = None
                running_avg_grad[param] = None
    def update_lr(self,lr):
        """Set a new learning rate on every parameter group."""
        for group in self.param_groups:
            group['lr'] = lr
    def update_running_avg(self):
        """Fold the current parameters/gradients into the running averages."""
        for group in self.param_groups:
            avg_counter = group['avg_counter']
            running_avg_model = group['running_avg_model']
            running_avg_grad = group['running_avg_grad']
            for param in group['params']:
                avg_counter[param] += 1
                if(avg_counter[param] == 1):
                    # First observation: initialize the averages with copies.
                    running_avg_model[param] = param.data.clone()
                    if(param.grad is None):
                        running_avg_grad[param] = None
                    else:
                        running_avg_grad[param] = param.grad.data.clone()
                else:
                    # weight_avg_x = (avg_counter[param]-1)/avg_counter[param]
                    weight_avg_x = 0 #take the last one
                    weight_avg_grad = (avg_counter[param]-1)/avg_counter[param]
                    # weight_avg_grad = 0 #take the last one
                    # With weight 0 the "model average" is really just the
                    # latest iterate, while gradients use a true running mean.
                    running_avg_model[param] = running_avg_model[param].mul(weight_avg_x) + param.data.clone().mul(1-weight_avg_x)
                    if(param.grad is None):
                        running_avg_grad[param] = None
                    else:
                        running_avg_grad[param] = running_avg_grad[param].mul(weight_avg_grad) + param.grad.data.clone().mul(1-weight_avg_grad)
    def step(self):
        """Take one SGD step, then update the running averages."""
        super(online_rna, self).step()
        self.update_running_avg()
    def store(self,model=None):
        """Push the current running averages into the bounded K-sized histories.

        Model snapshots are stored as tensors; gradient snapshots are stored
        as flattened CPU numpy arrays (the form compute_c_rna consumes).
        In offline mode a deep copy of ``model.state_dict()`` is recorded too.
        """
        for group in self.param_groups:
            avg_model_hist = group['avg_model_hist']
            avg_grad_hist = group['avg_grad_hist']
            running_avg_model = group['running_avg_model']
            running_avg_grad = group['running_avg_grad']
            for param in group['params']:
                if(len(avg_model_hist[param])>=(self.K)): # cap history length at K
                    avg_model_hist[param].pop(0)
                if(len(avg_grad_hist[param])>=(self.K)): # cap history length at K
                    avg_grad_hist[param].pop(0)
                avg_model_hist[param].append(copy.deepcopy(running_avg_model[param]))
                if(running_avg_grad[param] is not None):
                    avg_grad_hist[param].append(copy.deepcopy(running_avg_grad[param].cpu().numpy().ravel()))
        self.reset_running_avg()
        # self.reset_momentum()
        if(self.acceleration_type == 'offline'):
            if(model is None):
                raise ValueError('Problem in rna.store(): model cannot be none in offline acceleration')
            if(len(self.dict_hist)>=(self.K)): # cap history length at K
                self.dict_hist.pop(0)
            self.dict_hist.append(copy.deepcopy(model.state_dict()))
    def reset_momentum(self):
        """Zero the SGD momentum buffers in place."""
        for group in self.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                if 'momentum_buffer' in param_state:
                    param_state['momentum_buffer'].mul_(0)
    def compute_c_rna(self):
        """Solve for the RNA extrapolation weights.

        Stacks the stored gradient snapshots into a k x d matrix R and
        solves (R R^T / ||R R^T|| + reg_acc * I) z = 1, returning
        c = z / sum(z) so the weights sum to one.

        NOTE(review): np.concatenate raises if no gradients were ever
        stored — confirm callers always store() before accelerating.
        """
        gradient_buffer = []
        for group in self.param_groups:
            avg_grad_hist = group['avg_grad_hist']
            for param in group['params']:
                if(len(avg_grad_hist[param]) == 0):
                    continue
                entry = np.asmatrix(avg_grad_hist[param])
                gradient_buffer.append(entry)
        gradient_buffer = np.concatenate(gradient_buffer,axis=1)
        R = np.asmatrix(gradient_buffer)
        (k,d) = np.shape(R)
        RR = np.dot(R,np.transpose(R))
        normRR = LA.norm(RR,2)
        RR = RR/normRR  # normalize so reg_acc acts on a unit-scale matrix
        reg_I = self.reg_acc*np.eye(k)
        ones_k = np.ones(k)
        try:
            z = np.linalg.solve(RR+reg_I, ones_k)
        except LA.linalg.LinAlgError:
            # Singular system: fall back to least squares (rcond passed as -1).
            z = np.linalg.lstsq(RR+reg_I, ones_k, -1)
            z = z[0]
        if( np.abs(np.sum(z)) < 1e-10):
            # Degenerate solution (normalization would blow up): use uniform weights.
            z = np.ones(k)
        c = (z/np.sum(z)).tolist()
        return c
    def accelerate(self,model=None):
        """Overwrite the parameters with the extrapolated snapshot combination.

        'online' combines the per-parameter tensor histories in place;
        'offline' combines the stored state_dicts and loads them into
        ``model``; 'none' is a no-op. Returns the weight vector used.
        """
        c_vec = self.compute_c_rna()
        if(self.do_average):
            # Discard the RNA weights and use a plain average instead.
            k = len(c_vec)
            z = np.ones(k)
            c_vec = (z/np.sum(z)).tolist()
        if(self.acceleration_type.lower() == 'online'):
            for group in self.param_groups:
                avg_model_hist = group['avg_model_hist']
                for param in group['params']:
                    param.data.mul_(0.0);
                    for (i, c) in enumerate(c_vec):
                        # NOTE(review): add_(scalar, tensor) is the legacy
                        # positional-alpha form, deprecated in newer torch.
                        param.data.add_(c,avg_model_hist[param][i])
        if(self.acceleration_type.lower() == 'none'):
            print('No acceleration')
            pass
        if(self.acceleration_type.lower() == 'offline'):
            if(model is not None):
                new_dict = dict(model.state_dict())
                for key in new_dict:
                    new_dict[key].mul_(0);
                    for idx_c in range(0,len(c_vec)):
                        new_dict[key].add_(c_vec[idx_c],self.dict_hist[idx_c][key])
                model.load_state_dict(new_dict)
            else:
                raise ValueError('Problem in rna.accelerate(): model cannot be none in offline acceleration')
        return c_vec
| [
"numpy.eye",
"numpy.linalg.solve",
"copy.deepcopy",
"numpy.ones",
"numpy.asmatrix",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.linalg.lstsq",
"numpy.shape",
"numpy.transpose"
] | [((5518, 5557), 'numpy.concatenate', 'np.concatenate', (['gradient_buffer'], {'axis': '(1)'}), '(gradient_buffer, axis=1)\n', (5532, 5557), True, 'import numpy as np\n'), ((5578, 5606), 'numpy.asmatrix', 'np.asmatrix', (['gradient_buffer'], {}), '(gradient_buffer)\n', (5589, 5606), True, 'import numpy as np\n'), ((5623, 5634), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (5631, 5634), True, 'import numpy as np\n'), ((5700, 5714), 'numpy.linalg.norm', 'LA.norm', (['RR', '(2)'], {}), '(RR, 2)\n', (5707, 5714), True, 'from numpy import linalg as LA\n'), ((5802, 5812), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (5809, 5812), True, 'import numpy as np\n'), ((5666, 5681), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (5678, 5681), True, 'import numpy as np\n'), ((5775, 5784), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (5781, 5784), True, 'import numpy as np\n'), ((5851, 5886), 'numpy.linalg.solve', 'np.linalg.solve', (['(RR + reg_I)', 'ones_k'], {}), '(RR + reg_I, ones_k)\n', (5866, 5886), True, 'import numpy as np\n'), ((6063, 6073), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (6070, 6073), True, 'import numpy as np\n'), ((6296, 6306), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (6303, 6306), True, 'import numpy as np\n'), ((5412, 5445), 'numpy.asmatrix', 'np.asmatrix', (['avg_grad_hist[param]'], {}), '(avg_grad_hist[param])\n', (5423, 5445), True, 'import numpy as np\n'), ((5939, 5978), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(RR + reg_I)', 'ones_k', '(-1)'], {}), '(RR + reg_I, ones_k, -1)\n', (5954, 5978), True, 'import numpy as np\n'), ((6026, 6035), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (6032, 6035), True, 'import numpy as np\n'), ((4150, 4189), 'copy.deepcopy', 'copy.deepcopy', (['running_avg_model[param]'], {}), '(running_avg_model[param])\n', (4163, 4189), False, 'import copy\n'), ((6098, 6107), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (6104, 6107), True, 'import numpy as np\n'), ((6330, 6339), 'numpy.sum', 'np.sum', 
(['z'], {}), '(z)\n', (6336, 6339), True, 'import numpy as np\n')] |
from distutils.core import setup
from Cython.Build import cythonize
import numpy as np
# Build configuration for the on-the-fly gridder's Cython extensions.
# NOTE(review): cythonize's ``include_path`` is for .pxd lookup; passing the
# NumPy C-header directory there looks redundant with ``include_dirs`` below,
# but it is kept for backward compatibility — confirm before removing.
numpy_include = np.get_include()  # hoisted: compute the include dir once, not twice

setup(
    name="On-the-Fly Gridder",
    ext_modules=cythonize("src/*.pyx", include_path=[numpy_include]),
    include_dirs=[numpy_include],
)
| [
"numpy.get_include"
] | [((225, 241), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (239, 241), True, 'import numpy as np\n'), ((185, 201), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (199, 201), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""Basic Model Interface implementation for River Module"""
import numpy as np
import yaml
from bmipy import Bmi
from .rivermodule import RiverModule
class BmiRiverModule(Bmi):
    """The BMI for the River Avulsion Floodplain Evolution Model."""

    # Component name reported through get_component_name().
    _name = "Rafem"

    # CSDMS standard names accepted by set_value().
    _input_var_names = (
        "land_surface__elevation",
        "channel_exit__x_coordinate",
        "channel_exit__y_coordinate",
    )

    # CSDMS standard names exposed through get_value().
    _output_var_names = (
        "channel_centerline__x_coordinate",
        "channel_centerline__y_coordinate",
        "channel_centerline__elevation",
        "channel_exit_water_sediment~bedload__volume_flow_rate",
        "channel_exit__x_coordinate",
        "channel_exit__y_coordinate",
        "land_surface__elevation",
        "sea_water_surface__elevation",
        # 'avulsion_record',
    )

    def __init__(self):
        """Create a BmiRiver module that is ready for initialization."""
        self._model = None
        self._values = {}
        self._var_units = {}

    def initialize(self, config_file):
        """Read the YAML config, build the RiverModule, and wire up the BMI variable tables."""
        with open(config_file, "r") as fp:
            params = yaml.safe_load(fp)

        params.pop("_version", None)
        # 'days' bounds the run length; default is effectively unbounded.
        self._n_days = params.pop("days", np.finfo("d").max)

        self._model = RiverModule(**params)
        # self._model = RiverModule.from_path(config_file)

        # Lazily-evaluated accessors so get_value() always sees fresh state.
        self._values = {
            "channel_centerline__x_coordinate": lambda: self._model.river_x_coordinates,
            "channel_centerline__y_coordinate": lambda: self._model.river_y_coordinates,
            "channel_exit_water_sediment~bedload__volume_flow_rate": lambda: np.array(
                self._model.sediment_flux
            ),
            "channel_exit__x_coordinate": lambda: np.array(
                self._model.river_x_coordinates[-1]
            ),
            "channel_exit__y_coordinate": lambda: np.array(
                self._model.river_y_coordinates[-1]
            ),
            "land_surface__elevation": lambda: np.array(self._model.elevation),
            "channel_centerline__elevation": lambda: self._model.profile,
            "sea_water_surface__elevation": lambda: np.array(self._model.sea_level),
            "avulsion_record": lambda: self._model.avulsions,
        }

        self._var_units = {
            "channel_centerline__x_coordinate": "m",
            "channel_centerline__y_coordinate": "m",
            "channel_exit_water_sediment~bedload__volume_flow_rate": "m^3 s^-1",
            "channel_exit__x_coordinate": "m",
            "channel_exit__y_coordinate": "m",
            "land_surface__elevation": "m",
            "channel_centerline__elevation": "m",
            "sea_water_surface__elevation": "m",
            "avulsion_record": "none",
        }

        # All exchange items are doubles.
        self._var_type = {}
        for name in self._input_var_names + self._output_var_names:
            self._var_type[name] = str(np.dtype(float))

        # Grid 0: the 2-D elevation raster; grid 1: the centerline vector;
        # grid 2: scalar quantities.
        self._var_grid = {
            "channel_centerline__x_coordinate": 1,
            "channel_centerline__y_coordinate": 1,
            "channel_exit_water_sediment~bedload__volume_flow_rate": 2,
            "channel_exit__x_coordinate": 2,
            "channel_exit__y_coordinate": 2,
            "land_surface__elevation": 0,
            "channel_centerline__elevation": 1,
            "sea_water_surface__elevation": 2,
            "avulsion_record": None,
        }
        self._grid_rank = {0: 2, 1: 1, 2: 0}

    def update(self):
        """Advance the model one time step and refresh the river-mouth location."""
        self._model.advance_in_time()
        self.river_mouth_location = (
            self._model.river_x_coordinates[-1],
            self._model.river_y_coordinates[-1],
        )

    def update_frac(self, time_frac):
        """Update model by a fraction of a time step."""
        time_step = self.get_time_step()
        # Temporarily shrink the step, advance, then restore it.
        self._model.time_step = time_frac * time_step
        self.update()
        self._model.time_step = time_step

    def update_until(self, then):
        """Update model until a particular time."""
        n_steps = (then - self.get_current_time()) / self.get_time_step()
        for _ in range(int(n_steps)):
            self.update()
        # NOTE(review): any fractional remainder of n_steps is dropped here.
        # self.update_frac(n_steps - int(n_steps))

    def finalize(self):
        """No teardown required."""
        pass

    def get_var_type(self, name):
        """Return the data type of the given exchange item."""
        return self._var_type[name]

    def get_var_units(self, name):
        """Return the units of the given exchange item."""
        return self._var_units[name]

    def get_var_nbytes(self, name):
        """Return the total size, in bytes, of the variable's current value."""
        return self._values[name]().nbytes

    def get_var_itemsize(self, name):
        """Return the size, in bytes, of one element of the variable."""
        return np.dtype(self.get_var_type(name)).itemsize

    def get_var_location(self, name):
        """All variables are defined on grid nodes."""
        return "node"

    def get_var_grid(self, name):
        """Return the id of the grid the variable is defined on."""
        return self._var_grid[name]

    def get_grid_rank(self, grid):
        """Return the dimensionality of the given grid (2, 1 or 0)."""
        return self._grid_rank[grid]

    def get_grid_size(self, grid):
        """Return the number of nodes in the given grid."""
        if grid == 0:
            return int(np.prod(self._model.grid_shape))
        elif grid == 1:
            return len(self._model.river_x_coordinates)
        elif grid == 2:
            return 1
        else:
            raise KeyError(grid)

    def get_grid_shape(self, grid, shape):
        """Fill *shape* with the dimensions of the given grid and return it."""
        if grid == 0:
            shape[:] = self._model.grid_shape
        elif grid == 1:
            # NOTE(review): assumes river_x_coordinates is an ndarray here
            # (set_value rebuilds it with np.append) — confirm its initial type.
            shape[:] = self._model.river_x_coordinates.shape
        elif grid == 2:
            shape[0] = 1  # = (1, )
        else:
            raise KeyError(grid)
        return shape

    def get_grid_spacing(self, grid, spacing):
        """Fill *spacing* with the node spacing of the raster grid and return it."""
        if grid == 0:
            spacing[:] = self._model.grid_spacing
        else:
            raise KeyError(grid)
        return spacing

    def get_grid_origin(self, grid, origin):
        """Fill *origin* with the lower-left corner of the raster grid and return it."""
        if grid == 0:
            origin[:] = (0.0, 0.0)
        else:
            raise KeyError(grid)
        return origin

    def get_grid_type(self, grid):
        """Return the BMI grid-type string for the given grid."""
        if grid == 0:
            return "uniform_rectilinear"
        elif grid == 1:
            return "vector"
        elif grid == 2:
            return "scalar"
        else:
            raise KeyError(grid)

    def get_value(self, name, dest):
        """Copy the variable's current value into *dest* (flattened) and return it."""
        dest[:] = self._values[name]().flat
        return dest

    def set_value(self, var_name, new_vals):
        """Set model values."""
        if var_name not in self._input_var_names:
            raise KeyError(var_name)

        if var_name == "land_surface__elevation":
            np.copyto(self._model.elevation.reshape((-1,)), new_vals.reshape((-1,)))
        elif var_name == "channel_exit__x_coordinate":
            # Setting the exit coordinate appends a new point to the centerline.
            self._model.river_x_coordinates = np.append(
                self._model.river_x_coordinates, new_vals
            )
        elif var_name == "channel_exit__y_coordinate":
            self._model.river_y_coordinates = np.append(
                self._model.river_y_coordinates, new_vals
            )

        # Remove duplicate river mouth coordinates (if they exist).
        # This seems clunky... must be better way to get values without
        # duplicating each time?
        # if (self._model.river_x_coordinates[-1] == self._model.river_x_coordinates[-2] and
        #     self._model.river_y_coordinates[-1] == self._model.river_y_coordinates[-2]):
        #     self._model.river_x_coordinates.pop()
        #     self._model.river_y_coordinates.pop()

    def get_component_name(self):
        return self._name

    def get_input_var_names(self):
        return self._input_var_names

    def get_output_var_names(self):
        return self._output_var_names

    def get_input_item_count(self):
        return len(self._input_var_names)

    def get_output_item_count(self):
        return len(self._output_var_names)

    def get_start_time(self):
        return 0.0

    def get_end_time(self):
        return self._n_days

    def get_current_time(self):
        return self._model.time

    def get_time_step(self):
        return self._model.time_step

    def get_time_units(self):
        """Model time is measured in days."""
        return "d"

    def get_grid_node_count(self, grid):
        """Return the number of nodes in the given grid (same as get_grid_size)."""
        if grid == 0:
            return int(np.prod(self._model.grid_shape))
        elif grid == 1:
            return len(self._model.river_x_coordinates)
        elif grid == 2:
            return 1
        else:
            raise KeyError(grid)

    # The remaining BMI methods apply only to unstructured grids or
    # pointer-based access, which this component does not support.
    def get_grid_edge_count(self, grid):
        raise NotImplementedError("get_grid_edge_count")

    def get_grid_face_count(self, grid):
        raise NotImplementedError("get_grid_face_count")

    def get_grid_edge_nodes(self, grid, edge_nodes):
        raise NotImplementedError("get_grid_edge_nodes")

    def get_grid_face_edges(self, grid, face_edges):
        raise NotImplementedError("get_grid_face_edges")

    def get_grid_face_nodes(self, grid, face_nodes):
        raise NotImplementedError("get_grid_edge_nodes")

    def get_grid_nodes_per_face(self, grid, nodes_per_face):
        raise NotImplementedError("get_grid_nodes_per_face")

    def get_grid_x(self, grid, x):
        raise NotImplementedError("get_grid_x")

    def get_grid_y(self, grid, y):
        raise NotImplementedError("get_grid_y")

    def get_grid_z(self, grid, z):
        raise NotImplementedError("get_grid_z")

    def get_value_at_indices(self, name, dest, inds):
        raise NotImplementedError("get_value_at_indices")

    def get_value_ptr(self, name):
        raise NotImplementedError("get_value_ptr")

    def set_value_at_indices(self, name, ids, src):
        raise NotImplementedError("set_value_at_indices")
| [
"numpy.prod",
"numpy.append",
"yaml.safe_load",
"numpy.array",
"numpy.finfo",
"numpy.dtype"
] | [((1127, 1145), 'yaml.safe_load', 'yaml.safe_load', (['fp'], {}), '(fp)\n', (1141, 1145), False, 'import yaml\n'), ((1227, 1240), 'numpy.finfo', 'np.finfo', (['"""d"""'], {}), "('d')\n", (1235, 1240), True, 'import numpy as np\n'), ((1631, 1666), 'numpy.array', 'np.array', (['self._model.sediment_flux'], {}), '(self._model.sediment_flux)\n', (1639, 1666), True, 'import numpy as np\n'), ((1748, 1793), 'numpy.array', 'np.array', (['self._model.river_x_coordinates[-1]'], {}), '(self._model.river_x_coordinates[-1])\n', (1756, 1793), True, 'import numpy as np\n'), ((1875, 1920), 'numpy.array', 'np.array', (['self._model.river_y_coordinates[-1]'], {}), '(self._model.river_y_coordinates[-1])\n', (1883, 1920), True, 'import numpy as np\n'), ((1999, 2030), 'numpy.array', 'np.array', (['self._model.elevation'], {}), '(self._model.elevation)\n', (2007, 2030), True, 'import numpy as np\n'), ((2158, 2189), 'numpy.array', 'np.array', (['self._model.sea_level'], {}), '(self._model.sea_level)\n', (2166, 2189), True, 'import numpy as np\n'), ((2901, 2916), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (2909, 2916), True, 'import numpy as np\n'), ((4824, 4855), 'numpy.prod', 'np.prod', (['self._model.grid_shape'], {}), '(self._model.grid_shape)\n', (4831, 4855), True, 'import numpy as np\n'), ((6471, 6523), 'numpy.append', 'np.append', (['self._model.river_x_coordinates', 'new_vals'], {}), '(self._model.river_x_coordinates, new_vals)\n', (6480, 6523), True, 'import numpy as np\n'), ((7942, 7973), 'numpy.prod', 'np.prod', (['self._model.grid_shape'], {}), '(self._model.grid_shape)\n', (7949, 7973), True, 'import numpy as np\n'), ((6655, 6707), 'numpy.append', 'np.append', (['self._model.river_y_coordinates', 'new_vals'], {}), '(self._model.river_y_coordinates, new_vals)\n', (6664, 6707), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2020 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import copy
import matplotlib.pyplot as plt
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../'))
from dfvm import Props, Boundary, Local, Convective, Equation
from dfvm import calc_a_func, calc_b_func, calc_poro
from dfvm import plot_x_y
from sgrid import Sgrid, save_files_collection_to_file
# model geometry: a 5x5x2-point structured grid with unit spacing
points_dims = [5, 5, 2]
points_origin = [0., 0., 0.]
spacing = [1., 1., 1.]
sgrid = Sgrid(points_dims, points_origin, spacing)
points_array = np.random.rand(sgrid.points_N)
points_arrays = {"points_array": points_array}
# all cells take part in the computation
active_cells = np.arange(sgrid.cells_N, dtype=np.uint64)
# initial concentration (two arrays: current and previous time level)
conc_ini = float(0.0)
concs_array1 = np.tile(conc_ini, sgrid.cells_N)
concs_array2 = np.tile(conc_ini, sgrid.cells_N)
concs_arrays = {"concs_array1": concs_array1,
                "concs_array2": concs_array2}
sgrid.cells_arrays = concs_arrays
sgrid.set_cells_type('active', active_cells)
sgrid.process_type_by_cells_type('active')
# computation time
time_period = float(1000) # sec
# numerical time step
time_step = float(1) # sec
# diffusivity coeffs (specify only b coeff to make free diffusion constant)
d_coeff_a = float(0) # m2/sec
d_coeff_b = float(15.E-3) # m2/sec
# porosity of rock
poro_ini = float(1)
params = {'time_period': time_period, 'time_step': time_step,
          'd_coeff_a': d_coeff_a, 'd_coeff_b': d_coeff_b,
          'poro': poro_ini}
# Dirichlet boundaries are applied on the left and right faces
key_dirichlet_one = 'left'
key_dirichlet_two = 'right'
props = Props(params)
boundary = Boundary(props, sgrid)
# deep-copied so boundary shifting does not mutate the grid's face lists
boundary_faces_one = copy.deepcopy(sgrid.types_faces[key_dirichlet_one])
boundary_faces_two = copy.deepcopy(sgrid.types_faces[key_dirichlet_two])
boundary_face_one = sgrid.types_faces[key_dirichlet_one][0]
boundary_faces_one_axis = sgrid.faces_axes[boundary_face_one]
boundary_face_two = sgrid.types_faces[key_dirichlet_two][0]
boundary_faces_two_axis = sgrid.faces_axes[boundary_face_two]
boundary.shift_boundary_faces(boundary_faces_one, boundary_faces_one_axis)
boundary.shift_boundary_faces(boundary_faces_two, boundary_faces_two_axis)
local = Local(props, sgrid)
convective = Convective(props, sgrid)
equation = Equation(props, sgrid, local, convective)
# dirichlet cells (options: left, right, top, bottom, front, back)
equation.bound_groups_dirich = [key_dirichlet_one, key_dirichlet_two]
# concentration on dirichlet cells
conc_left = float(20)
conc_right = float(0)
equation.concs_bound_dirich = {key_dirichlet_one: conc_left,
                               key_dirichlet_two: conc_right}
# equation.cfd_procedure()
# CFD procedure: advance the diffusion equation one step at a time,
# recording the concentration field and the boundary flow rates
concs = [concs_array1, concs_array2]
equation.concs = concs
local.calc_time_steps()
time_steps = local.time_steps
concs_time = []
conc_curr = copy.deepcopy(equation.concs[equation.i_curr])
concs_time.append(conc_curr)
flow_rate_one_time = []
flow_rate_two_time = []
# NOTE(review): this loop variable shadows the scalar `time_step`
# defined above — rename one of them to avoid confusion.
for time_step in time_steps:
    # advance one step (porosity may be modified inside the equation)
    equation.cfd_procedure_one_step(time_step)
    conc_curr = copy.deepcopy(equation.concs[equation.i_curr])
    concs_time.append(conc_curr)
    flow_rate_boundary_one = equation.calc_faces_flow_rate(boundary_faces_one)
    flow_rate_boundary_two = equation.calc_faces_flow_rate(boundary_faces_two)
    flow_rate_one_time.append(flow_rate_boundary_one)
    flow_rate_two_time.append(flow_rate_boundary_two)
    # new Dirichlet boundaries can be input here
    equation.concs_bound_dirich = {key_dirichlet_one: conc_left, key_dirichlet_two: conc_right}
equation.concs_time = concs_time
#
# visualising 'a' and 'b' coefficients and porosity
# set concentration range for visualisation
a_list = []
b_list = []
poro_list = []
conc_list = []
for i in range(int(conc_right), int(conc_left)):
    conc_list.append(i)
    a_list.append(calc_a_func(i, poro_ini))
    b_list.append(calc_b_func(i, d_coeff_b, poro_ini))
    poro_list.append(calc_poro(i, poro_ini))
# plotting the dependence of 'a' and 'b' coefficients and porosity on free concentration
fig, axs = plt.subplots(3, sharex=True)
plot_x_y(axs[0], conc_list, a_list, 'concentration', 'coeff a', '-',
         color='green')
plot_x_y(axs[1], conc_list, b_list, 'concentration', 'coeff b', '-',
         color='blue')
plot_x_y(axs[2], conc_list, poro_list, 'concentration', 'poro', '-',
         color='red')
axs[0].legend('a', loc="best")
axs[1].legend('b', loc="best")
axs[2].legend(['poro'], loc="best")
# plotting inlet and outlet flow rates vs time
time = np.cumsum(np.array(time_steps))
fig1, ax1 = plt.subplots()
plot_x_y(ax1, time, flow_rate_one_time, 'time', 'G, kg/sec', '-',
         color='green')
plot_x_y(ax1, time, flow_rate_two_time, 'time', 'G, kg/sec', '-',
         color='blue')
ax1.legend(['$Q_{in}$', '$Q_{out}$'], loc="best")
# saving results to paraview: wipe previous VTK output via the shell
os.system('rm -r inOut/*.vtu')
os.system('rm -r inOut/*.pvd')
concs_dict = dict()  # NOTE(review): appears unused — candidate for removal
file_name = 'inOut/collection.pvd'
files_names = list()
files_descriptions = list()
# write one .vtu snapshot per time step and collect them in a .pvd index
for i in range(len(local.time_steps)):
    sgrid.cells_arrays = {'conc_i': equation.concs_time[i]}
    files_names.append(str(i) + '.vtu')
    files_descriptions.append(str(i))
    sgrid.save_cells('inOut/' + files_names[i])
save_files_collection_to_file(file_name, files_names, files_descriptions)
| [
"dfvm.Local",
"numpy.random.rand",
"numpy.array",
"dfvm.calc_a_func",
"copy.deepcopy",
"dfvm.Equation",
"dfvm.calc_poro",
"numpy.arange",
"dfvm.Boundary",
"sgrid.save_files_collection_to_file",
"numpy.tile",
"dfvm.plot_x_y",
"sgrid.Sgrid",
"dfvm.calc_b_func",
"dfvm.Props",
"dfvm.Convec... | [((1608, 1650), 'sgrid.Sgrid', 'Sgrid', (['points_dims', 'points_origin', 'spacing'], {}), '(points_dims, points_origin, spacing)\n', (1613, 1650), False, 'from sgrid import Sgrid, save_files_collection_to_file\n'), ((1666, 1696), 'numpy.random.rand', 'np.random.rand', (['sgrid.points_N'], {}), '(sgrid.points_N)\n', (1680, 1696), True, 'import numpy as np\n'), ((1760, 1801), 'numpy.arange', 'np.arange', (['sgrid.cells_N'], {'dtype': 'np.uint64'}), '(sgrid.cells_N, dtype=np.uint64)\n', (1769, 1801), True, 'import numpy as np\n'), ((1863, 1895), 'numpy.tile', 'np.tile', (['conc_ini', 'sgrid.cells_N'], {}), '(conc_ini, sgrid.cells_N)\n', (1870, 1895), True, 'import numpy as np\n'), ((1911, 1943), 'numpy.tile', 'np.tile', (['conc_ini', 'sgrid.cells_N'], {}), '(conc_ini, sgrid.cells_N)\n', (1918, 1943), True, 'import numpy as np\n'), ((2658, 2671), 'dfvm.Props', 'Props', (['params'], {}), '(params)\n', (2663, 2671), False, 'from dfvm import Props, Boundary, Local, Convective, Equation\n'), ((2683, 2705), 'dfvm.Boundary', 'Boundary', (['props', 'sgrid'], {}), '(props, sgrid)\n', (2691, 2705), False, 'from dfvm import Props, Boundary, Local, Convective, Equation\n'), ((2727, 2778), 'copy.deepcopy', 'copy.deepcopy', (['sgrid.types_faces[key_dirichlet_one]'], {}), '(sgrid.types_faces[key_dirichlet_one])\n', (2740, 2778), False, 'import copy\n'), ((2800, 2851), 'copy.deepcopy', 'copy.deepcopy', (['sgrid.types_faces[key_dirichlet_two]'], {}), '(sgrid.types_faces[key_dirichlet_two])\n', (2813, 2851), False, 'import copy\n'), ((3255, 3274), 'dfvm.Local', 'Local', (['props', 'sgrid'], {}), '(props, sgrid)\n', (3260, 3274), False, 'from dfvm import Props, Boundary, Local, Convective, Equation\n'), ((3288, 3312), 'dfvm.Convective', 'Convective', (['props', 'sgrid'], {}), '(props, sgrid)\n', (3298, 3312), False, 'from dfvm import Props, Boundary, Local, Convective, Equation\n'), ((3324, 3365), 'dfvm.Equation', 'Equation', (['props', 'sgrid', 'local', 
'convective'], {}), '(props, sgrid, local, convective)\n', (3332, 3365), False, 'from dfvm import Props, Boundary, Local, Convective, Equation\n'), ((3894, 3940), 'copy.deepcopy', 'copy.deepcopy', (['equation.concs[equation.i_curr]'], {}), '(equation.concs[equation.i_curr])\n', (3907, 3940), False, 'import copy\n'), ((5129, 5157), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)'}), '(3, sharex=True)\n', (5141, 5157), True, 'import matplotlib.pyplot as plt\n'), ((5158, 5246), 'dfvm.plot_x_y', 'plot_x_y', (['axs[0]', 'conc_list', 'a_list', '"""concentration"""', '"""coeff a"""', '"""-"""'], {'color': '"""green"""'}), "(axs[0], conc_list, a_list, 'concentration', 'coeff a', '-', color=\n 'green')\n", (5166, 5246), False, 'from dfvm import plot_x_y\n'), ((5251, 5338), 'dfvm.plot_x_y', 'plot_x_y', (['axs[1]', 'conc_list', 'b_list', '"""concentration"""', '"""coeff b"""', '"""-"""'], {'color': '"""blue"""'}), "(axs[1], conc_list, b_list, 'concentration', 'coeff b', '-', color=\n 'blue')\n", (5259, 5338), False, 'from dfvm import plot_x_y\n'), ((5343, 5429), 'dfvm.plot_x_y', 'plot_x_y', (['axs[2]', 'conc_list', 'poro_list', '"""concentration"""', '"""poro"""', '"""-"""'], {'color': '"""red"""'}), "(axs[2], conc_list, poro_list, 'concentration', 'poro', '-', color=\n 'red')\n", (5351, 5429), False, 'from dfvm import plot_x_y\n'), ((5631, 5645), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5643, 5645), True, 'import matplotlib.pyplot as plt\n'), ((5646, 5731), 'dfvm.plot_x_y', 'plot_x_y', (['ax1', 'time', 'flow_rate_one_time', '"""time"""', '"""G, kg/sec"""', '"""-"""'], {'color': '"""green"""'}), "(ax1, time, flow_rate_one_time, 'time', 'G, kg/sec', '-', color='green'\n )\n", (5654, 5731), False, 'from dfvm import plot_x_y\n'), ((5736, 5815), 'dfvm.plot_x_y', 'plot_x_y', (['ax1', 'time', 'flow_rate_two_time', '"""time"""', '"""G, kg/sec"""', '"""-"""'], {'color': '"""blue"""'}), "(ax1, time, flow_rate_two_time, 'time', 'G, 
kg/sec', '-', color='blue')\n", (5744, 5815), False, 'from dfvm import plot_x_y\n'), ((5907, 5937), 'os.system', 'os.system', (['"""rm -r inOut/*.vtu"""'], {}), "('rm -r inOut/*.vtu')\n", (5916, 5937), False, 'import os\n'), ((5938, 5968), 'os.system', 'os.system', (['"""rm -r inOut/*.pvd"""'], {}), "('rm -r inOut/*.pvd')\n", (5947, 5968), False, 'import os\n'), ((6299, 6372), 'sgrid.save_files_collection_to_file', 'save_files_collection_to_file', (['file_name', 'files_names', 'files_descriptions'], {}), '(file_name, files_names, files_descriptions)\n', (6328, 6372), False, 'from sgrid import Sgrid, save_files_collection_to_file\n'), ((1230, 1255), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1245, 1255), False, 'import os\n'), ((1273, 1306), 'os.path.join', 'os.path.join', (['current_path', '"""../"""'], {}), "(current_path, '../')\n", (1285, 1306), False, 'import os\n'), ((4134, 4180), 'copy.deepcopy', 'copy.deepcopy', (['equation.concs[equation.i_curr]'], {}), '(equation.concs[equation.i_curr])\n', (4147, 4180), False, 'import copy\n'), ((5597, 5617), 'numpy.array', 'np.array', (['time_steps'], {}), '(time_steps)\n', (5605, 5617), True, 'import numpy as np\n'), ((4902, 4926), 'dfvm.calc_a_func', 'calc_a_func', (['i', 'poro_ini'], {}), '(i, poro_ini)\n', (4913, 4926), False, 'from dfvm import calc_a_func, calc_b_func, calc_poro\n'), ((4946, 4981), 'dfvm.calc_b_func', 'calc_b_func', (['i', 'd_coeff_b', 'poro_ini'], {}), '(i, d_coeff_b, poro_ini)\n', (4957, 4981), False, 'from dfvm import calc_a_func, calc_b_func, calc_poro\n'), ((5004, 5026), 'dfvm.calc_poro', 'calc_poro', (['i', 'poro_ini'], {}), '(i, poro_ini)\n', (5013, 5026), False, 'from dfvm import calc_a_func, calc_b_func, calc_poro\n')] |
import sys
import os
import numpy as np
import random
import time
import pickle
import fitness
from display_room import display_room
import fitness
from utils import print_iter_msg
from utils import stopwatch
# ----------------------------------------------------------------
# Generate the room, from the specified parameters
# ----------------------------------------------------------------
def generate_room(p):
    """Grow a room by randomly adding/removing cubes for p.NB_ITER_TOTAL
    iterations, then pickle the result and report its fitness.

    p is a parameter object; the fields read here are: INI_PATTERN, N,
    NB_ITER_TOTAL, NB_ITER_INIT, SHOW_INTERIM, INTERIM_LOG_ITERX,
    INTERIM_PRINT_ITERX, RESULTS_PATH and ROOM_NAME.
    """
    # Log the progress
    t_ini = time.time()
    t_now = t_ini
    iterx = 0
    print_iter_msg('Room', iterx, p.NB_ITER_TOTAL, t_now, t_ini)
    # Room initialization
    room, lookup_add, lookup_rmv = room_initialization(p.INI_PATTERN, p.N)
    # Get a "fitness class" (he he he)
    f = fitness.Fitness()
    # Initial display
    if p.SHOW_INTERIM and iterx == 0:
        f.assess_room_fitness(room, p.N)
        display_room(room, lookup_add, lookup_rmv, t_now, t_ini, p, f, iterx=0, x=0, y=0, flip_sign=0)
    # Iterative flips: cubes are randomly added or removed in the room, legal actions are tracked by the "lookup_" maps
    for iterx in range(p.NB_ITER_TOTAL):
        # Iteration number after the iteration zero
        iterx_plus_one = iterx +1
        # Log the progress
        if iterx_plus_one % p.INTERIM_LOG_ITERX == 0:
            t_now = time.time()
            print_iter_msg('Room', iterx_plus_one, p.NB_ITER_TOTAL, t_now, t_ini)
        # Randomly choose whether to add or remove a cube
        lookup_flip, flip_sign = randomly_choose_add_or_rmv(
            lookup_add, lookup_rmv, iterx, p.NB_ITER_INIT, p.INI_PATTERN)
        # Randomly select a cube to flip, and retrieve its [x, y] coordinates
        x, y = randomly_choose_cube(lookup_flip)
        # Alter the cube in the room
        room[y, x] += flip_sign
        # Update the possible actions
        lookup_add = update_lookup_add(lookup_add, room, x, y, p.N)
        lookup_rmv = update_lookup_rmv(lookup_rmv, room, x, y, p.N)
        # Intermediate display(s)
        # NOTE(review): t_now is only refreshed every INTERIM_LOG_ITERX
        # iterations, so the time shown here may be slightly stale.
        if p.SHOW_INTERIM and iterx_plus_one % p.INTERIM_PRINT_ITERX == 0:
            f.assess_room_fitness(room, p.N)
            display_room(room, lookup_add, lookup_rmv, t_now, t_ini, p, f, iterx_plus_one, x, y, flip_sign)
    # Save the room
    with open(os.path.join(p.RESULTS_PATH, p.ROOM_NAME), 'wb') as fid:
        pickle.dump(room, fid)
    # Final display
    if p.NB_ITER_TOTAL > 0:
        t_now = time.time()
        print_iter_msg('Room', p.NB_ITER_TOTAL, p.NB_ITER_TOTAL, t_now, t_ini)
        f.assess_room_fitness(room, p.N)
        display_room(room, lookup_add, lookup_rmv, t_now, t_ini, p, f, p.NB_ITER_TOTAL, x=0, y=0, flip_sign=0)
        # Print the fitness metrics in the console
        print('Fitness\t| Monotony: {}, Filling: {}, Poles: ({}, {}), ({}, {}), ({}, {})'.format(
            f.monotony,
            f.filling,
            f.x_full,
            f.x_empty,
            f.y_full,
            f.y_empty,
            f.z_full,
            f.z_empty))
# ----------------------------------------------------------------
# Room initialization
# ----------------------------------------------------------------
def room_initialization(INI_PATTERN, N):
    """Create the starting room together with its legal-action maps.

    Args:
        INI_PATTERN: name of the starting configuration; one of 'empty',
            'full', 'random_half', 'poles_2_6_10', 'poles_4_8_12',
            'arctic_circle'.
        N: side length of the square room.

    Returns:
        (room, lookup_add, lookup_rmv): an (N, N) int32 height map plus the
        Boolean maps of cells where a cube may currently be added / removed.
    """
    if INI_PATTERN == 'empty':
        # Empty room: the only legal move is adding the cube tucked in corner (0, 0).
        room = np.zeros((N, N), dtype=np.int32)
        lookup_add = np.zeros((N, N), dtype=bool)
        lookup_rmv = np.zeros((N, N), dtype=bool)
        lookup_add[0, 0] = True
        return room, lookup_add, lookup_rmv
    if INI_PATTERN in ('full', 'random_half'):
        # Full room: the only legal move is removing the outermost cube (N, N).
        room = N * np.ones((N, N), dtype=np.int32)
        lookup_add = np.zeros((N, N), dtype=bool)
        lookup_rmv = np.zeros((N, N), dtype=bool)
        lookup_rmv[N - 1, N - 1] = True
        return room, lookup_add, lookup_rmv
    if INI_PATTERN == 'poles_2_6_10':
        # Fill up to the plane through the three hex vertices closest to corner (0, 0).
        return generate_poles(N, N, 0)
    if INI_PATTERN == 'poles_4_8_12':
        # Fill up to the plane through the three hex vertices farthest from corner (0, 0).
        return generate_poles(N, 2 * N, 2)
    if INI_PATTERN == 'arctic_circle':
        # Perfect arctic circle (if N is even): 2-4-6-8-10-12 o'clock frozen poles.
        return generate_poles(N, int(np.ceil(1.5 * N)), 1)
    # Non valid input: report and abort.
    print('ERROR: Invalid value for parameter "INI_PATTERN": ' + str(INI_PATTERN))
    sys.exit()
# ----------------------------------------------------------------
# Initialize the room with perfect frozen poles (either 2-6-10 o'clock, 4-8-12 o'clock, or 2-4-6-8-10-12 o'clock)
# ----------------------------------------------------------------
def generate_poles(N, thresh, offset):
    """Initialize a room whose height decreases linearly along the diagonal.

    The height at cell (x, y) is ``clip(thresh - (x + y + offset), 0, N)``,
    i.e. a plane sliced by the [0, N] height range. A cube can be added
    (removed) at a cell exactly where shifting the plane one step up (down)
    would change that cell's clamped height.

    Args:
        N: side length of the square room.
        thresh: plane intercept controlling how far the filling reaches.
        offset: shift of the plane along the x + y diagonal.

    Returns:
        (room, lookup_add, lookup_rmv) as in room_initialization.
    """
    # diag[y, x] == x + y; the height only depends on this diagonal index.
    diag = np.arange(N)[:, None] + np.arange(N)[None, :]
    plane = thresh - (diag + offset)
    room = np.clip(plane, 0, N).astype(np.int32)
    # Heights of the same plane shifted by one step in each direction.
    taller = np.clip(plane + 1, 0, N)
    shorter = np.clip(plane - 1, 0, N)
    # A cell is addable/removable wherever clamping did not freeze its height.
    lookup_add = room != taller
    lookup_rmv = room != shorter
    return room, lookup_add, lookup_rmv
# ----------------------------------------------------------------
# Randomly choose a cube that can be either added or removed (as specified by "lookup") and get its [x, y] coordinates
# ----------------------------------------------------------------
def randomly_choose_cube(lookup):
    """Pick one True cell of *lookup* uniformly at random.

    Args:
        lookup: Boolean (N, N) map of currently legal cells.

    Returns:
        (x, y) coordinates of the chosen cell.
    """
    # argwhere yields rows of [y, x] for every legal cell.
    candidates = np.argwhere(lookup)
    pick = np.random.randint(low=0, high=lookup.sum())
    y, x = candidates[pick]
    return x, y
# ----------------------------------------------------------------
# Randomly choose whether to add or remove a cube
# ----------------------------------------------------------------
def randomly_choose_add_or_rmv(lookup_add, lookup_rmv, iterx, NB_ITER_INIT, INI_PATTERN):
    """Decide whether this iteration adds (+1) or removes (-1) a cube.

    During the burn-in of the 'random_half' pattern only removals are
    allowed; afterwards the choice is random, weighted by how many legal
    add vs. remove moves currently exist.

    Returns:
        (lookup_flip, flip_sign): the lookup map to draw the cube from and
        the corresponding height delta (+1 or -1).
    """
    if INI_PATTERN == 'random_half' and iterx < NB_ITER_INIT:
        # Burn-in: keep removing until a random half-full room is reached.
        return lookup_rmv, -1
    # Weight each move type by its current number of legal cells.
    n_add = lookup_add.sum()
    n_rmv = lookup_rmv.sum()
    weight_add = n_add / (n_add + n_rmv)
    choice = random.choices(population=['add', 'rmv'], weights=[weight_add, 1.0 - weight_add])[0]
    if choice == 'add':
        return lookup_add, +1
    return lookup_rmv, -1
# ----------------------------------------------------------------
# Update the Boolean lookup map that indicates where cubes could be added
# ----------------------------------------------------------------
def update_lookup_add(lookup_add, room, x, y, N):
# Enable self, if own height became strictly smaller than the height of the x/y previous neighbor
if x == 0 and y == 0:
lookup_add[y, x] = True
elif x > 0 and y == 0:
if room[y, x] < room[y, x-1]:
lookup_add[y, x] = True
elif x == 0 and y > 0:
if room[y, x] < room[y-1, x]:
lookup_add[y, x] = True
elif x > 0 and y > 0:
if room[y, x] < room[y, x-1] and room[y, x] < room[y-1, x]:
lookup_add[y, x] = True
# Disable self, if own height became equal to the maximal height
if room[y, x] == N:
lookup_add[y, x] = False
# Disable self, if own height became equal to the height of the x/y previous neighbor
if x > 0:
if room[y, x] == room[y, x-1]:
lookup_add[y, x] = False
if y > 0:
if room[y, x] == room[y-1, x]:
lookup_add[y, x] = False
# Disable the x/y next neighbor, if own height became equal to the height of the x/y next neighbor
if x < N-1:
if room[y, x] == room[y, x+1]:
lookup_add[y, x+1] = False
if y < N-1:
if room[y, x] == room[y+1, x]:
lookup_add[y+1, x] = False
# Enable the x/y next neighbor, if own height became strictly larger than the height of the x/y previous neighbor
if x < N-1:
if room[y, x] > room[y, x+1]:
if y == 0:
lookup_add[y, x+1] = True
else:
if room[y-1, x+1] > room[y, x+1]:
lookup_add[y, x+1] = True
if y < N-1:
if room[y, x] > room[y+1, x]:
if x == 0:
lookup_add[y+1, x] = True
else:
if room[y+1, x-1] > room[y+1, x]:
lookup_add[y+1, x] = True
return lookup_add
# ----------------------------------------------------------------
# Update the Boolean lookup map that indicates where cubes could be removed
# ----------------------------------------------------------------
def update_lookup_rmv(lookup_rmv, room, x, y, N):
N_MINUS_ONE = N-1
# Disable self, if own height became equal to the minimal height, otherwise enable self
if room[y, x] == 0:
lookup_rmv[y, x] = False
else:
lookup_rmv[y, x] = True
# Disable self, if own height became equal to the height of the x/y next neighbor
if x < N_MINUS_ONE:
if room[y, x] == room[y, x+1]:
lookup_rmv[y, x] = False
if y < N_MINUS_ONE:
if room[y, x] == room[y+1, x]:
lookup_rmv[y, x] = False
# Disable the x/y previous neighbor, if own height became equal to the height of the x/y previous neighbor
if x > 0:
if room[y, x] == room[y, x-1]:
lookup_rmv[y, x-1] = False
if y > 0:
if room[y, x] == room[y-1, x]:
lookup_rmv[y-1, x] = False
# Enable the x/y previous neighbor, if own height became strictly smaller than the height of the x/y previous neighbor
if x > 0:
if room[y, x-1] > room[y, x]:
if y == N_MINUS_ONE:
lookup_rmv[y, x-1] = True
else:
if room[y, x-1] > room[y+1, x-1]:
lookup_rmv[y, x-1] = True
if y > 0:
if room[y-1, x] > room[y, x]:
if x == N_MINUS_ONE:
lookup_rmv[y-1, x] = True
else:
if room[y-1, x] > room[y-1, x+1]:
lookup_rmv[y-1, x] = True
return lookup_rmv
| [
"utils.print_iter_msg",
"numpy.ceil",
"pickle.dump",
"numpy.ones",
"os.path.join",
"numpy.zeros",
"numpy.argwhere",
"random.choices",
"display_room.display_room",
"sys.exit",
"fitness.Fitness",
"time.time"
] | [((450, 461), 'time.time', 'time.time', ([], {}), '()\n', (459, 461), False, 'import time\n'), ((492, 552), 'utils.print_iter_msg', 'print_iter_msg', (['"""Room"""', 'iterx', 'p.NB_ITER_TOTAL', 't_now', 't_ini'], {}), "('Room', iterx, p.NB_ITER_TOTAL, t_now, t_ini)\n", (506, 552), False, 'from utils import print_iter_msg\n'), ((695, 712), 'fitness.Fitness', 'fitness.Fitness', ([], {}), '()\n', (710, 712), False, 'import fitness\n'), ((4863, 4895), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'np.int32'}), '((N, N), dtype=np.int32)\n', (4871, 4895), True, 'import numpy as np\n'), ((4913, 4941), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (4921, 4941), True, 'import numpy as np\n'), ((4959, 4987), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (4967, 4987), True, 'import numpy as np\n'), ((811, 909), 'display_room.display_room', 'display_room', (['room', 'lookup_add', 'lookup_rmv', 't_now', 't_ini', 'p', 'f'], {'iterx': '(0)', 'x': '(0)', 'y': '(0)', 'flip_sign': '(0)'}), '(room, lookup_add, lookup_rmv, t_now, t_ini, p, f, iterx=0, x=0,\n y=0, flip_sign=0)\n', (823, 909), False, 'from display_room import display_room\n'), ((2179, 2201), 'pickle.dump', 'pickle.dump', (['room', 'fid'], {}), '(room, fid)\n', (2190, 2201), False, 'import pickle\n'), ((2259, 2270), 'time.time', 'time.time', ([], {}), '()\n', (2268, 2270), False, 'import time\n'), ((2275, 2345), 'utils.print_iter_msg', 'print_iter_msg', (['"""Room"""', 'p.NB_ITER_TOTAL', 'p.NB_ITER_TOTAL', 't_now', 't_ini'], {}), "('Room', p.NB_ITER_TOTAL, p.NB_ITER_TOTAL, t_now, t_ini)\n", (2289, 2345), False, 'from utils import print_iter_msg\n'), ((2387, 2494), 'display_room.display_room', 'display_room', (['room', 'lookup_add', 'lookup_rmv', 't_now', 't_ini', 'p', 'f', 'p.NB_ITER_TOTAL'], {'x': '(0)', 'y': '(0)', 'flip_sign': '(0)'}), '(room, lookup_add, lookup_rmv, t_now, t_ini, p, f, p.\n NB_ITER_TOTAL, x=0, y=0, 
flip_sign=0)\n', (2399, 2494), False, 'from display_room import display_room\n'), ((3083, 3115), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'np.int32'}), '((N, N), dtype=np.int32)\n', (3091, 3115), True, 'import numpy as np\n'), ((3135, 3163), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (3143, 3163), True, 'import numpy as np\n'), ((3211, 3239), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (3219, 3239), True, 'import numpy as np\n'), ((5702, 5721), 'numpy.argwhere', 'np.argwhere', (['lookup'], {}), '(lookup)\n', (5713, 5721), True, 'import numpy as np\n'), ((1231, 1242), 'time.time', 'time.time', ([], {}), '()\n', (1240, 1242), False, 'import time\n'), ((1249, 1318), 'utils.print_iter_msg', 'print_iter_msg', (['"""Room"""', 'iterx_plus_one', 'p.NB_ITER_TOTAL', 't_now', 't_ini'], {}), "('Room', iterx_plus_one, p.NB_ITER_TOTAL, t_now, t_ini)\n", (1263, 1318), False, 'from utils import print_iter_msg\n'), ((1991, 2090), 'display_room.display_room', 'display_room', (['room', 'lookup_add', 'lookup_rmv', 't_now', 't_ini', 'p', 'f', 'iterx_plus_one', 'x', 'y', 'flip_sign'], {}), '(room, lookup_add, lookup_rmv, t_now, t_ini, p, f,\n iterx_plus_one, x, y, flip_sign)\n', (2003, 2090), False, 'from display_room import display_room\n'), ((2118, 2159), 'os.path.join', 'os.path.join', (['p.RESULTS_PATH', 'p.ROOM_NAME'], {}), '(p.RESULTS_PATH, p.ROOM_NAME)\n', (2130, 2159), False, 'import os\n'), ((3455, 3483), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (3463, 3483), True, 'import numpy as np\n'), ((3503, 3531), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'bool'}), '((N, N), dtype=bool)\n', (3511, 3531), True, 'import numpy as np\n'), ((6423, 6498), 'random.choices', 'random.choices', ([], {'population': "['add', 'rmv']", 'weights': '[weight_add, weight_rmv]'}), "(population=['add', 'rmv'], weights=[weight_add, weight_rmv])\n", (6437, 6498), 
False, 'import random\n'), ((3404, 3435), 'numpy.ones', 'np.ones', (['(N, N)'], {'dtype': 'np.int32'}), '((N, N), dtype=np.int32)\n', (3411, 3435), True, 'import numpy as np\n'), ((4465, 4475), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4473, 4475), False, 'import sys\n'), ((4245, 4261), 'numpy.ceil', 'np.ceil', (['(1.5 * N)'], {}), '(1.5 * N)\n', (4252, 4261), True, 'import numpy as np\n')] |
import os
import numpy as np
import argparse
from UCTB.model import ARIMA
from UCTB.dataset import NodeTrafficLoader
from UCTB.evaluation import metric
from UCTB.utils import multiple_process
parser = argparse.ArgumentParser(description="Argument Parser")
# data source
parser.add_argument('--Dataset', default='Bike')
parser.add_argument('--City', default='NYC')
# model parameters
parser.add_argument('--CT', default='24', type=int)  # closeness window length, fed to closeness_len below
# non-seasonal ARIMA order (ar, d, ma) — consumed by the ARIMA model in task()
parser.add_argument('--ar', default='6', type=int)
parser.add_argument('--d', default='0', type=int)
parser.add_argument('--ma', default='1', type=int)
# seasonal ARIMA order (sar, sd, sma, sp)
parser.add_argument('--sar', default='0', type=int)
parser.add_argument('--sd', default='0', type=int)
parser.add_argument('--sma', default='0', type=int)
parser.add_argument('--sp', default='0', type=int)
parser.add_argument('--DataRange', default='All')
parser.add_argument('--TrainDays', default='365')
args = vars(parser.parse_args())
# Load the closeness series only (period/trend disabled) with all auxiliary
# options turned off; ARIMA consumes the raw, un-normalized series directly.
data_loader = NodeTrafficLoader(dataset=args['Dataset'], city=args['City'],
                              closeness_len=int(args['CT']), period_len=0, trend_len=0,
                              data_range=args['DataRange'], train_data_length=args['TrainDays'],
                              with_lm=False, with_tpe=False, normalize=False)
def task(share_queue, locker, data, parameters):
    """Worker-process body: fit one ARIMA model per assigned station.

    Args:
        share_queue: multiprocessing queue used to hand results back to the parent.
        locker: lock guarding the queue `put`.
        data: iterable of station indices assigned to this worker.
        parameters: extra per-worker values; parameters[0] is used as the
            worker label in log messages.

    Puts [val_collector, test_collector] on `share_queue`, where
    test_collector maps station index -> one-step-ahead test predictions.
    Relies on the module-level `args` and `data_loader` globals.
    """
    print('Child process %s with pid %s' % (parameters[0], os.getpid()))
    # val_collector is never populated here; it is kept so the result pair
    # matches the [val, test] shape that reduce_fn merges.
    val_collector = {}
    test_collector = {}
    for i in data:
        print('Child process %s' % (parameters[0]),
              args['Dataset'], args['City'], 'Station', i, 'total', data_loader.station_number)
        try:
            model_obj = ARIMA(time_sequence=data_loader.train_closeness[:, i, -1, 0],
                              order=[args['ar'], args['d'], args['ma']],
                              seasonal_order=[args['sar'], args['sd'], args['sma'], args['sp']])
            test_prediction = model_obj.predict(time_sequences=data_loader.test_closeness[:, i, :, 0], forecast_step=1)
            # release the fitted model before moving to the next station
            del model_obj
        except Exception as e:
            # best-effort fallback: if fitting diverges, predict the last value
            print('Converge failed with error', e)
            print('Using last as prediction')
            test_prediction = data_loader.test_closeness[:, i, -1:, :]
        test_collector[i] = test_prediction
        print('Station', i, metric.rmse(test_prediction, data_loader.test_y[:, i:i + 1], threshold=0))
    locker.acquire()
    share_queue.put([val_collector, test_collector])
    locker.release()
def reduce_fn(a, b):
    """Merge one worker's result pair *b* into the accumulator pair *a*.

    Both arguments are [val_collector, test_collector] pairs of dicts; the
    dicts inside *a* are updated in place and *a* itself is returned.
    """
    val_acc, test_acc = a[0], a[1]
    val_acc.update(b[0])
    test_acc.update(b[1])
    return a
if __name__ == '__main__':
    # Fan the stations out over 8 worker processes: worker i handles every
    # station whose index is congruent to i modulo n_job.
    n_job = 8
    result = multiple_process(distribute_list=range(data_loader.station_number),
                              partition_func=lambda data, i, n_job:
                              [data[e] for e in range(len(data)) if e % n_job == i],
                              task_func=task, n_jobs=n_job, reduce_func=reduce_fn, parameters=[])
    # result[1] maps station index -> predictions; restore station order
    # before concatenating (axis=-2 is presumably the station axis — confirm shapes).
    test_rmse_collector = [e[1] for e in sorted(result[1].items(), key=lambda x: x[0])]
    test_rmse_collector = np.concatenate(test_rmse_collector, axis=-2)
    test_rmse = metric.rmse(test_rmse_collector, data_loader.test_y, threshold=0)
print(args['Dataset'], args['City'], 'test_rmse', test_rmse) | [
"argparse.ArgumentParser",
"numpy.concatenate",
"UCTB.model.ARIMA",
"os.getpid",
"UCTB.evaluation.metric.rmse"
] | [((204, 258), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argument Parser"""'}), "(description='Argument Parser')\n", (227, 258), False, 'import argparse\n'), ((3059, 3103), 'numpy.concatenate', 'np.concatenate', (['test_rmse_collector'], {'axis': '(-2)'}), '(test_rmse_collector, axis=-2)\n', (3073, 3103), True, 'import numpy as np\n'), ((3121, 3186), 'UCTB.evaluation.metric.rmse', 'metric.rmse', (['test_rmse_collector', 'data_loader.test_y'], {'threshold': '(0)'}), '(test_rmse_collector, data_loader.test_y, threshold=0)\n', (3132, 3186), False, 'from UCTB.evaluation import metric\n'), ((1660, 1840), 'UCTB.model.ARIMA', 'ARIMA', ([], {'time_sequence': 'data_loader.train_closeness[:, i, -1, 0]', 'order': "[args['ar'], args['d'], args['ma']]", 'seasonal_order': "[args['sar'], args['sd'], args['sma'], args['sp']]"}), "(time_sequence=data_loader.train_closeness[:, i, -1, 0], order=[args[\n 'ar'], args['d'], args['ma']], seasonal_order=[args['sar'], args['sd'],\n args['sma'], args['sp']])\n", (1665, 1840), False, 'from UCTB.model import ARIMA\n'), ((2315, 2388), 'UCTB.evaluation.metric.rmse', 'metric.rmse', (['test_prediction', 'data_loader.test_y[:, i:i + 1]'], {'threshold': '(0)'}), '(test_prediction, data_loader.test_y[:, i:i + 1], threshold=0)\n', (2326, 2388), False, 'from UCTB.evaluation import metric\n'), ((1391, 1402), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1400, 1402), False, 'import os\n')] |
import argparse
import multiprocessing
from queue import Empty
import random
import time
import os
import re
import glob
import pickle
import numpy as np
import time
import pommerman
from pommerman.agents import BaseAgent, SimpleAgent
from pommerman import constants
from keras.models import Model, load_model, model_from_json
from keras.layers import Input, Conv2D, Flatten, Dense
from keras.callbacks import EarlyStopping
from keras.initializers import RandomNormal
import keras.backend as K
import tensorflow as tf
NUM_AGENTS = 4  # number of players on a Pommerman FFA board
NUM_ACTIONS = len(constants.Action)  # size of the discrete action space (6 actions, per MCTSNode)
class MCTSNode(object):
    """One node of the MCTS tree: per-action statistics for the 6 actions."""
    def __init__(self, p):
        # Per-action mean value (Q), cumulative value (W), visit count (N)
        # and prior probability (P) from the policy network.
        self.Q = np.zeros(NUM_ACTIONS)
        self.W = np.zeros(NUM_ACTIONS)
        self.N = np.zeros(NUM_ACTIONS, dtype=np.uint32)
        assert p.shape == (NUM_ACTIONS,)
        self.P = p
    def action(self):
        """Return the action maximizing Q + U (PUCT exploration bonus)."""
        total_visits = np.sum(self.N)
        U = args.mcts_c_puct * self.P * np.sqrt(total_visits) / (1 + self.N)
        # TODO: use random tie-breaking for equal values
        return np.argmax(self.Q + U)
    def update(self, action, reward):
        """Back *reward* up through the edge taken by *action*."""
        self.N[action] += 1
        self.W[action] += reward
        self.Q[action] = self.W[action] / self.N[action]
    def probs(self, temperature=1):
        """Visit-count distribution; temperature 0 degenerates to a one-hot argmax."""
        if temperature == 0:
            one_hot = np.zeros(NUM_ACTIONS)
            one_hot[np.argmax(self.N)] = 1
            return one_hot
        weighted = self.N ** (1.0 / temperature)
        return weighted / np.sum(weighted)
class MCTSAgent(BaseAgent):
    """Pommerman agent that plans with Monte-Carlo tree search.

    In training mode the agent owns its own environment (self.env) in which
    it both plays and runs MCTS simulations; the policy/value network guides
    the search and is periodically refreshed from the trainer process.
    """
    def __init__(self, model_file=None, train=False, agent_id=0):
        """Create the agent; builds an env only when train=True and loads or
        initializes the policy/value model."""
        super().__init__()
        self.agent_id = agent_id
        if train:
            self.env = self.make_env()
        if model_file is None:
            self.model = make_model()
        else:
            self.model = load_model(model_file)
        self.reset_tree()
    def make_env(self):
        """Build an FFA environment with this agent at self.agent_id and
        SimpleAgents at the three other seats."""
        agents = []
        for agent_id in range(NUM_AGENTS):
            if agent_id == self.agent_id:
                agents.append(self)
            else:
                agents.append(SimpleAgent())
        return pommerman.make('PommeFFACompetition-v0', agents)
    def reset_tree(self):
        """Discard the search tree and the per-rollout statistics."""
        self.tree = {}
        # for statistics
        self.hit_probs = []
        self.avg_lengths = []
        self.entropies = []
        self.iters_sec = []
    def observation_to_features(self, obs):
        """Convert one raw observation dict into a stacked (H, W, 17) feature map.

        Planes: 9 board-item bitmaps, bomb blast strength, bomb life,
        broadcast ammo/blast_strength/can_kick, own position, teammate
        bitmap (zeros if none), and the union of enemy bitmaps.
        """
        # TODO: history of n moves?
        board = obs['board']
        # convert board items into bitmaps
        maps = [board == i for i in range(1, 10)]
        maps.append(obs['bomb_blast_strength'])
        maps.append(obs['bomb_life'])
        # duplicate ammo, blast_strength and can_kick over entire map
        maps.append(np.full(board.shape, obs['ammo']))
        maps.append(np.full(board.shape, obs['blast_strength']))
        maps.append(np.full(board.shape, obs['can_kick']))
        # add my position as bitmap
        position = np.zeros(board.shape)
        position[obs['position']] = 1
        maps.append(position)
        # add teammate
        if obs['teammate'] is not None:
            maps.append(board == obs['teammate'].value)
        else:
            maps.append(np.zeros(board.shape))
        # add enemies
        enemies = [board == e.value for e in obs['enemies']]
        maps.append(np.any(enemies, axis=0))
        return np.stack(maps, axis=2)
    def search(self, root, num_iters, temperature=1):
        """Run num_iters MCTS simulations from game state *root* and return
        the resulting action-probability vector.

        *root* is a json game-state snapshot (as produced by
        env.get_json_info()); the env is reset to it for every simulation
        and restored afterwards. Each simulation walks the tree via Q + U
        until it reaches a leaf, expands the leaf with the network's prior
        and value, then backs the (discounted) reward up the visited edges.
        """
        # remember current game state
        self.env._init_game_state = root
        root = str(self.env.get_json_info())
        # for statistics
        hits = 0
        misses = 0
        total_length = 0
        start_time = time.time()
        for i in range(num_iters):
            # restore game state to root node
            obs = self.env.reset()
            print('\rStep %d: iteration %d' % (self.env._step_count, i + 1), end=' ')
            # serialize game state; the json string is the tree key
            state = str(self.env.get_json_info())
            trace = []
            done = False
            while not done:
                if state in self.tree:
                    node = self.tree[state]
                    # choose actions based on Q + U
                    action = node.action()
                    trace.append((node, action))
                    #print("Action from tree:", constants.Action(action).name)
                    hits += 1
                else:
                    # initialize action probabilities with policy network
                    feats = self.observation_to_features(obs[self.agent_id])
                    feats = feats[np.newaxis, ...]
                    probs, values = self.model.predict(feats)
                    probs = probs[0]
                    # leaf evaluation: network value substitutes for a rollout reward
                    reward = values[0, 0]
                    # add Dirichlet noise to root node for added exploration
                    # Hex people didn't find it necessary
                    #if len(trace) == 0:
                    #    noise = np.random.dirichlet([args.mcts_dirichlet_alpha] * len(probs))
                    #    probs = (1 - args.mcts_dirichlet_epsilon) * probs + args.mcts_dirichlet_epsilon * noise
                    # add new node to the tree
                    self.tree[state] = MCTSNode(probs)
                    misses += 1
                    #print("Leaf node")
                    # stop at leaf node
                    break
                # ensure we are not called recursively
                assert self.env.training_agent == self.agent_id
                # make other agents act
                actions = self.env.act(obs)
                # add my action to list of actions
                actions.insert(self.agent_id, action)
                # step environment forward
                obs, rewards, done, info = self.env.step(actions)
                reward = rewards[self.agent_id]
                #print("Rewards:", rewards)
                state = str(self.env.get_json_info())
            total_length += len(trace)
            #print("Finished rollout, length:", len(trace))
            #print("Backpropagating reward:", reward)
            # update tree nodes with rollout results, discounting toward the root
            for node, action in reversed(trace):
                node.update(action, reward)
                reward *= args.discount
        #print("Root Q:")
        #print(self.tree[root].Q)
        #print("Root N:")
        #print(self.tree[root].N)
        print(self.tree[root].N, self.tree[root].Q, end='')
        #print("(tree hits: %0.2f, avg. len: %0.2f, tree size: %d)" % (hits / (hits + misses), total_length / num_iters, len(self.tree)))
        elapsed = time.time() - start_time
        self.iters_sec.append(num_iters / elapsed)
        self.hit_probs.append(hits / (hits + misses))
        self.avg_lengths.append(total_length / num_iters)
        # reset env back where we were
        self.env.set_json_info()
        self.env._init_game_state = None
        # return action probabilities
        pi = self.tree[root].probs(temperature)
        print()
        # entropy of the returned distribution, ignoring zero-probability actions
        idx = (pi != 0)
        self.entropies.append(-np.sum(pi[idx] * np.log(pi[idx])))
        return pi
    def rollout(self, shared_buffer, finished):
        """Play one full self-play episode, running MCTS at every step.

        Returns (trace, reward, rewards): per-step (features, pi) pairs,
        this agent's final reward, and all agents' rewards.
        NOTE(review): if `finished` is already set before the first step,
        `rewards` below is unbound — verify callers never do that.
        """
        # reset search tree in the beginning of each rollout
        self.reset_tree()
        # guarantees that we are not called recursively
        # and episode ends when this agent dies
        self.env.training_agent = self.agent_id
        obs = self.env.reset()
        trace = []
        done = False
        while not done and not finished.value:
            if args.render:
                self.env.render()
            # copy the latest network weights published by the trainer
            self.model.set_weights(pickle.loads(shared_buffer.raw))
            # use temperature 1 for first 30 steps and temperature 0 afterwards
            #temp = 0 if self.env._step_count < 30 else 0
            # TODO: only works when agent has access to the env
            root = self.env.get_json_info()
            # do Monte-Carlo tree search
            pi = self.search(root, args.mcts_iters, args.temperature)
            # sample action from probabilities
            action = np.random.choice(NUM_ACTIONS, p=pi)
            # record observations and action probabilities
            feats = self.observation_to_features(obs[self.agent_id])
            trace.append((feats, pi))
            # ensure we are not called recursively
            assert self.env.training_agent == self.agent_id
            # make other agents act
            actions = self.env.act(obs)
            # add my action to list of actions
            actions.insert(self.agent_id, action)
            # step environment
            obs, rewards, done, info = self.env.step(actions)
            assert self == self.env._agents[self.agent_id]
            print("Agent:", self.agent_id, "Step:", self.env._step_count, "Actions:", [constants.Action(a).name for a in actions], "Probs:", [round(p, 2) for p in pi], "Entropy: %.2f" % self.entropies[-1], "Iters/s: %.2f" % self.iters_sec[-1], "Rewards:", rewards, "Done:", done)
        #print("Rollout finished:", finished.value)
        reward = rewards[self.agent_id]
        #print("Agent:", self.agent_id, "Reward:", reward, "Len trace:", len(trace))
        return trace, reward, rewards
    def act(self, obs, action_space):
        """Greedy play without search: pick the network policy's argmax action."""
        obs = self.observation_to_features(obs)
        obs = np.array([obs])
        probs, reward = self.model.predict(obs)
        probs = probs[0]
        return np.argmax(probs)
        # sample action from probabilities
        #return np.random.choice(NUM_ACTIONS, p=pi)
class ReplayMemory(object):
    """Fixed-size ring buffer of (observation, action-probs, state-value) samples."""
    def __init__(self, size=100000):
        # Pre-allocated storage. `current` is the next write slot;
        # `count` is the number of valid samples and saturates at `size`.
        self.observations = np.empty((size, constants.BOARD_SIZE, constants.BOARD_SIZE, 17))
        self.action_probs = np.empty((size, NUM_ACTIONS))
        self.state_values = np.empty((size,))
        self.size = size
        self.current = 0
        self.count = 0
    def add_sample(self, obs, pi, z):
        """Store one sample, overwriting the oldest slot once the buffer is full."""
        slot = self.current
        self.observations[slot] = obs
        self.action_probs[slot] = pi
        self.state_values[slot] = z
        self.current = (slot + 1) % self.size
        self.count = min(self.count + 1, self.size)
    def dataset(self):
        """Return the filled portion of the buffer as (observations, probs, values)."""
        k = self.count
        return self.observations[:k], self.action_probs[:k], self.state_values[:k]
def make_model():
    """Build the policy/value network: a shared conv + dense trunk feeding
    a 6-way softmax policy head and a tanh scalar value head.

    Layer counts and widths come from the global `args`; compiled with Adam
    on categorical cross-entropy (policy) and MSE (value).
    """
    board_input = Input(shape=(constants.BOARD_SIZE, constants.BOARD_SIZE, 17))
    conv = board_input
    for _ in range(args.conv_layers):
        conv = Conv2D(args.conv_filters, args.conv_filter_size, activation='relu', padding='valid')(conv)
    shared = Flatten()(conv)
    for _ in range(args.hidden_layers):
        shared = Dense(args.hidden_nodes, activation='relu')(shared)
    # Policy head: distribution over the 6 actions.
    policy = shared
    for _ in range(args.policy_hidden_layers):
        policy = Dense(args.hidden_nodes, activation='relu')(policy)
    p = Dense(6, activation='softmax', kernel_initializer=RandomNormal(0.0, 0.001), name="policy")(policy)
    # Value head: scalar state value in [-1, 1].
    value = shared
    for _ in range(args.value_hidden_layers):
        value = Dense(args.hidden_nodes, activation='relu')(value)
    v = Dense(1, activation='tanh', kernel_initializer=RandomNormal(0.0, 0.001), name="value")(value)
    model = Model(board_input, [p, v])
    model.compile(optimizer='adam', loss=['categorical_crossentropy', 'mse'])
    return model
def init_tensorflow():
    """Install a Keras/TF session whose GPU memory grows on demand.

    Prevents TF from grabbing all GPU memory up front.
    NB! this needs to be done also in subprocesses!
    """
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    session = tf.Session(config=session_config)
    K.set_session(session)
def runner(id, model_file, shared_buffer, fifo, finished, _args):
    """Child-process entry point: play self-play episodes and feed them to `fifo`.

    `shared_buffer` carries the latest pickled network weights published by
    the trainer; `finished` is a shared flag that ends the loop. Runs the
    agent at seat `id % NUM_AGENTS` so all positions get covered.
    """
    import sys
    # NOTE(review): reopens stdin in the spawned child — presumably for
    # interactive debugging; confirm it is still needed.
    sys.stdin = open("/dev/stdin", "r")
    # make args accessible to MCTSAgent (module-global, must be set before
    # the agent is constructed)
    global args
    args = _args
    # initialize tensorflow
    init_tensorflow()
    # make sure agents play at all positions
    agent_id = id % NUM_AGENTS
    agent = MCTSAgent(model_file, train=True, agent_id=agent_id)
    while not finished.value:
        # do rollout
        trace, reward, rewards = agent.rollout(shared_buffer, finished)
        # don't put last trace into fifo
        if finished.value:
            break
        # add data samples and per-rollout statistics to training set
        fifo.put((trace, reward, rewards, agent_id, agent.hit_probs, agent.avg_lengths, len(agent.tree), agent.entropies, agent.iters_sec))
    #print("Runner finished:", finished.value)
    #print("Runner done")
def trainer(num_episodes, fifos, shared_buffer, model, memory, writer):
    """Main-process training loop: consume finished episodes and refit the net.

    Repeats until `num_episodes` reaches args.num_episodes: pull one episode
    from a random runner fifo, append its samples to the replay memory, log
    statistics, fit the model on the whole memory, publish the new weights
    through `shared_buffer`, and periodically checkpoint to disk.
    """
    callbacks = [EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='auto')]
    while num_episodes < args.num_episodes:
        while True:
            # pick random fifo (agent)
            fifo = random.choice(fifos)
            try:
                # wait for a new trajectory and statistics
                trace, reward, rewards, agent_id, hit_probs, avg_lengths, tree_size, entropies, iters_sec = fifo.get(timeout=args.queue_timeout)
                # break out of the infinite loop
                break
            except Empty:
                # just ignore empty fifos
                pass
        num_episodes += 1
        # add samples to replay memory; every step shares the episode reward
        # TODO: add_batch would be more efficient?
        for obs, pi in trace:
            memory.add_sample(obs, pi, reward)
        add_summary(writer, "tree/size", tree_size, num_episodes)
        add_summary(writer, "tree/mean_hit_prob", float(np.mean(hit_probs)), num_episodes)
        add_summary(writer, "tree/mean_rollout_len", float(np.mean(avg_lengths)), num_episodes)
        add_summary(writer, "tree/iters_sec", float(np.mean(iters_sec)), num_episodes)
        add_histogram(writer, "tree/hit_probability", hit_probs, num_episodes)
        add_histogram(writer, "tree/rollout_length", avg_lengths, num_episodes)
        add_histogram(writer, "tree/entropies", entropies, num_episodes)
        add_summary(writer, "episode/mean_entropy", float(np.mean(entropies)), num_episodes)
        add_summary(writer, "episode/reward", reward, num_episodes)
        add_summary(writer, "episode/length", len(trace), num_episodes)
        add_summary(writer, "rewards/agent_id", agent_id, num_episodes)
        for i in range(len(rewards)):
            add_summary(writer, "rewards/agent%d" % i, rewards[i], num_episodes)
        add_summary(writer, "replay_memory/size", memory.size, num_episodes)
        add_summary(writer, "replay_memory/count", memory.count, num_episodes)
        add_summary(writer, "replay_memory/current", memory.current, num_episodes)
        #print("Replay memory size: %d, count: %d, current: %d" % (memory.size, memory.count, memory.current))
        X, y, z = memory.dataset()
        assert len(X) != 0
        # reset weights?
        # NOTE(review): this rebinds the local `model` only; the caller's
        # reference is unchanged, but the new weights still reach runners
        # via shared_buffer below.
        if args.reset_network:
            #model.set_weights(init_weights)
            model = model_from_json(model.to_json())
            model.compile(optimizer='adam', loss=['categorical_crossentropy', 'mse'])
        # train for limited epochs to avoid overfitting?
        history = model.fit(X, [y, z], batch_size=args.batch_size, epochs=args.num_epochs, callbacks=callbacks, validation_split=args.validation_split)
        # log loss values
        for k, v in history.history.items():
            add_summary(writer, "training/" + k, v[-1], num_episodes)
        # shared weights with runners
        shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
        # save weights
        if num_episodes % args.save_interval == 0:
            model.save(os.path.join(logdir, "model_%d.hdf5" % num_episodes))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('label')
    parser.add_argument('--load_model')
    parser.add_argument('--logdir', default="logs")
    parser.add_argument('--render', action="store_true", default=False)
    parser.add_argument('--num_episodes', type=int, default=100)
    parser.add_argument('--save_interval', type=int, default=10)
    # queue params
    parser.add_argument('--queue_length', type=int, default=10)
    parser.add_argument('--queue_timeout', type=int, default=1)
    # runner params
    parser.add_argument('--num_runners', type=int, default=4)
    parser.add_argument('--max_steps', type=int, default=constants.MAX_STEPS)
    # trainer params
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--validation_split', type=float, default=0.1)
    parser.add_argument('--reset_network', action='store_true', default=False)
    # network params
    parser.add_argument('--conv_layers', type=int, default=0)
    parser.add_argument('--conv_filters', type=int, default=32)
    parser.add_argument('--conv_filter_size', type=int, default=3)
    parser.add_argument('--hidden_layers', type=int, default=0)
    parser.add_argument('--hidden_nodes', type=int, default=32)
    parser.add_argument('--policy_hidden_layers', type=int, default=0)
    parser.add_argument('--value_hidden_layers', type=int, default=0)
    # MCTS params
    parser.add_argument('--mcts_iters', type=int, default=10)
    parser.add_argument('--mcts_c_puct', type=float, default=1.0)
    parser.add_argument('--mcts_dirichlet_epsilon', type=float, default=0.25)
    parser.add_argument('--mcts_dirichlet_alpha', type=float, default=0.3)
    # RL params
    parser.add_argument('--discount', type=float, default=0.99)
    parser.add_argument('--temperature', type=float, default=0)
    args = parser.parse_args()
    memory = ReplayMemory()
    # one log/checkpoint directory per experiment label
    logdir = os.path.join(args.logdir, args.label)
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    # use spawn method for starting subprocesses
    # this seems to be more compatible with TensorFlow?
    ctx = multiprocessing.get_context('spawn')
    # check for commandline argument or previous model file
    num_episodes = 0
    model_file = None
    if args.load_model:
        model_file = args.load_model
    else:
        # resume from the highest-numbered checkpoint in logdir, if any
        files = glob.glob(os.path.join(logdir, "model_*.hdf5"))
        if files:
            model_file = max(files, key=lambda f: int(re.search(r'_(\d+).hdf5', f).group(1)))
            # set start timestep from file name when continuing previous session
            num_episodes = int(re.search(r'_(\d+).hdf5', model_file).group(1))
            print("Setting start episode to %d" % num_episodes)
    # load saved model
    init_tensorflow()
    if model_file:
        print("Loading model:", model_file)
        model = load_model(model_file)
    else:
        print("Initializing new model")
        model = make_model()
    model.summary()
    # create shared buffer for sharing weights: a raw byte array holding the
    # pickled weight list, read by runners and overwritten by the trainer
    print("Creating shared memory for model")
    init_weights = model.get_weights()
    blob = pickle.dumps(init_weights, pickle.HIGHEST_PROTOCOL)
    shared_buffer = ctx.Array('c', len(blob))
    shared_buffer.raw = blob
    # create boolean to signal end
    finished = ctx.Value('i', 0)
    # create fifos and processes for all runners
    print("Creating child processes")
    fifos = []
    for i in range(args.num_runners):
        fifo = ctx.Queue(args.queue_length)
        fifos.append(fifo)
        process = ctx.Process(target=runner, args=(i, model_file, shared_buffer, fifo, finished, args))
        process.start()
    from tensorboard_utils import create_summary_writer, add_summary, add_histogram
    writer = create_summary_writer(logdir)
    # do training in main process
    print("Starting training in main process")
    trainer(num_episodes, fifos, shared_buffer, model, memory, writer)
    finished.value = 1
    print("Finishing")
    # empty queues until all child processes have exited, so blocked
    # queue feeder threads don't prevent the children from terminating
    while len(multiprocessing.active_children()) > 0:
        for i, fifo in enumerate(fifos):
            if not fifo.empty():
                fifo.get_nowait()
    print("All done")
| [
"keras.layers.Conv2D",
"pickle.dumps",
"numpy.log",
"numpy.array",
"keras.layers.Dense",
"pickle.loads",
"tensorboard_utils.add_summary",
"multiprocessing.active_children",
"re.search",
"numpy.mean",
"pommerman.constants.Action",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.stac... | [((10375, 10436), 'keras.layers.Input', 'Input', ([], {'shape': '(constants.BOARD_SIZE, constants.BOARD_SIZE, 17)'}), '(shape=(constants.BOARD_SIZE, constants.BOARD_SIZE, 17))\n', (10380, 10436), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((11146, 11162), 'keras.models.Model', 'Model', (['x', '[p, v]'], {}), '(x, [p, v])\n', (11151, 11162), False, 'from keras.models import Model, load_model, model_from_json\n'), ((11397, 11413), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (11411, 11413), True, 'import tensorflow as tf\n'), ((15575, 15600), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15598, 15600), False, 'import argparse\n'), ((17531, 17568), 'os.path.join', 'os.path.join', (['args.logdir', 'args.label'], {}), '(args.logdir, args.label)\n', (17543, 17568), False, 'import os\n'), ((17747, 17783), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (17774, 17783), False, 'import multiprocessing\n'), ((18750, 18801), 'pickle.dumps', 'pickle.dumps', (['init_weights', 'pickle.HIGHEST_PROTOCOL'], {}), '(init_weights, pickle.HIGHEST_PROTOCOL)\n', (18762, 18801), False, 'import pickle\n'), ((19384, 19413), 'tensorboard_utils.create_summary_writer', 'create_summary_writer', (['logdir'], {}), '(logdir)\n', (19405, 19413), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((673, 694), 'numpy.zeros', 'np.zeros', (['NUM_ACTIONS'], {}), '(NUM_ACTIONS)\n', (681, 694), True, 'import numpy as np\n'), ((712, 733), 'numpy.zeros', 'np.zeros', (['NUM_ACTIONS'], {}), '(NUM_ACTIONS)\n', (720, 733), True, 'import numpy as np\n'), ((751, 789), 'numpy.zeros', 'np.zeros', (['NUM_ACTIONS'], {'dtype': 'np.uint32'}), '(NUM_ACTIONS, dtype=np.uint32)\n', (759, 789), True, 'import numpy as np\n'), ((1024, 1045), 'numpy.argmax', 'np.argmax', (['(self.Q + U)'], {}), '(self.Q + U)\n', (1033, 1045), True, 'import numpy as 
np\n'), ((2078, 2126), 'pommerman.make', 'pommerman.make', (['"""PommeFFACompetition-v0"""', 'agents'], {}), "('PommeFFACompetition-v0', agents)\n", (2092, 2126), False, 'import pommerman\n'), ((2912, 2933), 'numpy.zeros', 'np.zeros', (['board.shape'], {}), '(board.shape)\n', (2920, 2933), True, 'import numpy as np\n'), ((3328, 3350), 'numpy.stack', 'np.stack', (['maps'], {'axis': '(2)'}), '(maps, axis=2)\n', (3336, 3350), True, 'import numpy as np\n'), ((3638, 3649), 'time.time', 'time.time', ([], {}), '()\n', (3647, 3649), False, 'import time\n'), ((9365, 9380), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (9373, 9380), True, 'import numpy as np\n'), ((9469, 9485), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (9478, 9485), True, 'import numpy as np\n'), ((9676, 9740), 'numpy.empty', 'np.empty', (['(size, constants.BOARD_SIZE, constants.BOARD_SIZE, 17)'], {}), '((size, constants.BOARD_SIZE, constants.BOARD_SIZE, 17))\n', (9684, 9740), True, 'import numpy as np\n'), ((9769, 9798), 'numpy.empty', 'np.empty', (['(size, NUM_ACTIONS)'], {}), '((size, NUM_ACTIONS))\n', (9777, 9798), True, 'import numpy as np\n'), ((9827, 9844), 'numpy.empty', 'np.empty', (['(size,)'], {}), '((size,))\n', (9835, 9844), True, 'import numpy as np\n'), ((10583, 10592), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (10590, 10592), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((11475, 11500), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (11485, 11500), True, 'import tensorflow as tf\n'), ((12449, 12539), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.001)', 'patience': '(5)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0.001, patience=5, verbose=1,\n mode='auto')\n", (12462, 12539), False, 'from keras.callbacks import EarlyStopping\n'), ((13265, 13322), 'tensorboard_utils.add_summary', 'add_summary', (['writer', 
'"""tree/size"""', 'tree_size', 'num_episodes'], {}), "(writer, 'tree/size', tree_size, num_episodes)\n", (13276, 13322), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((13605, 13675), 'tensorboard_utils.add_histogram', 'add_histogram', (['writer', '"""tree/hit_probability"""', 'hit_probs', 'num_episodes'], {}), "(writer, 'tree/hit_probability', hit_probs, num_episodes)\n", (13618, 13675), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((13684, 13755), 'tensorboard_utils.add_histogram', 'add_histogram', (['writer', '"""tree/rollout_length"""', 'avg_lengths', 'num_episodes'], {}), "(writer, 'tree/rollout_length', avg_lengths, num_episodes)\n", (13697, 13755), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((13764, 13828), 'tensorboard_utils.add_histogram', 'add_histogram', (['writer', '"""tree/entropies"""', 'entropies', 'num_episodes'], {}), "(writer, 'tree/entropies', entropies, num_episodes)\n", (13777, 13828), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((13930, 13989), 'tensorboard_utils.add_summary', 'add_summary', (['writer', '"""episode/reward"""', 'reward', 'num_episodes'], {}), "(writer, 'episode/reward', reward, num_episodes)\n", (13941, 13989), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((14070, 14133), 'tensorboard_utils.add_summary', 'add_summary', (['writer', '"""rewards/agent_id"""', 'agent_id', 'num_episodes'], {}), "(writer, 'rewards/agent_id', agent_id, num_episodes)\n", (14081, 14133), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((14261, 14329), 'tensorboard_utils.add_summary', 'add_summary', (['writer', '"""replay_memory/size"""', 'memory.size', 'num_episodes'], {}), "(writer, 'replay_memory/size', memory.size, num_episodes)\n", (14272, 14329), False, 'from 
tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((14338, 14408), 'tensorboard_utils.add_summary', 'add_summary', (['writer', '"""replay_memory/count"""', 'memory.count', 'num_episodes'], {}), "(writer, 'replay_memory/count', memory.count, num_episodes)\n", (14349, 14408), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((14417, 14491), 'tensorboard_utils.add_summary', 'add_summary', (['writer', '"""replay_memory/current"""', 'memory.current', 'num_episodes'], {}), "(writer, 'replay_memory/current', memory.current, num_episodes)\n", (14428, 14491), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((17580, 17601), 'os.path.isdir', 'os.path.isdir', (['logdir'], {}), '(logdir)\n', (17593, 17601), False, 'import os\n'), ((17611, 17630), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (17622, 17630), False, 'import os\n'), ((18484, 18506), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (18494, 18506), False, 'from keras.models import Model, load_model, model_from_json\n'), ((1285, 1306), 'numpy.zeros', 'np.zeros', (['NUM_ACTIONS'], {}), '(NUM_ACTIONS)\n', (1293, 1306), True, 'import numpy as np\n'), ((1783, 1805), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (1793, 1805), False, 'from keras.models import Model, load_model, model_from_json\n'), ((2697, 2730), 'numpy.full', 'np.full', (['board.shape', "obs['ammo']"], {}), "(board.shape, obs['ammo'])\n", (2704, 2730), True, 'import numpy as np\n'), ((2752, 2795), 'numpy.full', 'np.full', (['board.shape', "obs['blast_strength']"], {}), "(board.shape, obs['blast_strength'])\n", (2759, 2795), True, 'import numpy as np\n'), ((2817, 2854), 'numpy.full', 'np.full', (['board.shape', "obs['can_kick']"], {}), "(board.shape, obs['can_kick'])\n", (2824, 2854), True, 'import numpy as np\n'), ((3287, 3310), 'numpy.any', 'np.any', 
(['enemies'], {'axis': '(0)'}), '(enemies, axis=0)\n', (3293, 3310), True, 'import numpy as np\n'), ((6607, 6618), 'time.time', 'time.time', ([], {}), '()\n', (6616, 6618), False, 'import time\n'), ((8124, 8159), 'numpy.random.choice', 'np.random.choice', (['NUM_ACTIONS'], {'p': 'pi'}), '(NUM_ACTIONS, p=pi)\n', (8140, 8159), True, 'import numpy as np\n'), ((10487, 10576), 'keras.layers.Conv2D', 'Conv2D', (['args.conv_filters', 'args.conv_filter_size'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(args.conv_filters, args.conv_filter_size, activation='relu', padding\n ='valid')\n", (10493, 10576), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((10648, 10691), 'keras.layers.Dense', 'Dense', (['args.hidden_nodes'], {'activation': '"""relu"""'}), "(args.hidden_nodes, activation='relu')\n", (10653, 10691), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((10766, 10809), 'keras.layers.Dense', 'Dense', (['args.hidden_nodes'], {'activation': '"""relu"""'}), "(args.hidden_nodes, activation='relu')\n", (10771, 10809), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((10987, 11030), 'keras.layers.Dense', 'Dense', (['args.hidden_nodes'], {'activation': '"""relu"""'}), "(args.hidden_nodes, activation='relu')\n", (10992, 11030), False, 'from keras.layers import Input, Conv2D, Flatten, Dense\n'), ((12659, 12679), 'random.choice', 'random.choice', (['fifos'], {}), '(fifos)\n', (12672, 12679), False, 'import random\n'), ((14184, 14252), 'tensorboard_utils.add_summary', 'add_summary', (['writer', "('rewards/agent%d' % i)", 'rewards[i]', 'num_episodes'], {}), "(writer, 'rewards/agent%d' % i, rewards[i], num_episodes)\n", (14195, 14252), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((15199, 15256), 'tensorboard_utils.add_summary', 'add_summary', (['writer', "('training/' + k)", 'v[-1]', 'num_episodes'], {}), "(writer, 'training/' + k, v[-1], num_episodes)\n", 
(15210, 15256), False, 'from tensorboard_utils import create_summary_writer, add_summary, add_histogram\n'), ((17985, 18021), 'os.path.join', 'os.path.join', (['logdir', '"""model_*.hdf5"""'], {}), "(logdir, 'model_*.hdf5')\n", (17997, 18021), False, 'import os\n'), ((19684, 19717), 'multiprocessing.active_children', 'multiprocessing.active_children', ([], {}), '()\n', (19715, 19717), False, 'import multiprocessing\n'), ((1321, 1338), 'numpy.argmax', 'np.argmax', (['self.N'], {}), '(self.N)\n', (1330, 1338), True, 'import numpy as np\n'), ((1450, 1460), 'numpy.sum', 'np.sum', (['Nt'], {}), '(Nt)\n', (1456, 1460), True, 'import numpy as np\n'), ((3160, 3181), 'numpy.zeros', 'np.zeros', (['board.shape'], {}), '(board.shape)\n', (3168, 3181), True, 'import numpy as np\n'), ((7665, 7696), 'pickle.loads', 'pickle.loads', (['shared_buffer.raw'], {}), '(shared_buffer.raw)\n', (7677, 7696), False, 'import pickle\n'), ((10872, 10896), 'keras.initializers.RandomNormal', 'RandomNormal', (['(0.0)', '(0.001)'], {}), '(0.0, 0.001)\n', (10884, 10896), False, 'from keras.initializers import RandomNormal\n'), ((11090, 11114), 'keras.initializers.RandomNormal', 'RandomNormal', (['(0.0)', '(0.001)'], {}), '(0.0, 0.001)\n', (11102, 11114), False, 'from keras.initializers import RandomNormal\n'), ((13379, 13397), 'numpy.mean', 'np.mean', (['hit_probs'], {}), '(hit_probs)\n', (13386, 13397), True, 'import numpy as np\n'), ((13473, 13493), 'numpy.mean', 'np.mean', (['avg_lengths'], {}), '(avg_lengths)\n', (13480, 13493), True, 'import numpy as np\n'), ((13562, 13580), 'numpy.mean', 'np.mean', (['iters_sec'], {}), '(iters_sec)\n', (13569, 13580), True, 'import numpy as np\n'), ((13887, 13905), 'numpy.mean', 'np.mean', (['entropies'], {}), '(entropies)\n', (13894, 13905), True, 'import numpy as np\n'), ((15479, 15531), 'os.path.join', 'os.path.join', (['logdir', "('model_%d.hdf5' % num_episodes)"], {}), "(logdir, 'model_%d.hdf5' % num_episodes)\n", (15491, 15531), False, 'import os\n'), 
((921, 935), 'numpy.sum', 'np.sum', (['self.N'], {}), '(self.N)\n', (927, 935), True, 'import numpy as np\n'), ((2047, 2060), 'pommerman.agents.SimpleAgent', 'SimpleAgent', ([], {}), '()\n', (2058, 2060), False, 'from pommerman.agents import BaseAgent, SimpleAgent\n'), ((7083, 7098), 'numpy.log', 'np.log', (['pi[idx]'], {}), '(pi[idx])\n', (7089, 7098), True, 'import numpy as np\n'), ((8850, 8869), 'pommerman.constants.Action', 'constants.Action', (['a'], {}), '(a)\n', (8866, 8869), False, 'from pommerman import constants\n'), ((18247, 18284), 're.search', 're.search', (['"""_(\\\\d+).hdf5"""', 'model_file'], {}), "('_(\\\\d+).hdf5', model_file)\n", (18256, 18284), False, 'import re\n'), ((18095, 18123), 're.search', 're.search', (['"""_(\\\\d+).hdf5"""', 'f'], {}), "('_(\\\\d+).hdf5', f)\n", (18104, 18123), False, 'import re\n')] |
#!/usr/bin/env python3
# python print to stderr (most portable and flexible)
from __future__ import print_function
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
import numpy as np
import scipy.optimize
import sys
# get angle from arg 0
# positive numbers mean counter-clockwise
if len(sys.argv) > 1:
a=float(sys.argv[1])
else:
a=None
# load data from stdin
d=np.loadtxt("/dev/stdin")
def rotated_data(a):
# construct rotation matrix
c=np.cos(np.radians(a))
s=np.sin(np.radians(a))
m=np.array([[c,-s],[s,c]])
# apply the rotation
r=np.transpose(np.matmul(m, np.transpose(d)))
return r
if a is None:
# optimizing
def f(a):
rd=rotated_data(a)
slic=rd[:,1]
l=np.min(slic)
h=np.max(slic)
cost=(h-l)**2
return cost
res=scipy.optimize.minimize_scalar(f)
assert res.success
eprint('Found best rotation angle:', res.x)
a=res.x
# save the rotated
np.savetxt("/dev/stdout", rotated_data(a), delimiter="\t")
| [
"numpy.radians",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.loadtxt",
"numpy.transpose"
] | [((405, 429), 'numpy.loadtxt', 'np.loadtxt', (['"""/dev/stdin"""'], {}), "('/dev/stdin')\n", (415, 429), True, 'import numpy as np\n'), ((538, 565), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (546, 565), True, 'import numpy as np\n'), ((493, 506), 'numpy.radians', 'np.radians', (['a'], {}), '(a)\n', (503, 506), True, 'import numpy as np\n'), ((519, 532), 'numpy.radians', 'np.radians', (['a'], {}), '(a)\n', (529, 532), True, 'import numpy as np\n'), ((733, 745), 'numpy.min', 'np.min', (['slic'], {}), '(slic)\n', (739, 745), True, 'import numpy as np\n'), ((752, 764), 'numpy.max', 'np.max', (['slic'], {}), '(slic)\n', (758, 764), True, 'import numpy as np\n'), ((616, 631), 'numpy.transpose', 'np.transpose', (['d'], {}), '(d)\n', (628, 631), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
__author__ = "ChenJun"
import numpy as np
import tensorflow as tf
from tensorflow.contrib import crf
class BatchNormLayer(object):
"""
batch normalization.
"""
def __init__(self, n_in, inputs):
self.gamma = tf.Variable(initial_value=np.ones(n_in,), dtype=tf.float32, name="gamma")
self.beta = tf.Variable(initial_value=np.zeros(n_in,), dtype=tf.float32, name="beta")
mean, var = tf.nn.moments(inputs, [1], keep_dims=True)
self.out = tf.nn.batch_normalization(inputs, mean, var, offset=self.beta, scale=self.gamma, variance_epsilon=1e-6, name="bn")
class InteractLayer(object):
"""
interact for question_vec and answer_vec.
math: (q * W) * a
q * W: [batch_size, n_q] * [n_q, dim, n_a] -> [batch_size, dim, n_a]
(q * W) * a: [batch_size, dim, n_a] * [batch_size, n_a, 1] -> [batch_size, dim, 1]
out: [batch_size, dim, 1] -> [batch_size, dim]
"""
def __init__(self, n_q, n_a, dim):
# self.W = tf.Variable(initial_value=tf.random_uniform(shape=(n_q, dim, n_a), minval=-0.1, maxval=0.1), name="IL_W")
self.W = tf.Variable(tf.random_normal(shape=(n_q, dim, n_a)) * 0.05, name="IL_W")
self.dim = dim
def __call__(self, q_input, a_input):
qa_vec = tf.matmul(tf.tensordot(q_input, self.W, axes=[[1], [0]]), tf.expand_dims(a_input, 2))
out_put = qa_vec[:, :, -1]
return out_put
class LogisticRegression(object):
"""
logistic regression layer.
label: one_hot [[0,1], [0,1], [1,0], ...] -> tf.nn.softmax_cross_entropy_with_logits
number [1, 0, 0, 1, 0, ...] -> tf.nn.sparse_softmax_cross_entropy_with_logits
math: softmax(W * X + b)
"""
def __init__(self, input, n_in, n_out):
self.W = tf.Variable(tf.zeros(shape=(n_in, n_out)), name="LR_W")
self.b = tf.Variable(tf.zeros(shape=(n_out,)), name="LR_b")
self.linear = tf.add(tf.matmul(input, self.W), self.b)
self.p_y_given_x = tf.nn.softmax(tf.add(tf.matmul(input, self.W), self.b))
self.y_pred = tf.arg_max(self.p_y_given_x, 1)
def cross_entropy(self, y):
# cost = tf.reduce_mean(-tf.reduce_sum(tf.cast(y, dtype=tf.float32) * tf.log(self.p_y_given_x), reduction_indices=1)) # softmax(WX+b); one-hot label
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.linear, labels=y)) # unscaled(WX+b); one-hot label
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.linear, labels=y)) # unscaled(WX+b); number label
return cost
def errors(self, y):
return tf.reduce_mean(tf.cast(tf.not_equal(self.y_pred, tf.arg_max(y,1)), dtype=tf.float32))
def pred_prob(self):
return self.p_y_given_x[:,1]
class HiddenLayer(object):
"""
hidden layer
math: activation(W * X + b)
"""
def __init__(self, input, n_in, n_out, activation=tf.tanh):
self.input = input
w_value = tf.random_uniform(minval=-np.sqrt(6.0 / (n_in + n_out)), maxval=np.sqrt(6.0 / (n_in + n_out)), shape=(n_in, n_out))
if activation == tf.sigmoid:
w_value *= 4
self.W = tf.Variable(initial_value=w_value, name="HL_W")
self.b = tf.Variable(initial_value=tf.zeros(shape=(n_out,)), name="HL_b")
if activation == None:
output = tf.add(tf.matmul(input, self.W), self.b)
else:
output = activation(tf.add(tf.matmul(input, self.W), self.b))
self.output = output
class DropoutHiddenLayer(HiddenLayer):
"""
dropout after hidden layer.
"""
def __init__(self, input, n_in, n_out, keep_prop, activation):
super(DropoutHiddenLayer, self).__init__(input=input, n_in=n_in, n_out=n_out, activation=activation)
self.output = tf.nn.dropout(self.output, keep_prob=keep_prop)
class MLPDropout(object):
"""
dropout mlp(
hidden layer
dropout
logistic regression layer
)
"""
def __init__(self, input, n_in, n_hidden, n_out, keep_prop, activation=tf.tanh):
self.drop_hidden_layer = DropoutHiddenLayer(input=input, n_in=n_in, n_out=n_hidden, activation=activation, keep_prop=keep_prop)
self.logistic_regression_layer = LogisticRegression(input=self.drop_hidden_layer.output, n_in=n_hidden, n_out=n_out)
self.cross_entropy = (
self.logistic_regression_layer.cross_entropy
)
# same holds for the function computing the number of errors
self.errors = self.logistic_regression_layer.errors
self.pred_prob = self.logistic_regression_layer.pred_prob
self.L1 = (
tf.reduce_sum(abs(self.drop_hidden_layer.W)) +
tf.reduce_sum(abs(self.logistic_regression_layer.W))
)
# square of L2 norm ; one regularization option is to enforce square of L2 norm to be small
self.L2_sqr = (
tf.nn.l2_loss(self.drop_hidden_layer.W) +
tf.nn.l2_loss(self.logistic_regression_layer.W)
)
class MLP(object):
"""
mlp(
hidden layer
logistic regression layer
)
"""
def __init__(self, input, n_in, n_hidden, n_out):
self.hidden_layer = HiddenLayer(input=input, n_in=n_in, n_out=n_hidden, activation=tf.tanh)
self.logistic_regression_layer = LogisticRegression(input=self.hidden_layer.output, n_in=n_hidden, n_out=n_out)
self.cross_entropy = (
self.logistic_regression_layer.cross_entropy
)
# same holds for the function computing the number of errors
self.errors = self.logistic_regression_layer.errors
self.pred_prob = self.logistic_regression_layer.pred_prob
self.L1 = (
tf.reduce_sum(abs(self.hidden_layer.W)) +
tf.reduce_sum(abs(self.logistic_regression_layer.W))
)
# square of L2 norm ; one regularization option is to enforce square of L2 norm to be small
self.L2_sqr = (
tf.nn.l2_loss(self.hidden_layer.W) +
tf.nn.l2_loss(self.logistic_regression_layer.W)
)
| [
"tensorflow.tensordot",
"tensorflow.random_normal",
"numpy.ones",
"numpy.sqrt",
"tensorflow.Variable",
"tensorflow.nn.moments",
"tensorflow.nn.l2_loss",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.zeros",
"tensorflow.arg_max",
"tensorflow.nn.dropout",
"tensorflow.matmul",
... | [((445, 487), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', '[1]'], {'keep_dims': '(True)'}), '(inputs, [1], keep_dims=True)\n', (458, 487), True, 'import tensorflow as tf\n'), ((507, 627), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputs', 'mean', 'var'], {'offset': 'self.beta', 'scale': 'self.gamma', 'variance_epsilon': '(1e-06)', 'name': '"""bn"""'}), "(inputs, mean, var, offset=self.beta, scale=self.\n gamma, variance_epsilon=1e-06, name='bn')\n", (532, 627), True, 'import tensorflow as tf\n'), ((2073, 2104), 'tensorflow.arg_max', 'tf.arg_max', (['self.p_y_given_x', '(1)'], {}), '(self.p_y_given_x, 1)\n', (2083, 2104), True, 'import tensorflow as tf\n'), ((3181, 3228), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'w_value', 'name': '"""HL_W"""'}), "(initial_value=w_value, name='HL_W')\n", (3192, 3228), True, 'import tensorflow as tf\n'), ((3809, 3856), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.output'], {'keep_prob': 'keep_prop'}), '(self.output, keep_prob=keep_prop)\n', (3822, 3856), True, 'import tensorflow as tf\n'), ((1295, 1341), 'tensorflow.tensordot', 'tf.tensordot', (['q_input', 'self.W'], {'axes': '[[1], [0]]'}), '(q_input, self.W, axes=[[1], [0]])\n', (1307, 1341), True, 'import tensorflow as tf\n'), ((1343, 1369), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_input', '(2)'], {}), '(a_input, 2)\n', (1357, 1369), True, 'import tensorflow as tf\n'), ((1793, 1822), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(n_in, n_out)'}), '(shape=(n_in, n_out))\n', (1801, 1822), True, 'import tensorflow as tf\n'), ((1866, 1890), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(n_out,)'}), '(shape=(n_out,))\n', (1874, 1890), True, 'import tensorflow as tf\n'), ((1934, 1958), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.W'], {}), '(input, self.W)\n', (1943, 1958), True, 'import tensorflow as tf\n'), ((2462, 2538), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 
'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.linear', 'labels': 'y'}), '(logits=self.linear, labels=y)\n', (2508, 2538), True, 'import tensorflow as tf\n'), ((4931, 4970), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.drop_hidden_layer.W'], {}), '(self.drop_hidden_layer.W)\n', (4944, 4970), True, 'import tensorflow as tf\n'), ((4985, 5032), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.logistic_regression_layer.W'], {}), '(self.logistic_regression_layer.W)\n', (4998, 5032), True, 'import tensorflow as tf\n'), ((6009, 6043), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.hidden_layer.W'], {}), '(self.hidden_layer.W)\n', (6022, 6043), True, 'import tensorflow as tf\n'), ((6058, 6105), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.logistic_regression_layer.W'], {}), '(self.logistic_regression_layer.W)\n', (6071, 6105), True, 'import tensorflow as tf\n'), ((283, 296), 'numpy.ones', 'np.ones', (['n_in'], {}), '(n_in)\n', (290, 296), True, 'import numpy as np\n'), ((377, 391), 'numpy.zeros', 'np.zeros', (['n_in'], {}), '(n_in)\n', (385, 391), True, 'import numpy as np\n'), ((1141, 1180), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(n_q, dim, n_a)'}), '(shape=(n_q, dim, n_a))\n', (1157, 1180), True, 'import tensorflow as tf\n'), ((2016, 2040), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.W'], {}), '(input, self.W)\n', (2025, 2040), True, 'import tensorflow as tf\n'), ((3050, 3079), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (n_in + n_out))'], {}), '(6.0 / (n_in + n_out))\n', (3057, 3079), True, 'import numpy as np\n'), ((3272, 3296), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(n_out,)'}), '(shape=(n_out,))\n', (3280, 3296), True, 'import tensorflow as tf\n'), ((3371, 3395), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.W'], {}), '(input, self.W)\n', (3380, 3395), True, 'import tensorflow as tf\n'), ((2683, 2699), 'tensorflow.arg_max', 'tf.arg_max', (['y', '(1)'], {}), '(y, 1)\n', (2693, 2699), True, 
'import tensorflow as tf\n'), ((3012, 3041), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (n_in + n_out))'], {}), '(6.0 / (n_in + n_out))\n', (3019, 3041), True, 'import numpy as np\n'), ((3458, 3482), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.W'], {}), '(input, self.W)\n', (3467, 3482), True, 'import tensorflow as tf\n')] |
from pathlib import PosixPath
from typing import Dict, List, Optional, Union
import matplotlib.pylab as plt
import numpy as np
from numpy import ndarray
from modes import _analyse as anal
from modes import _mode_solver_lib as ms
from modes._mode_solver import _ModeSolver
class ModeSolverSemiVectorial(_ModeSolver):
"""
A semi-vectorial mode solver object used to
setup and run a mode solving simulation.
Args:
n_eigs (int): The number of eigen-values to solve for.
tol (float): The precision of the eigen-value/eigen-vector
solver. Default is 0.001.
boundary (str): The boundary conditions to use.
This is a string that identifies the type of boundary conditions applied.
The following options are available: 'A' - Hx is antisymmetric, Hy is symmetric,
'S' - Hx is symmetric and, Hy is antisymmetric, and '0' - Hx and Hy are zero
immediately outside of the boundary.
The string identifies all four boundary conditions, in the order:
North, south, east, west. For example, boundary='000A'. Default is '0000'.
mode_profiles (bool): `True if the the mode-profiles should be found, `False`
if only the effective indices should be found.
initial_mode_guess (list): An initial mode guess for the modesolver.
semi_vectorial_method (str): Either 'Ex' or 'Ey'. If 'Ex', the mode solver
will only find TE modes (horizontally polarised to the simulation window),
if 'Ey', the mode solver will find TM modes (vertically polarised to the
simulation window).
"""
def __init__(
self,
n_eigs: int,
tol: float = 0.001,
boundary: str = "0000",
mode_profiles: bool = True,
initial_mode_guess: Optional[float] = None,
semi_vectorial_method: str = "Ex",
wg: None = None,
) -> None:
self._semi_vectorial_method = semi_vectorial_method
_ModeSolver.__init__(
self, n_eigs, tol, boundary, mode_profiles, initial_mode_guess
)
self.name = "mode_solver_semi_vectorial"
self.wg = wg
self.results = None
def solve(self) -> Dict[str, Union[ndarray, List[ndarray]]]:
"""Find the modes of a given structure.
Returns:
dict: The 'n_effs' key gives the effective indices
of the modes. The 'modes' key exists of mode
profiles were solved for; in this case, it will
return arrays of the mode profiles.
"""
structure = self._structure = self.wg
wavelength = self.wg._wl
self._ms = ms._ModeSolverSemiVectorial(
wavelength, structure, self._boundary, self._semi_vectorial_method
)
self._ms.solve(
self._n_eigs,
self._tol,
self._mode_profiles,
initial_mode_guess=self._initial_mode_guess,
)
self.n_effs = self._ms.neff
r = {"n_effs": self.n_effs}
if self._mode_profiles:
r["modes"] = self._ms.modes
self._ms.modes[0] = np.real(self._ms.modes[0])
self._initial_mode_guess = np.real(self._ms.modes[0])
self.modes = self._ms.modes
return r
def write_modes_to_file(
self,
filename: PosixPath = "mode.dat",
plot: bool = True,
analyse: bool = True,
logscale: bool = False,
) -> List[ndarray]:
"""
Writes the mode fields to a file and optionally plots them.
Args:
filename (str): The nominal filename to use for the saved
data. The suffix will be automatically be changed to
identifiy each mode number. Default is 'mode.dat'
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
analyse (bool): `True` if an analysis on the fundamental
mode should be performed. The analysis adds to the
plot of the fundamental mode the power mode-field
diameter (MFD) and marks it on the output, and it
marks with a cross the maximum E-field value.
Default is `True`.
Returns:
dict: A dictionary containing the effective indices
and mode field profiles (if solved for).
"""
for i, mode in enumerate(self._ms.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
self._write_mode_to_file(np.real(mode), filename_mode)
if plot:
self.plot_modes(filename=filename, analyse=analyse, logscale=logscale)
return self.modes
def plot_modes(
self,
filename: PosixPath = "mode.dat",
analyse: bool = True,
logscale: bool = False,
) -> None:
for i, mode in enumerate(self.modes):
filename_mode = self._get_mode_filename(
self._semi_vectorial_method, i, filename
)
if i == 0 and analyse:
A, centre, sigma_2 = anal.fit_gaussian(
self.wg.xc, self.wg.yc, np.abs(mode)
)
subtitle = (
"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
"MFD_{y} = %.3f"
) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
plt.figure()
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
subtitle,
sigma_2[0],
sigma_2[1],
centre[0],
centre[1],
wavelength=self.wg._wl,
logscale=logscale,
)
else:
plt.figure()
self._plot_mode(
self._semi_vectorial_method,
i,
filename_mode,
self.n_effs[i],
wavelength=self.wg._wl,
logscale=logscale,
)
| [
"numpy.abs",
"matplotlib.pylab.figure",
"modes._mode_solver._ModeSolver.__init__",
"numpy.real",
"modes._mode_solver_lib._ModeSolverSemiVectorial"
] | [((2010, 2098), 'modes._mode_solver._ModeSolver.__init__', '_ModeSolver.__init__', (['self', 'n_eigs', 'tol', 'boundary', 'mode_profiles', 'initial_mode_guess'], {}), '(self, n_eigs, tol, boundary, mode_profiles,\n initial_mode_guess)\n', (2030, 2098), False, 'from modes._mode_solver import _ModeSolver\n'), ((2686, 2786), 'modes._mode_solver_lib._ModeSolverSemiVectorial', 'ms._ModeSolverSemiVectorial', (['wavelength', 'structure', 'self._boundary', 'self._semi_vectorial_method'], {}), '(wavelength, structure, self._boundary, self.\n _semi_vectorial_method)\n', (2713, 2786), True, 'from modes import _mode_solver_lib as ms\n'), ((3155, 3181), 'numpy.real', 'np.real', (['self._ms.modes[0]'], {}), '(self._ms.modes[0])\n', (3162, 3181), True, 'import numpy as np\n'), ((3221, 3247), 'numpy.real', 'np.real', (['self._ms.modes[0]'], {}), '(self._ms.modes[0])\n', (3228, 3247), True, 'import numpy as np\n'), ((4646, 4659), 'numpy.real', 'np.real', (['mode'], {}), '(mode)\n', (4653, 4659), True, 'import numpy as np\n'), ((5536, 5548), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (5546, 5548), True, 'import matplotlib.pylab as plt\n'), ((6016, 6028), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (6026, 6028), True, 'import matplotlib.pylab as plt\n'), ((5263, 5275), 'numpy.abs', 'np.abs', (['mode'], {}), '(mode)\n', (5269, 5275), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import _astype_copy_false
class WeightsComputer:
    '''
    Supervised and unsupervised term-weighting schemes for a document-term
    count matrix. The scheme chosen in ``__init__`` is bound to
    ``self.method``; each scheme returns a per-feature weight vector.
    Weight methods:
    idf : log( (1 + n) / (1 + df(t)) ) + 1
    dfs : Distinguishing feature selector
    chi2 : Term Weighting Based on Chi-Square Statistic
    ig : Term weighting based on information gain
    igm: Term Weighting Based on Inverse Gravity Moment
    pb : Probability-Based Term Weighting
    idf_icf : Term Weighting Based on Inverse Class Frequency
    rf : Term Weighting Based on Relevance Frequency
    idf_icsdf : Term Weighting Based on Inverse Class Density Frequency
    iadf : inverse average document frequency
    iadf_norm : inverse average document frequency normalized
    '''
    def __init__(
        self,
        dtype,
        weight_method:str,
        smooth_idf: bool = True
    ):
        # Resolve the scheme name to a bound method. Parameters may arrive
        # wrapped in a 1-tuple (e.g. from a parameter grid), so unwrap first.
        try:
            if type(weight_method) is tuple:
                weight_method = weight_method[0]
            self.method = getattr(self, weight_method)
        except AttributeError:
            print(f'Method {weight_method} is not implemmnted.')
            print('Check the list of avaliable parameters')
        self.dtype = dtype
        if type(self.dtype) is tuple:
            self.dtype = self.dtype[0]
        # When True, +1 smoothing is applied to document/class frequencies.
        self.smooth_idf = smooth_idf
        # Lambda coefficient for the IGM scheme (see igm()); presumably the
        # value recommended in the IGM paper — TODO confirm.
        self.igm_lambda = 7.0
        # Per-class contingency tables, built lazily by make_cross_tab().
        self.cross_tab = None
    @staticmethod
    def _document_frequency(X):
        """Count the number of non-zero values for each feature in sparse X."""
        # print('type X: ', type(X))
        # NOTE(review): leftover debug print — consider removing.
        print(X.shape)
        if sp.isspmatrix_csr(X):
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            # Non-CSR path: indptr differences give per-column counts.
            return np.diff(X.indptr)
    def make_cross_tab(self, X, y):
        '''Computes Two-way contingency table of a term t
        Parameters
        ----------
        X : sparse matrix of (n_samples, n_features)
            A matrix of term/token counts.
        y : vector of class labels (n_samples,)
        Returns
        -------
        np.array of shape (n_classes, 4, n_features)
        '''
        cross_tab = []
        n_docs = X.shape[0]
        classes, counts = np.unique(y, return_counts=True)
        for i, cls in enumerate(classes):
            # Row indices of documents inside / outside the class.
            cat = np.array(np.where(y==cls)).flatten()
            not_cat = np.array(np.where(y!=cls)).flatten()
            # Belong to cls, contain term t
            a = self._document_frequency(X[cat]) + int(self.smooth_idf)
            # Belong to cls, doesn`t contain term t
            b = counts[i] - a + 2*int(self.smooth_idf)
            # Don`t belong to cls, contain term t
            c = self._document_frequency(X[not_cat]) + int(self.smooth_idf)
            # Don`t belong to cls, doesn`t contain term t
            d = (n_docs - counts[i]) - c + 2*int(self.smooth_idf)
            cross_tab.append([a,b,c,d])
        self.cross_tab = cross_tab
    def idf(self, X, y):
        # Classic (smoothed) inverse document frequency.
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs= X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        return np.log(n_docs / df) + 1
    def dfs(self, X, y):
        # Distinguishing Feature Selector, summed over classes.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            # np.ones_like clamps denominators to >= 1 to avoid divide-by-zero.
            nominator = a / (np.max(np.c_[a + c, np.ones_like(a + c)], axis=1))
            denom_first = np.max(np.c_[a + b, np.ones_like(a + b)], axis=1)
            denom_second = np.max(np.c_[c+d, np.ones_like(c+d)], axis=1)
            denominator = b/denom_first + c/denom_second + 1
            weight_factors.append(nominator / denominator)
        return np.sum(weight_factors, axis=0)
    def chi2(self, X, y):
        # Chi-square statistic of the term/class association, maximised
        # over classes.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # N documents
        D = X.shape[0]
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            nominator = np.square(a*d - b*c)
            denominator = (a+c)*(b+d)*(a+b)*(c+d)
            # Clamp to >= 1 to avoid division by zero.
            denominator = np.max(np.c_[denominator, np.ones_like(denominator)], axis=-1)
            weight_factors.append(nominator / denominator)
        return D * np.max(weight_factors, axis=0)
    def ig(self, X, y):
        # Information-gain style weighting, maximised over classes.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # N documents
        N = X.shape[0]
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            # Add +1 to denominators to avoid Zero Division error
            den_first = np.max(np.c_[(a+c)*(a+b), np.ones_like((a+c)*(a+b))], axis=-1)
            first = a/N*np.log(1+(a*N)/den_first)
            den_second = np.max(np.c_[(b+d)*(a+b), np.ones_like((b+d)*(a+b))], axis=-1)
            second = b/N*np.log(1+(b*N)/den_second)
            den_third = np.max(np.c_[(a+c)*(c+d), np.ones_like((a+c)*(c+d))], axis=-1)
            third = c/N*np.log(1+(c*N)/den_third)
            den_fourth = np.max(np.c_[(b+d)*(c+d), np.ones_like((b+d)*(c+d))], axis=-1)
            fourth = d/N*np.log(1+(d*N)/den_fourth)
            weight_factors.append(first + second + third + fourth)
        return np.max(weight_factors, axis=0)
    def igm(self, X, y):
        # Inverse Gravity Moment: largest class frequency over the
        # rank-weighted sum of class frequencies.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # Per-class 'a' counts sorted descending along the class axis.
        class_based_dfs = np.sort([cat[0] for cat in self.cross_tab], axis=0)[::-1]
        n_classes = class_based_dfs.shape[0]
        max_freq = np.max(class_based_dfs, axis=0)
        igm = max_freq / np.sum(class_based_dfs.T @ np.arange(1, n_classes+1), axis=0)
        return 1+ self.igm_lambda*igm
    def pb(self, X, y):
        # Probability-based weighting: log(1 + (a/b)*(a/c)), max over classes.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            # Denominators clamped to >= 1 to avoid division by zero.
            first = a / (np.max(np.c_[b, np.ones_like(b)], axis=1))
            second = a / (np.max(np.c_[c, np.ones_like(c)], axis=1))
            pb = np.log(1 + first*second)
            weight_factors.append(pb)
        return np.max(weight_factors, axis=0)
    def idf_icf(self, X, y):
        # idf multiplied by inverse class frequency (icf).
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs = X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        idf = np.log(n_docs / df) + 1
        n_classes = len(np.unique(y))
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        # Number of classes where term t occures
        class_factors = np.zeros(shape=(X.shape[1], )) #self.cross_tab[0][0]
        for category in self.cross_tab:
            a, b, c, d = category
            # Undo the smoothing so the presence test uses the raw count.
            a = a - int(self.smooth_idf)
            class_factors += (a > 0)
        icf = np.log((n_classes + int(self.smooth_idf))/(class_factors+int(self.smooth_idf))) + 1
        self.icf_mean = np.mean(icf)
        return idf*icf
    def rf(self, X, y):
        # Relevance frequency: log(2 + a/max(1, c)), maximised over classes.
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        weight_factors = []
        for category in self.cross_tab:
            a, b, c, d = category
            rf = np.log(2 + a / (np.max(np.c_[c, np.ones_like(c)], axis=1)))
            weight_factors.append(rf)
        return np.max(weight_factors, axis=0)
    def idf_icsdf(self, X, y):
        # idf multiplied by inverse class space density frequency (icsdf).
        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        n_docs = X.shape[0]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_docs += int(self.smooth_idf)
        idf = np.log(n_docs / df) + 1
        classes, counts = np.unique(y, return_counts=True)
        n_classes = len(classes)
        if self.cross_tab is None:
            self.make_cross_tab(X, y)
        class_factors = []
        for i, category in enumerate(self.cross_tab):
            a, b, c, d = category
            a = a - int(self.smooth_idf)
            D_cls = counts[i]
            # Fraction of class-i documents containing the term.
            class_factors.append(a / D_cls)
        n_classes += int(self.smooth_idf)
        clf_sum = np.sum(class_factors, axis=0) # + int(self.smooth_idf)
        icsdf = np.log(n_classes / clf_sum) + 1
        self.icsdf_mean = np.mean(icsdf)
        return idf * icsdf
    def iadf(self, X, y):
        # Inverse average document frequency: penalises terms whose df is
        # far from the mean df (variance-like 'adf' term).
        D = X.shape[0]
        n_terms = X.shape[1]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        mean_df = np.sum(df) / n_terms
        adf = np.square(df - mean_df) / n_terms
        return np.log((D + 1) / (adf + 1))
    def iadf_norm(self, X, y):
        # Min-max normalised variant of iadf.
        D = X.shape[0]
        n_terms = X.shape[1]
        df = self._document_frequency(X)
        df = df.astype(self.dtype, **_astype_copy_false(df))
        mean_df = np.sum(df) / n_terms
        adf = np.square(df - mean_df) / n_terms
        adf_1 = np.log(1/(adf+1)) + 1
        # Rescale adf_1 to [0, 1] before the final log transform.
        adf_2 = (adf_1 - np.min(adf_1)) / (np.max(adf_1) - np.min(adf_1))
        return np.log((D+1) / (adf_2 + 1)) | [
"scipy.sparse.isspmatrix_csr",
"numpy.mean",
"numpy.ones_like",
"numpy.unique",
"numpy.where",
"numpy.sort",
"numpy.log",
"numpy.diff",
"numpy.min",
"numpy.max",
"numpy.square",
"numpy.sum",
"numpy.zeros",
"sklearn.utils.fixes._astype_copy_false",
"numpy.bincount",
"numpy.arange"
] | [((1634, 1654), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['X'], {}), '(X)\n', (1651, 1654), True, 'import scipy.sparse as sp\n'), ((2223, 2255), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (2232, 2255), True, 'import numpy as np\n'), ((3946, 3976), 'numpy.sum', 'np.sum', (['weight_factors'], {'axis': '(0)'}), '(weight_factors, axis=0)\n', (3952, 3976), True, 'import numpy as np\n'), ((5472, 5502), 'numpy.max', 'np.max', (['weight_factors'], {'axis': '(0)'}), '(weight_factors, axis=0)\n', (5478, 5502), True, 'import numpy as np\n'), ((5752, 5783), 'numpy.max', 'np.max', (['class_based_dfs'], {'axis': '(0)'}), '(class_based_dfs, axis=0)\n', (5758, 5783), True, 'import numpy as np\n'), ((6343, 6373), 'numpy.max', 'np.max', (['weight_factors'], {'axis': '(0)'}), '(weight_factors, axis=0)\n', (6349, 6373), True, 'import numpy as np\n'), ((6981, 7010), 'numpy.zeros', 'np.zeros', ([], {'shape': '(X.shape[1],)'}), '(shape=(X.shape[1],))\n', (6989, 7010), True, 'import numpy as np\n'), ((7317, 7329), 'numpy.mean', 'np.mean', (['icf'], {}), '(icf)\n', (7324, 7329), True, 'import numpy as np\n'), ((7684, 7714), 'numpy.max', 'np.max', (['weight_factors'], {'axis': '(0)'}), '(weight_factors, axis=0)\n', (7690, 7714), True, 'import numpy as np\n'), ((8165, 8197), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (8174, 8197), True, 'import numpy as np\n'), ((8596, 8625), 'numpy.sum', 'np.sum', (['class_factors'], {'axis': '(0)'}), '(class_factors, axis=0)\n', (8602, 8625), True, 'import numpy as np\n'), ((8725, 8739), 'numpy.mean', 'np.mean', (['icsdf'], {}), '(icsdf)\n', (8732, 8739), True, 'import numpy as np\n'), ((9051, 9078), 'numpy.log', 'np.log', (['((D + 1) / (adf + 1))'], {}), '((D + 1) / (adf + 1))\n', (9057, 9078), True, 'import numpy as np\n'), ((9480, 9509), 'numpy.log', 'np.log', (['((D + 1) / (adf_2 + 1))'], {}), '((D + 1) / (adf_2 + 1))\n', (9486, 
9509), True, 'import numpy as np\n'), ((1675, 1719), 'numpy.bincount', 'np.bincount', (['X.indices'], {'minlength': 'X.shape[1]'}), '(X.indices, minlength=X.shape[1])\n', (1686, 1719), True, 'import numpy as np\n'), ((1753, 1770), 'numpy.diff', 'np.diff', (['X.indptr'], {}), '(X.indptr)\n', (1760, 1770), True, 'import numpy as np\n'), ((3354, 3373), 'numpy.log', 'np.log', (['(n_docs / df)'], {}), '(n_docs / df)\n', (3360, 3373), True, 'import numpy as np\n'), ((4250, 4274), 'numpy.square', 'np.square', (['(a * d - b * c)'], {}), '(a * d - b * c)\n', (4259, 4274), True, 'import numpy as np\n'), ((4488, 4518), 'numpy.max', 'np.max', (['weight_factors'], {'axis': '(0)'}), '(weight_factors, axis=0)\n', (4494, 4518), True, 'import numpy as np\n'), ((5630, 5681), 'numpy.sort', 'np.sort', (['[cat[0] for cat in self.cross_tab]'], {'axis': '(0)'}), '([cat[0] for cat in self.cross_tab], axis=0)\n', (5637, 5681), True, 'import numpy as np\n'), ((6265, 6291), 'numpy.log', 'np.log', (['(1 + first * second)'], {}), '(1 + first * second)\n', (6271, 6291), True, 'import numpy as np\n'), ((6771, 6790), 'numpy.log', 'np.log', (['(n_docs / df)'], {}), '(n_docs / df)\n', (6777, 6790), True, 'import numpy as np\n'), ((6820, 6832), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6829, 6832), True, 'import numpy as np\n'), ((8114, 8133), 'numpy.log', 'np.log', (['(n_docs / df)'], {}), '(n_docs / df)\n', (8120, 8133), True, 'import numpy as np\n'), ((8667, 8694), 'numpy.log', 'np.log', (['(n_classes / clf_sum)'], {}), '(n_classes / clf_sum)\n', (8673, 8694), True, 'import numpy as np\n'), ((8967, 8977), 'numpy.sum', 'np.sum', (['df'], {}), '(df)\n', (8973, 8977), True, 'import numpy as np\n'), ((9002, 9025), 'numpy.square', 'np.square', (['(df - mean_df)'], {}), '(df - mean_df)\n', (9011, 9025), True, 'import numpy as np\n'), ((9284, 9294), 'numpy.sum', 'np.sum', (['df'], {}), '(df)\n', (9290, 9294), True, 'import numpy as np\n'), ((9319, 9342), 'numpy.square', 'np.square', (['(df - 
mean_df)'], {}), '(df - mean_df)\n', (9328, 9342), True, 'import numpy as np\n'), ((9369, 9390), 'numpy.log', 'np.log', (['(1 / (adf + 1))'], {}), '(1 / (adf + 1))\n', (9375, 9390), True, 'import numpy as np\n'), ((3197, 3219), 'sklearn.utils.fixes._astype_copy_false', '_astype_copy_false', (['df'], {}), '(df)\n', (3215, 3219), False, 'from sklearn.utils.fixes import _astype_copy_false\n'), ((4943, 4972), 'numpy.log', 'np.log', (['(1 + a * N / den_first)'], {}), '(1 + a * N / den_first)\n', (4949, 4972), True, 'import numpy as np\n'), ((5083, 5113), 'numpy.log', 'np.log', (['(1 + b * N / den_second)'], {}), '(1 + b * N / den_second)\n', (5089, 5113), True, 'import numpy as np\n'), ((5222, 5251), 'numpy.log', 'np.log', (['(1 + c * N / den_third)'], {}), '(1 + c * N / den_third)\n', (5228, 5251), True, 'import numpy as np\n'), ((5362, 5392), 'numpy.log', 'np.log', (['(1 + d * N / den_fourth)'], {}), '(1 + d * N / den_fourth)\n', (5368, 5392), True, 'import numpy as np\n'), ((6614, 6636), 'sklearn.utils.fixes._astype_copy_false', '_astype_copy_false', (['df'], {}), '(df)\n', (6632, 6636), False, 'from sklearn.utils.fixes import _astype_copy_false\n'), ((7957, 7979), 'sklearn.utils.fixes._astype_copy_false', '_astype_copy_false', (['df'], {}), '(df)\n', (7975, 7979), False, 'from sklearn.utils.fixes import _astype_copy_false\n'), ((8925, 8947), 'sklearn.utils.fixes._astype_copy_false', '_astype_copy_false', (['df'], {}), '(df)\n', (8943, 8947), False, 'from sklearn.utils.fixes import _astype_copy_false\n'), ((9242, 9264), 'sklearn.utils.fixes._astype_copy_false', '_astype_copy_false', (['df'], {}), '(df)\n', (9260, 9264), False, 'from sklearn.utils.fixes import _astype_copy_false\n'), ((9416, 9429), 'numpy.min', 'np.min', (['adf_1'], {}), '(adf_1)\n', (9422, 9429), True, 'import numpy as np\n'), ((9434, 9447), 'numpy.max', 'np.max', (['adf_1'], {}), '(adf_1)\n', (9440, 9447), True, 'import numpy as np\n'), ((9450, 9463), 'numpy.min', 'np.min', (['adf_1'], {}), 
'(adf_1)\n', (9456, 9463), True, 'import numpy as np\n'), ((5836, 5863), 'numpy.arange', 'np.arange', (['(1)', '(n_classes + 1)'], {}), '(1, n_classes + 1)\n', (5845, 5863), True, 'import numpy as np\n'), ((2325, 2343), 'numpy.where', 'np.where', (['(y == cls)'], {}), '(y == cls)\n', (2333, 2343), True, 'import numpy as np\n'), ((2384, 2402), 'numpy.where', 'np.where', (['(y != cls)'], {}), '(y != cls)\n', (2392, 2402), True, 'import numpy as np\n'), ((3706, 3725), 'numpy.ones_like', 'np.ones_like', (['(a + b)'], {}), '(a + b)\n', (3718, 3725), True, 'import numpy as np\n'), ((3782, 3801), 'numpy.ones_like', 'np.ones_like', (['(c + d)'], {}), '(c + d)\n', (3794, 3801), True, 'import numpy as np\n'), ((4373, 4398), 'numpy.ones_like', 'np.ones_like', (['denominator'], {}), '(denominator)\n', (4385, 4398), True, 'import numpy as np\n'), ((4882, 4913), 'numpy.ones_like', 'np.ones_like', (['((a + c) * (a + b))'], {}), '((a + c) * (a + b))\n', (4894, 4913), True, 'import numpy as np\n'), ((5021, 5052), 'numpy.ones_like', 'np.ones_like', (['((b + d) * (a + b))'], {}), '((b + d) * (a + b))\n', (5033, 5052), True, 'import numpy as np\n'), ((5161, 5192), 'numpy.ones_like', 'np.ones_like', (['((a + c) * (c + d))'], {}), '((a + c) * (c + d))\n', (5173, 5192), True, 'import numpy as np\n'), ((5300, 5331), 'numpy.ones_like', 'np.ones_like', (['((b + d) * (c + d))'], {}), '((b + d) * (c + d))\n', (5312, 5331), True, 'import numpy as np\n'), ((3629, 3648), 'numpy.ones_like', 'np.ones_like', (['(a + c)'], {}), '(a + c)\n', (3641, 3648), True, 'import numpy as np\n'), ((6152, 6167), 'numpy.ones_like', 'np.ones_like', (['b'], {}), '(b)\n', (6164, 6167), True, 'import numpy as np\n'), ((6221, 6236), 'numpy.ones_like', 'np.ones_like', (['c'], {}), '(c)\n', (6233, 6236), True, 'import numpy as np\n'), ((7603, 7618), 'numpy.ones_like', 'np.ones_like', (['c'], {}), '(c)\n', (7615, 7618), True, 'import numpy as np\n')] |
import torch
from torch import Tensor
from torch.nn.functional import softmax
from src.helper_functions import predict
from typing import Dict, List
from collections import defaultdict
import numpy as np
def calculate_suff(attrs):
    """Sufficiency scoring of an attribution.

    TODO: not implemented yet — placeholder that currently returns None.
    """
    pass
def calculate_comp(
    attr: Tensor,
    k: float,
    replacement_emb: Tensor,
    model,
    input_emb: Tensor,
    attention_mask: Tensor,
    prediction: Tensor,
) -> float:
    """
    Comprehensiveness scoring of an attribution. Higher is better.

    Masks the top-k attributed token embeddings and measures how much the
    probability of the originally predicted class drops.

    :param attr: Attribution scores for one sentence
    :param k: top-k value (how many embeddings are replaced)
    :param replacement_emb: embedding for one word that should be used as replacement
    :param model: model used to produce predictions
    :param input_emb: Embedding of the sentence for which the attribution was computed
    :param attention_mask: Original attention mask for the sentence
    :param prediction: what model outputs for the input
    """
    # Prediction on the input with the top-k token embeddings replaced.
    masked_input = replace_k_percent(attr, k, replacement_emb, input_emb)
    masked_logits = predict(model, masked_input, attention_mask)
    # Work in probability space for both the original and masked outputs.
    masked_probs = softmax(masked_logits, dim=1)
    orig_probs = softmax(prediction, dim=1)
    predicted_class = torch.argmax(orig_probs).item()
    drop = torch.max(orig_probs) - masked_probs[0, predicted_class]
    return drop.item()
def calculate_log_odds(
    attr: Tensor,
    k: float,
    replacement_emb: Tensor,
    model,
    input_emb: Tensor,
    attention_mask: Tensor,
    prediction: Tensor,
) -> float:
    """
    Log-odds scoring of an attribution.

    Ratio (in log space) of the predicted class probability after masking
    the top-k attributed tokens versus before.

    :param attr: Attribution scores for one sentence
    :param k: top-k value (how many embeddings are replaced)
    :param replacement_emb: embedding for one word that should be used as replacement
    :param model: model used to produce predictions
    :param input_emb: Embedding of the sentence for which the attribution was computed
    :param attention_mask: Original attention mask for the sentence
    :param prediction: what model outputs for the input
    """
    # Prediction on the input with the top-k token embeddings replaced.
    masked_input = replace_k_percent(attr, k, replacement_emb, input_emb)
    masked_probs = softmax(predict(model, masked_input, attention_mask), dim=1)
    orig_probs = softmax(prediction, dim=1)
    predicted_class = torch.argmax(orig_probs).item()
    ratio = masked_probs[0, predicted_class] / torch.max(orig_probs)
    return torch.log(ratio).item()
def replace_k_percent(attr: Tensor, k: float, replacement_emb: Tensor, input_emb: Tensor) -> Tensor:
    """
    Return a copy of the sentence embedding in which the top-k attributed
    token embeddings are overwritten by a fixed replacement embedding.
    CLS (first) and SEP (last) positions are never replaced.

    :param attr: Attribution scores for one sentence
    :param k: top-k value (how many embeddings are replaced)
    :param replacement_emb: embedding for one word that should be used as replacement
    :param input_emb: Embedding of the sentence for which the attribution was computed
    """
    assert attr.dim() == 1, "Attribution should be for just one sentence (without padding!)"
    assert (
        attr.shape[0] == input_emb.shape[1]
    ), "Attribution should contain an equal number of tokens as the input embedding!"
    n_tokens = attr.shape[0]
    # CLS and SEP do not count towards the number of replaceable tokens.
    n_replace = round((n_tokens - 2) * k)
    # Search top-k on attr without CLS/SEP, then shift indices back by one.
    top_indices = torch.topk(attr[1:-1], n_replace).indices + 1
    assert 0 not in top_indices, "Should not replace CLS embedding"
    assert n_tokens - 1 not in top_indices, "Should not replace SEP embedding"
    result = input_emb.clone()
    result[0, top_indices] = replacement_emb
    return result
def get_avg_scores(scores: Dict[str, Dict[float, List[float]]]) -> Dict[str, Dict[float, float]]:
    """
    Collapse each list of scores to its mean, preserving the
    baseline -> k nesting of the input mapping.
    """
    averaged: Dict[str, Dict[float, float]] = {
        baseline: defaultdict(float) for baseline in scores
    }
    for baseline, per_k in scores.items():
        for k_value, values in per_k.items():
            averaged[baseline][k_value] = np.mean(values)
    return averaged
| [
"numpy.mean",
"torch.topk",
"torch.max",
"src.helper_functions.predict",
"collections.defaultdict",
"torch.nn.functional.softmax",
"torch.argmax"
] | [((1048, 1094), 'src.helper_functions.predict', 'predict', (['model', 'replaced_embed', 'attention_mask'], {}), '(model, replaced_embed, attention_mask)\n', (1055, 1094), False, 'from src.helper_functions import predict\n'), ((1194, 1218), 'torch.nn.functional.softmax', 'softmax', (['new_pred'], {'dim': '(1)'}), '(new_pred, dim=1)\n', (1201, 1218), False, 'from torch.nn.functional import softmax\n'), ((1236, 1262), 'torch.nn.functional.softmax', 'softmax', (['prediction'], {'dim': '(1)'}), '(prediction, dim=1)\n', (1243, 1262), False, 'from torch.nn.functional import softmax\n'), ((2155, 2201), 'src.helper_functions.predict', 'predict', (['model', 'replaced_embed', 'attention_mask'], {}), '(model, replaced_embed, attention_mask)\n', (2162, 2201), False, 'from src.helper_functions import predict\n'), ((2301, 2325), 'torch.nn.functional.softmax', 'softmax', (['new_pred'], {'dim': '(1)'}), '(new_pred, dim=1)\n', (2308, 2325), False, 'from torch.nn.functional import softmax\n'), ((2343, 2369), 'torch.nn.functional.softmax', 'softmax', (['prediction'], {'dim': '(1)'}), '(prediction, dim=1)\n', (2350, 2369), False, 'from torch.nn.functional import softmax\n'), ((4104, 4122), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4115, 4122), False, 'from collections import defaultdict\n'), ((1277, 1301), 'torch.argmax', 'torch.argmax', (['prediction'], {}), '(prediction)\n', (1289, 1301), False, 'import torch\n'), ((2384, 2408), 'torch.argmax', 'torch.argmax', (['prediction'], {}), '(prediction)\n', (2396, 2408), False, 'import torch\n'), ((3580, 3615), 'torch.topk', 'torch.topk', (['attr[1:-1]', 'num_replace'], {}), '(attr[1:-1], num_replace)\n', (3590, 3615), False, 'import torch\n'), ((4289, 4321), 'numpy.mean', 'np.mean', (['scores[baseline_str][k]'], {}), '(scores[baseline_str][k])\n', (4296, 4321), True, 'import numpy as np\n'), ((1321, 1342), 'torch.max', 'torch.max', (['prediction'], {}), '(prediction)\n', (1330, 1342), False, 'import 
torch\n'), ((2459, 2480), 'torch.max', 'torch.max', (['prediction'], {}), '(prediction)\n', (2468, 2480), False, 'import torch\n')] |
# author: <NAME>
from p5 import *
import sympy as sym
import mpmath as mp
import numpy as np
from tkinter import Tk
from scipy.spatial import distance
import PIL
from PIL import Image
import argparse
import os
import csv
import mimetypes
DEBUG = False
# ---- CLI configuration -------------------------------------------------
parser = argparse.ArgumentParser(
    description='Custom frame annotator implemented in p5 and python.')
# NOTE(review): the trailing commas below wrap each statement in a 1-tuple
# containing the parser action; harmless, but probably unintended.
parser.add_argument('--input', dest='input',
                    help='Path to the directory with the input images', required=False, type=str, default='input/'),
parser.add_argument('--output', dest='output',
                    help='Path to the directory with the output images', required=False, type=str, default='output/'),
parser.add_argument('--cache', dest='cache',
                    help='Path to the cache directory (DON\'T INCLUDE \\)', required=False, type=str, default='cache'),
parser.add_argument('--scale', dest='scale',
                    help='scaling factor for viewing images', required=False, type=float, default=0.3),
# ---- Window geometry derived from the screen resolution ----------------
root = Tk()
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
window_offset = 200
image_width = width - window_offset
image_height = (height/width) * image_width
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
cache_dir = args.cache
# ---- Mutable annotation state shared by the p5 callbacks below ---------
dirs = []         # cached image paths, kept sorted
images = []       # loaded p5 images, parallel to dirs
img_size = []     # display size per image, parallel to dirs
index = 0         # index of the currently shown image
points = []       # clicked corner points of the rectangle in progress
c_points = []     # NOTE(review): never used elsewhere in this file
lines = []        # debug line segments
rectangles = []   # finished annotations (x, y, w, h, tilt), normalised
p_colors = []     # per-point draw colors, parallel to points
l_colors = []     # per-line draw colors, parallel to lines
last_action = 'script started'
# NOTE(review): the color-name comments below do not match the RGB values.
std_color = Color(255, 255, 255) # white
a_color = Color(255, 0, 0) # azure
b_color = Color(0, 255, 0) # rose
c_color = Color(0, 0, 255) # pastel orange
def validate_dirs():
    """Ensure the input, output and cache directories exist.

    Creates any directory that is missing. Uses ``exist_ok=True`` so the
    check-then-create race of the original exists()/makedirs() pair cannot
    raise FileExistsError when two processes run concurrently.
    """
    global DEBUG, input_dir, output_dir, cache_dir
    for directory in (input_dir, output_dir, cache_dir):
        # exist_ok makes creation idempotent and race-free.
        os.makedirs(directory, exist_ok=True)
    if DEBUG:
        print('[validate_dirs] Validated Directories')
def load():
    # p5 hook: runs once before setup(). Prepares the directories, loads
    # all input images, and restores any saved annotations for the first
    # frame.
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    validate_dirs()
    load_images_from_folder(input_dir)
    rectangles = load_bbox_from_file()
    last_action = 'loaded images'
def setup():
    # p5 hook: configures the sketch window once after load().
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    size(width - window_offset, image_height)
    title('Light-notator')
    last_action = 'setup window'
    # Only redraw on demand; the event handlers call redraw() explicitly.
    no_loop()
    rect_mode(mode='CENTER')
def check_index():
    """Wrap the global image index around when it steps past either end
    of the image list."""
    global index
    last = len(images) - 1
    if index > last:
        index = 0
    elif index < 0:
        index = last
def draw():
    # p5 hook: renders the current image, the HUD text, all stored
    # rectangles, the in-progress corner points and any debug lines.
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    background(255)
    check_index()
    image(images[index], (0, 0), (image_width, image_height))
    # On-screen status readout.
    text(f'index: {index}', (5, 5))
    text(f'current image: ({dirs[index]})', (5, 15))
    text(f'# points: {len(points)}', (5, 25))
    text(f'last action: ({last_action})', (5, 35))
    for m_rectangle in rectangles:
        no_fill()
        stroke_weight(2)
        stroke(117, 255, 117)
        # Rectangles are stored normalised; scale back to pixel coordinates.
        x_translate = floor(m_rectangle[0] * img_size[index][0])
        y_translate = floor(m_rectangle[1] * img_size[index][1])
        rect_width = floor(m_rectangle[2] * img_size[index][0])
        rect_height = floor(m_rectangle[3] * img_size[index][1])
        # Rotate around the rectangle centre, then undo both transforms so
        # the next rectangle starts from a clean coordinate system.
        translate(x_translate, y_translate)
        rotate(m_rectangle[4])
        rect((0, 0), rect_width, rect_height)
        rotate(-1 * m_rectangle[4])
        translate(-1 * x_translate, -1 * y_translate)
    color_index = 0
    for m_point in points:
        fill(p_colors[color_index])
        stroke_weight(1)
        stroke(41)
        ellipse((m_point[0], m_point[1]), 5, 5)
        color_index += 1
    color_index = 0
    for m_line in lines:
        fill(l_colors[color_index])
        line(m_line[0], m_line[1])
        color_index += 1
    fill(std_color)
def mouse_pressed():
    # p5 event hook: each click registers a corner point; once three
    # points exist, constrain_square() completes the rectangle.
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    if DEBUG:
        print(f'mouse pressed at ({mouse_x},{mouse_y})')
    add_point(mouse_x, mouse_y, std_color)
    constrain_square()
    redraw()
def key_pressed():
    # p5 event hook — keyboard commands:
    #   r/R : remove the point closest to the cursor
    #   c/C : clear all points, lines and rectangles
    #   d   : force a redraw
    #   1/2 : save current annotations, then step to previous/next frame
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    if ((key == 'R') or (key == 'r')):
        remove_point()
    if ((key == 'c') or (key == 'C')):
        points = []
        lines = []
        rectangles = []
        p_colors = []
        l_colors = []
        last_action = 'cleared all points'
    if (key == 'd'):
        redraw()
    if (key == "2"):
        last_action = 'moved to next frame'
        write_bbox_to_file()
        index += 1
        check_index()
        rectangles = load_bbox_from_file()
    if (key == "1"):
        last_action = 'moved to previous frame'
        write_bbox_to_file()
        index -= 1
        check_index()
        rectangles = load_bbox_from_file()
    redraw()
def load_images_from_folder(folder):
    # Load every image in `folder`, save a scaled copy into the cache
    # directory, and record (path, p5 image, display size) triples sorted
    # by path.
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    for filename in os.listdir(folder):
        img_dir = os.path.join(folder, filename)
        # First 5 chars of the guessed MIME type select 'image/...' files.
        file_type = str(mimetypes.guess_type(img_dir)[0])[0:5]
        if file_type == 'image':
            temp_img = Image.open(img_dir)
            wsize = int((float(temp_img.size[0]) * float(args.scale)))
            hsize = int((float(temp_img.size[1]) * float(args.scale)))
            # NOTE(review): PIL.Image.ANTIALIAS is deprecated in Pillow >= 9.1
            # (removed in 10.0); Image.LANCZOS is the replacement.
            temp_img = temp_img.resize((wsize, hsize), PIL.Image.ANTIALIAS)
            new_dir = os.path.join(args.cache, filename)
            temp_img.save(f'{new_dir}')
            img_size.append((image_width, image_height))
            dirs.append(new_dir)
            images.append(load_image(new_dir))
    # Sort all three parallel lists by path. NOTE(review): raises
    # ValueError when the folder contained no images (unpacking empty zip).
    dirs, images, img_size = (list(t)
                              for t in zip(*sorted(zip(dirs, images, img_size))))
def add_point(in_x, in_y, color):
    """Register a clicked position as an annotation point, provided it
    lies inside the displayed image area."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    # Ignore clicks that land outside the image.
    if in_x > image_width or in_y > image_height:
        return
    points.append((in_x, in_y))
    p_colors.append(color)
    last_action = 'added point'
def add_line(temp_point_0, temp_point_1, color):
    """Store a debug line segment between two points."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    segment = (temp_point_0, temp_point_1)
    lines.append(segment)
    # The passed-in color is not used; lines are always drawn black.
    l_colors.append(Color(0, 0, 0))
def constrain_square():
    # Once exactly three corner points have been clicked, derive the
    # fourth corner and convert the points into a rotated rectangle:
    #   1. find the two most distant points -> hypotenuse ends (A, B)
    #   2. the remaining point is C, a true rectangle corner
    #   3. project along the second side to snap B and compute corner D
    #   4. store centre, size and tilt via add_rectangle()
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    if len(points) == 3:
        dist = []
        pairs = []
        # All pairwise distances (self-pairs contribute zero distance).
        for pointA in points:
            for pointB in points:
                dist.append(abs(distance.euclidean(pointA, pointB)))
                pairs.append((pointA, pointB))
        for point in points:
            # arbitrarily define temporary points in order to find pointC
            if not ((point == pairs[dist.index(max(dist))][0]) or (point == pairs[dist.index(max(dist))][1])):
                pointC = point
        hypot = max(dist)
        temp_distance_0 = abs(distance.euclidean(
            pointC, pairs[dist.index(max(dist))][0]))
        temp_distance_1 = abs(distance.euclidean(
            pointC, pairs[dist.index(max(dist))][1]))
        # Label the hypotenuse ends so that A is the end farther from C.
        if (temp_distance_0 > temp_distance_1):
            pointA = pairs[dist.index(max(dist))][0]
            pointB = pairs[dist.index(max(dist))][1]
            angle_flip = False
        else:
            pointA = pairs[dist.index(max(dist))][1]
            pointB = pairs[dist.index(max(dist))][0]
            angle_flip = True
        # NOTE(review): angle_flip is recorded but never used afterwards.
        if DEBUG:
            p_colors[points.index(pointA)] = a_color
            p_colors[points.index(pointB)] = b_color
            p_colors[points.index(pointC)] = c_color
        leg1 = abs(distance.euclidean(pointC, pointA))
        hypot = abs(distance.euclidean(pointB, pointA))
        leg1_vector = (pointC[0] - pointA[0], pointC[1] - pointA[1])
        hypot_vector = (pointB[0] - pointA[0], pointB[1] - pointA[1])
        if DEBUG:
            add_line(pointA, pointB, std_color)
            print(
                f'leg vector is {leg1_vector} and hyp_vector is {hypot_vector}')
            print(
                f'pointA is {pointA} and pointB is {pointB} and pointC is {pointC}')
        # Interior angle between the leg A->C and the hypotenuse at A.
        theta = sym.acos(
            (leg1_vector[0]*hypot_vector[0]+leg1_vector[1]*hypot_vector[1])/(leg1*hypot))
        std_unit_vector = (1, 0)
        # Angle of the leg against the horizontal axis.
        theta_prime = sym.acos((leg1_vector[0]*std_unit_vector[0] +
                                 leg1_vector[1]*std_unit_vector[1])/(leg1))
        # Length of the rectangle's second side and its x/y displacement.
        leg2 = leg1 * mp.tan(theta)
        increment = (leg2 * mp.sin(theta_prime),
                     leg2 * mp.cos(theta_prime))
        temp_b_check = pointB[0] > pointA[0]
        if pointC[1] > pointA[1]:
            increment = (-1 * increment[0], increment[1])
        # Flip the displacement if it would move B to the wrong side of A.
        if not (temp_b_check == (float(pointC[0] + increment[0]) > pointA[0])):
            increment = (-1 * increment[0], -1 * increment[1])
        # Snap B onto the true rectangle corner and add the fourth corner D.
        third_point = (float(pointC[0] + increment[0]),
                       float(pointC[1] + increment[1]))
        points[points.index(pointB)] = third_point
        pointB = third_point
        pointD = (float(pointA[0] + increment[0]),
                  float(pointA[1] + increment[1]))
        add_point(pointD[0], pointD[1], std_color)
        validate_constraint()
        angle_factor = -1
        rectangle_tilt = get_angle([pointC[0], pointC[1]], [pointA[0], pointA[1]], [
                                   pointA[0] + 20, pointA[1]])
        if DEBUG:
            print(f'rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
        rectangle_tilt *= angle_factor
        if DEBUG:
            print(f'shifted rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
        rectangle_width = abs(distance.euclidean(pointC, pointA))
        rectangle_height = abs(distance.euclidean(pointD, pointA))
        # Rectangle centre = centroid of the four corner points.
        averageX = 0
        averageY = 0
        for point in points:
            averageX += point[0]
            averageY += point[1]
        averageX /= len(points)
        averageY /= len(points)
        add_rectangle(averageX, averageY, rectangle_width,
                      rectangle_height, rectangle_tilt)
        points = []
    else:
        last_action = 'constrain_square failed: not enough points'
    lines = []
def add_rectangle(in_x, in_y, rectangle_width, rectangle_height, rectangle_tilt):
    """Store a rectangle annotation normalised to the current image size
    (centre coordinates and extents as fractions of width/height)."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    img_w = img_size[index][0]
    img_h = img_size[index][1]
    rectangles.append((in_x / img_w, in_y / img_h,
                       rectangle_width / img_w, rectangle_height / img_h,
                       rectangle_tilt))
def validate_constraint():
    """Debug check: collect the corner angles of the point set that are
    (numerically) right angles, and print them when DEBUG is on."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    angles = []
    # Walk every ordered triple of distinct points; the middle point is
    # the vertex of the measured angle.
    for first in points:
        for vertex in points:
            if vertex == first:
                continue
            for second in points:
                if second == first or second == vertex:
                    continue
                angle = 180 * get_angle(first, vertex, second) / np.pi
                if angle == 90 or (angle > 89.9 and angle < 90.1):
                    angles.append(angle)
    if DEBUG:
        print(f'validated constraints: corner angles are {angles[0:4]}')
def get_angle(pointA, pointB, pointC):
    """Return the angle (radians) at vertex pointB between rays B->A and
    B->C, negated when pointA lies below pointC in screen coordinates."""
    ba = [pointA[0] - pointB[0], pointA[1] - pointB[1]]
    bc = [pointC[0] - pointB[0], pointC[1] - pointB[1]]
    cos_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    angle = np.arccos(cos_angle)
    # Screen y grows downward, so flip the sign to make the angle signed.
    return -angle if pointA[1] > pointC[1] else angle
def remove_point():
    """Delete the stored point closest to the mouse cursor, then re-run constrain_square()."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    cursor = (mouse_x, mouse_y)
    distances = [distance.euclidean(p, cursor) for p in points]
    points.pop(distances.index(min(distances)))
    last_action = 'removed closest point'
    constrain_square()
def load_bbox_from_file():
    """Read encoded rectangle annotations for the current image from its CSV sidecar.

    Returns a list of (x, y, w, h, tilt) float tuples, or [] when no CSV exists.
    """
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    # The sidecar lives next to the original input image, not the cache copy.
    file_dir = dirs[index].replace('cache', 'input')
    file_dir = os.path.splitext(file_dir)[0]+'.csv'
    if not os.path.isfile(file_dir):
        if DEBUG:
            print('There are no encoded annotations in corresponding text file.')
        return []
    if DEBUG:
        print('There are encoded annotations in corresponding text file.')
    loaded = []
    with open(file_dir) as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            if row != []:
                loaded.append(
                    (float(row[0]), float(row[1]), float(row[2]), float(row[3]), float(row[4])))
    return loaded
def write_bbox_to_file():
    """Persist the current image's rectangles to its CSV sidecar, replacing any old file."""
    global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
    file_dir = dirs[index].replace('cache', 'input')
    file_dir = os.path.splitext(file_dir)[0]+'.csv'
    if os.path.isfile(file_dir):
        os.remove(file_dir)
    with open(file_dir, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for m_rectangle in rectangles:
            # One row per rectangle: x, y, width, height, tilt.
            writer.writerow(list(m_rectangle[0:5]))
# Script entry point: load() and run() are defined earlier in this file.
if __name__ == '__main__':
    load()
    run()
| [
"numpy.linalg.norm",
"mimetypes.guess_type",
"os.remove",
"os.path.exists",
"os.listdir",
"sympy.acos",
"argparse.ArgumentParser",
"numpy.dot",
"csv.reader",
"csv.writer",
"os.path.splitext",
"os.path.isfile",
"mpmath.sin",
"mpmath.tan",
"PIL.Image.open",
"os.makedirs",
"os.path.join... | [((263, 359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Custom frame annotator implemented in p5 and python."""'}), "(description=\n 'Custom frame annotator implemented in p5 and python.')\n", (286, 359), False, 'import argparse\n'), ((1011, 1015), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (1013, 1015), False, 'from tkinter import Tk\n'), ((5748, 5766), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (5758, 5766), False, 'import os\n'), ((13793, 13817), 'os.path.isfile', 'os.path.isfile', (['file_dir'], {}), '(file_dir)\n', (13807, 13817), False, 'import os\n'), ((14760, 14784), 'os.path.isfile', 'os.path.isfile', (['file_dir'], {}), '(file_dir)\n', (14774, 14784), False, 'import os\n'), ((5786, 5816), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (5798, 5816), False, 'import os\n'), ((9274, 9375), 'sympy.acos', 'sym.acos', (['((leg1_vector[0] * hypot_vector[0] + leg1_vector[1] * hypot_vector[1]) / (\n leg1 * hypot))'], {}), '((leg1_vector[0] * hypot_vector[0] + leg1_vector[1] * hypot_vector[\n 1]) / (leg1 * hypot))\n', (9282, 9375), True, 'import sympy as sym\n'), ((9430, 9526), 'sympy.acos', 'sym.acos', (['((leg1_vector[0] * std_unit_vector[0] + leg1_vector[1] * std_unit_vector[1]\n ) / leg1)'], {}), '((leg1_vector[0] * std_unit_vector[0] + leg1_vector[1] *\n std_unit_vector[1]) / leg1)\n', (9438, 9526), True, 'import sympy as sym\n'), ((14794, 14813), 'os.remove', 'os.remove', (['file_dir'], {}), '(file_dir)\n', (14803, 14813), False, 'import os\n'), ((14876, 14952), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (14886, 14952), False, 'import csv\n'), ((1774, 1799), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1788, 1799), False, 'import os\n'), ((1813, 1835), 'os.makedirs', 
'os.makedirs', (['directory'], {}), '(directory)\n', (1824, 1835), False, 'import os\n'), ((5936, 5955), 'PIL.Image.open', 'Image.open', (['img_dir'], {}), '(img_dir)\n', (5946, 5955), False, 'from PIL import Image\n'), ((6196, 6230), 'os.path.join', 'os.path.join', (['args.cache', 'filename'], {}), '(args.cache, filename)\n', (6208, 6230), False, 'import os\n'), ((8754, 8788), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['pointC', 'pointA'], {}), '(pointC, pointA)\n', (8772, 8788), False, 'from scipy.spatial import distance\n'), ((8811, 8845), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['pointB', 'pointA'], {}), '(pointB, pointA)\n', (8829, 8845), False, 'from scipy.spatial import distance\n'), ((9573, 9586), 'mpmath.tan', 'mp.tan', (['theta'], {}), '(theta)\n', (9579, 9586), True, 'import mpmath as mp\n'), ((10756, 10790), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['pointC', 'pointA'], {}), '(pointC, pointA)\n', (10774, 10790), False, 'from scipy.spatial import distance\n'), ((10823, 10857), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['pointD', 'pointA'], {}), '(pointD, pointA)\n', (10841, 10857), False, 'from scipy.spatial import distance\n'), ((12856, 12870), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (12862, 12870), True, 'import numpy as np\n'), ((13306, 13341), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['point', 'curr_pos'], {}), '(point, curr_pos)\n', (13324, 13341), False, 'from scipy.spatial import distance\n'), ((13749, 13775), 'os.path.splitext', 'os.path.splitext', (['file_dir'], {}), '(file_dir)\n', (13765, 13775), False, 'import os\n'), ((14007, 14041), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (14017, 14041), False, 'import csv\n'), ((14716, 14742), 'os.path.splitext', 'os.path.splitext', (['file_dir'], {}), '(file_dir)\n', (14732, 14742), False, 'import os\n'), ((9616, 9635), 'mpmath.sin', 'mp.sin', 
(['theta_prime'], {}), '(theta_prime)\n', (9622, 9635), True, 'import mpmath as mp\n'), ((9665, 9684), 'mpmath.cos', 'mp.cos', (['theta_prime'], {}), '(theta_prime)\n', (9671, 9684), True, 'import mpmath as mp\n'), ((12874, 12892), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (12888, 12892), True, 'import numpy as np\n'), ((12895, 12913), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (12909, 12913), True, 'import numpy as np\n'), ((5841, 5870), 'mimetypes.guess_type', 'mimetypes.guess_type', (['img_dir'], {}), '(img_dir)\n', (5861, 5870), False, 'import mimetypes\n'), ((7657, 7691), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['pointA', 'pointB'], {}), '(pointA, pointB)\n', (7675, 7691), False, 'from scipy.spatial import distance\n')] |
import numpy as np
from .utils import *
def dp_value_iteration(env, gamma, tol=1e-3, iter_max=100):
    """Compute an optimal policy with dynamic-programming value iteration.

    Sweeps the Bellman optimality backup over all states until the largest
    state-value change drops below ``tol`` or ``iter_max`` sweeps complete
    (Sutton & Barto, Reinforcement Learning, 2nd ed., p. 83).

    Args:
        env: Environment exposing ``observation_space.n``, ``action_space.n``
            and the transition table ``P[state][action]``.
        gamma: Discount factor.
        tol: Convergence tolerance on the value function.
        iter_max: Maximum number of sweeps.

    Returns:
        pi: Greedy policy derived from the converged value function.
        v: State-value function.
    """
    n_states = env.observation_space.n
    n_actions = env.action_space.n
    v = np.zeros(n_states)
    for iteration in range(iter_max):
        print("\r> DP Value iteration: Iteration {}/{}".format(
            iteration+1, iter_max), end="")
        delta = 0
        for state in range(n_states):
            # Action-value estimates for this state under the current v.
            q = np.zeros(n_actions)
            for action in range(n_actions):
                for prob, state2, reward, done in env.P[state][action]:
                    q[action] += prob*(reward + gamma*v[state2])
            best = np.max(q)
            delta = max(delta, abs(best - v[state]))
            v[state] = best
        if delta < tol:
            break
    print()
    # NOTE(review): relies on a module-level `utils` name; the star import at
    # the top of this file may not bind the module itself -- verify.
    pi = np.array([utils.dp_greedy_policy(env, v, state, gamma)
              for state in range(n_states)])
    return pi, v
| [
"numpy.zeros",
"numpy.max"
] | [((508, 541), 'numpy.zeros', 'np.zeros', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (516, 541), True, 'import numpy as np\n'), ((827, 855), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (835, 855), True, 'import numpy as np\n'), ((1203, 1212), 'numpy.max', 'np.max', (['q'], {}), '(q)\n', (1209, 1212), True, 'import numpy as np\n'), ((1159, 1168), 'numpy.max', 'np.max', (['q'], {}), '(q)\n', (1165, 1168), True, 'import numpy as np\n')] |
import os
import json
import numpy as np
from ..default import API_SCHEMA_FILE
from .. import ErsiliaBase
class ApiSchema(ErsiliaBase):
    """Accessor for a model's stored API schema file (input/output field specs)."""

    def __init__(self, model_id, config_json):
        ErsiliaBase.__init__(self, config_json=config_json)
        self.model_id = model_id
        self.schema_file = os.path.join(
            self._model_path(self.model_id), API_SCHEMA_FILE
        )
        self.logger.debug("Schema available in {0}".format(self.schema_file))

    def _features(self, o):
        """Return feature names for an output field: its meta if set, generated
        names for 1-D arrays, otherwise None."""
        meta = o["meta"]
        if meta is not None:
            return meta
        if o["type"] != "array":
            return None
        shape = o["shape"]
        assert len(shape) == 1  # TODO: work with arbitrary shape arrays/tensors
        n = shape[0]
        width = len(str(n))
        # Zero-padded names (f00, f01, ...) so lexicographic order matches index order.
        return ["f{0}".format(str(i).zfill(width)) for i in range(n)]

    def isfile(self):
        """Whether the schema file exists on disk."""
        return os.path.isfile(self.schema_file)

    def get(self):
        """Load the schema from disk, filling each output field's 'meta' entry."""
        with open(self.schema_file) as f:
            data = json.load(f)
        for sc in data.values():
            for o in sc["output"].values():
                o["meta"] = self._features(o)
        return data

    @property
    def schema(self):
        """Schema dict; note it is re-read from disk on every access."""
        return self.get()

    def get_schema_by_api(self, api_name):
        """Full schema entry for one API."""
        return self.schema[api_name]

    def get_output_by_api(self, api_name):
        """Output field specs for one API."""
        return self.schema[api_name]["output"]

    def is_h5_serializable(self, api_name):
        """True when every output field is numeric or an array."""
        output = self.get_output_by_api(api_name)
        # TODO generalize
        return all(v["type"] in ("numeric", "array") for v in output.values())

    def get_meta_by_api(self, api_name):
        """Mapping of output field name -> meta (feature names) for one API."""
        output = self.schema[api_name]["output"]
        return {k: v["meta"] for k, v in output.items()}

    def get_meta(self):
        """Meta mappings for every API in the schema."""
        return {api: self.get_meta_by_api(api) for api in self.schema}

    def get_apis(self):
        """Sorted list of API names."""
        return sorted(self.schema)

    def empty_by_field(self, field):
        """None-filled nested list matching an array field's shape, else None."""
        if field["type"] != "array":
            return None
        return np.full(tuple(field["shape"]), None).tolist()

    def empty_input_by_api(self, api_name):
        """Empty placeholder values for one API's input fields."""
        return {k: self.empty_by_field(v)
                for k, v in self.schema[api_name]["input"].items()}

    def empty_output_by_api(self, api_name):
        """Empty placeholder values for one API's output fields."""
        return {k: self.empty_by_field(v)
                for k, v in self.schema[api_name]["output"].items()}

    def empty_by_api(self, api_name):
        """Empty input/output placeholders for one API."""
        return {
            "input": self.empty_input_by_api(api_name),
            "output": self.empty_output_by_api(api_name),
        }

    def empty(self):
        """Empty placeholders for every API."""
        return {api_name: self.empty_by_api(api_name) for api_name in self.get_apis()}
| [
"os.path.isfile",
"json.load",
"numpy.full"
] | [((974, 1006), 'os.path.isfile', 'os.path.isfile', (['self.schema_file'], {}), '(self.schema_file)\n', (988, 1006), False, 'import os\n'), ((1088, 1100), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1097, 1100), False, 'import json\n'), ((2359, 2379), 'numpy.full', 'np.full', (['shape', 'None'], {}), '(shape, None)\n', (2366, 2379), True, 'import numpy as np\n')] |
import numpy as np
import scipy.spatial # type: ignore
def vector_equal(v1, v2):
    """True when v1 and v2 share a shape and are element-wise close
    (tight tolerances, NaNs never compare equal)."""
    if v1.shape != v2.shape:
        return False
    return np.allclose(v1, v2, rtol=1e-12, atol=1e-12, equal_nan=False)
def distance_point_point(p1, p2):
    """Euclidean distance between two points or broadcastable sets of points.

    >>> distance_point_point(np.array([1, 0]), np.array([0, 1]))
    1.4142135623730951
    >>> distance_point_point(np.array([[1, 1], [0, 0]]), np.array([0, 1]))
    array([1., 1.])
    >>> distance_point_point(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, -3]]))
    array([1., 3.])
    """
    # Minkowski distance with p=2 is exactly the Euclidean norm of p1 - p2.
    return scipy.spatial.minkowski_distance(p1, p2, p=2)
def distance_plane_point(plane_point, plane_normal, point):
    """Signed distance from a plane to one or more points.

    The plane is given by a point on it and a unit normal (asserted).

    >>> distance_plane_point(np.array([0, 0, 1]), np.array([0, 0, 1]), np.array([2, 2, 2]))
    1
    >>> distance_plane_point(np.array([0, 0, 1]), np.array([0, 0, 1]), np.array([[2, 2, 2], [2, 2, 3]]))
    array([1, 2])
    """
    normal_length = np.linalg.norm(plane_normal)
    assert np.allclose(normal_length, 1.0, rtol=1e-12, atol=1e-12, equal_nan=False)
    offset = point - plane_point
    # Projection of the offset onto the unit normal gives the signed distance.
    return np.dot(offset, plane_normal)
def distance_line_point(line_point, line_direction, point):
    """Distance from a line to one or more points.

    The line is given by a point on it and a unit direction (asserted).

    >>> distance_line_point(np.array([0, 0, 1]), np.array([0, 0, 1]), np.array([1, 1, 2]))
    1.4142135623730951
    >>> distance_line_point(np.array([0, 0, 1]), np.array([0, 0, 1]), np.array([[1, 0, 1], [0, 2, 3]]))
    array([1., 2.])
    """
    assert np.allclose(
        np.linalg.norm(line_direction), 1.0, rtol=1e-12, atol=1e-12, equal_nan=False
    )
    offset = point - line_point
    # Project the offset onto the line; the residual is the point-to-line distance.
    projection_length = np.expand_dims(np.dot(offset, line_direction), axis=-1)
    projection = np.matmul(projection_length, np.atleast_2d(line_direction))
    return distance_point_point(offset, projection)
| [
"numpy.atleast_2d",
"numpy.dot",
"numpy.allclose",
"numpy.linalg.norm"
] | [((1146, 1187), 'numpy.dot', 'np.dot', (['(point - plane_point)', 'plane_normal'], {}), '(point - plane_point, plane_normal)\n', (1152, 1187), True, 'import numpy as np\n'), ((120, 180), 'numpy.allclose', 'np.allclose', (['v1', 'v2'], {'rtol': '(1e-12)', 'atol': '(1e-12)', 'equal_nan': '(False)'}), '(v1, v2, rtol=1e-12, atol=1e-12, equal_nan=False)\n', (131, 180), True, 'import numpy as np\n'), ((1054, 1082), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (1068, 1082), True, 'import numpy as np\n'), ((1582, 1612), 'numpy.linalg.norm', 'np.linalg.norm', (['line_direction'], {}), '(line_direction)\n', (1596, 1612), True, 'import numpy as np\n'), ((1849, 1878), 'numpy.atleast_2d', 'np.atleast_2d', (['line_direction'], {}), '(line_direction)\n', (1862, 1878), True, 'import numpy as np\n'), ((1794, 1825), 'numpy.dot', 'np.dot', (['delta_p', 'line_direction'], {}), '(delta_p, line_direction)\n', (1800, 1825), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from py_muvr import FeatureSelector
from py_muvr.data_structures import (
FeatureEvaluationResults,
FeatureRanks,
InputDataset,
OuterLoopResults,
)
# Test asset directory (mosquito.csv, freelive.csv) located next to this module.
ASSETS_DIR = Path(__file__).parent / "assets"
@pytest.fixture(scope="session")
def raw_results():
return [
[
OuterLoopResults(
min_eval=FeatureEvaluationResults(
test_score=4,
model="model",
ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
),
max_eval=FeatureEvaluationResults(
test_score=5,
model="model",
ranks=FeatureRanks(
features=[0, 1, 2, 3], ranks=[1, 2, 4, 3], n_feats=10
),
),
mid_eval=FeatureEvaluationResults(
test_score=5,
model="model",
ranks=FeatureRanks(features=[0, 1, 3], ranks=[1, 2, 3], n_feats=10),
),
n_features_to_score_map={5: 4, 4: 3, 3: 3, 2: 3},
),
OuterLoopResults(
min_eval=FeatureEvaluationResults(
test_score=3,
model="model",
ranks=FeatureRanks(
features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
),
),
max_eval=FeatureEvaluationResults(
test_score=3,
model="model",
ranks=FeatureRanks(
features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
),
),
mid_eval=FeatureEvaluationResults(
test_score=2,
model="model",
ranks=FeatureRanks(
features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10
),
),
n_features_to_score_map={5: 5, 4: 4, 3: 5, 2: 5},
),
],
[
OuterLoopResults(
min_eval=FeatureEvaluationResults(
test_score=4,
model="model",
ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
),
max_eval=FeatureEvaluationResults(
test_score=5,
model="model",
ranks=FeatureRanks(
features=[0, 1, 4, 2], ranks=[1, 2, 3, 4], n_feats=10
),
),
mid_eval=FeatureEvaluationResults(
test_score=5,
model="model",
ranks=FeatureRanks(features=[0, 1, 4], ranks=[2, 1, 3], n_feats=10),
),
n_features_to_score_map={5: 5, 4: 3, 3: 5, 2: 3},
),
OuterLoopResults(
min_eval=FeatureEvaluationResults(
test_score=2,
model="model",
ranks=FeatureRanks(features=[0, 1], ranks=[1, 2], n_feats=10),
),
max_eval=FeatureEvaluationResults(
test_score=2,
model="model",
ranks=FeatureRanks(
features=[0, 1, 2, 3, 4], ranks=[1, 2, 5, 4, 3], n_feats=10
),
),
mid_eval=FeatureEvaluationResults(
test_score=2,
model="model",
ranks=FeatureRanks(features=[0, 1, 4], ranks=[1, 2, 3], n_feats=10),
),
n_features_to_score_map={5: 5, 4: 6, 3: 5, 2: 5},
),
],
]
@pytest.fixture
def inner_loop_results():
    """Two evaluation results over features 1-4; the second has tied ranks (1.5)."""
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[1, 2, 3, 4], ranks=[3, 2, 1, 4]),
            test_score=0.2,
            model="estimator",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[1, 2, 3, 4], ranks=[1.5, 1.5, 3, 4]),
            test_score=0.2,
            model="estimator",
        ),
    ]
@pytest.fixture
def inner_loop_results_2():
    """Two evaluation results for the reduced feature subset [2, 3, 4]."""
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 3, 4], ranks=[3, 2, 1]),
            test_score=0.1,
            model="model",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 3, 4], ranks=[1.5, 1.5, 3]),
            test_score=0.5,
            model="model",
        ),
    ]
@pytest.fixture
def inner_loop_results_3():
    """Two evaluation results for the smallest feature subset [2, 4].

    NOTE(review): `ranks` has 3 entries for only 2 features ([3, 2, 1] vs
    [2, 4]) -- confirm FeatureRanks tolerates the mismatch or whether this
    fixture is malformed.
    """
    return [
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 4], ranks=[3, 2, 1]),
            test_score=0.3,
            model="model",
        ),
        FeatureEvaluationResults(
            ranks=FeatureRanks(features=[2, 4], ranks=[1.5, 1.5, 3]),
            test_score=0.25,
            model="model",
        ),
    ]
@pytest.fixture
def rfe_raw_results(inner_loop_results, inner_loop_results_2, inner_loop_results_3):
    """Map feature-subset tuple -> inner-loop results, shaped like a recursive
    feature elimination sequence (4 -> 3 -> 2 features)."""
    return {
        (1, 2, 3, 4): inner_loop_results,
        (2, 3, 4): inner_loop_results_2,
        (2, 4): inner_loop_results_3,
    }
@pytest.fixture
def dataset():
    """Random 12-sample dataset with binary labels and one group per sample."""
    n_samples = 12
    features = np.random.rand(n_samples, n_samples)
    labels = np.random.choice([0, 1], n_samples)
    return InputDataset(X=features, y=labels, groups=np.arange(n_samples))
@pytest.fixture(scope="session")
def mosquito():
df = pd.read_csv(ASSETS_DIR / "mosquito.csv", index_col=0)
df = df.sample(frac=1)
X = df.drop(columns=["Yotu"]).values
y = df.Yotu.values
groups = df.index
return InputDataset(X=X, y=y, groups=groups)
@pytest.fixture(scope="session")
def freelive():
df = pd.read_csv(ASSETS_DIR / "freelive.csv", index_col=0)
X = df.drop(columns=["YR"]).values
y = df.YR.values
groups = df.index
return InputDataset(X=X, y=y, groups=groups)
@pytest.fixture(scope="session")
def fs_results(raw_results):
fs = FeatureSelector(n_outer=3, metric="MISS", estimator="RFC")
fs._raw_results = raw_results
fs.is_fit = True
fs._selected_features = fs._post_processor.select_features(raw_results)
fs._n_features = 5
fs_results = fs.get_feature_selection_results(["A", "B", "C", "D", "E"])
return fs_results
| [
"py_muvr.data_structures.InputDataset",
"numpy.random.rand",
"pandas.read_csv",
"numpy.random.choice",
"pathlib.Path",
"py_muvr.FeatureSelector",
"pytest.fixture",
"py_muvr.data_structures.FeatureRanks",
"numpy.arange"
] | [((294, 325), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (308, 325), False, 'import pytest\n'), ((5523, 5554), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5537, 5554), False, 'import pytest\n'), ((5799, 5830), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5813, 5830), False, 'import pytest\n'), ((6044, 6075), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (6058, 6075), False, 'import pytest\n'), ((5404, 5426), 'numpy.random.rand', 'np.random.rand', (['(12)', '(12)'], {}), '(12, 12)\n', (5418, 5426), True, 'import numpy as np\n'), ((5435, 5463), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(12)'], {}), '([0, 1], 12)\n', (5451, 5463), True, 'import numpy as np\n'), ((5580, 5633), 'pandas.read_csv', 'pd.read_csv', (["(ASSETS_DIR / 'mosquito.csv')"], {'index_col': '(0)'}), "(ASSETS_DIR / 'mosquito.csv', index_col=0)\n", (5591, 5633), True, 'import pandas as pd\n'), ((5758, 5795), 'py_muvr.data_structures.InputDataset', 'InputDataset', ([], {'X': 'X', 'y': 'y', 'groups': 'groups'}), '(X=X, y=y, groups=groups)\n', (5770, 5795), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((5856, 5909), 'pandas.read_csv', 'pd.read_csv', (["(ASSETS_DIR / 'freelive.csv')"], {'index_col': '(0)'}), "(ASSETS_DIR / 'freelive.csv', index_col=0)\n", (5867, 5909), True, 'import pandas as pd\n'), ((6003, 6040), 'py_muvr.data_structures.InputDataset', 'InputDataset', ([], {'X': 'X', 'y': 'y', 'groups': 'groups'}), '(X=X, y=y, groups=groups)\n', (6015, 6040), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((6114, 6172), 'py_muvr.FeatureSelector', 'FeatureSelector', ([], {'n_outer': '(3)', 'metric': '"""MISS"""', 'estimator': '"""RFC"""'}), 
"(n_outer=3, metric='MISS', estimator='RFC')\n", (6129, 6172), False, 'from py_muvr import FeatureSelector\n'), ((258, 272), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (262, 272), False, 'from pathlib import Path\n'), ((5505, 5518), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (5514, 5518), True, 'import numpy as np\n'), ((3995, 4050), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[1, 2, 3, 4]', 'ranks': '[3, 2, 1, 4]'}), '(features=[1, 2, 3, 4], ranks=[3, 2, 1, 4])\n', (4007, 4050), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((4174, 4233), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[1, 2, 3, 4]', 'ranks': '[1.5, 1.5, 3, 4]'}), '(features=[1, 2, 3, 4], ranks=[1.5, 1.5, 3, 4])\n', (4186, 4233), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((4422, 4471), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[2, 3, 4]', 'ranks': '[3, 2, 1]'}), '(features=[2, 3, 4], ranks=[3, 2, 1])\n', (4434, 4471), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((4591, 4644), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[2, 3, 4]', 'ranks': '[1.5, 1.5, 3]'}), '(features=[2, 3, 4], ranks=[1.5, 1.5, 3])\n', (4603, 4644), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((4829, 4875), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[2, 4]', 'ranks': '[3, 2, 1]'}), '(features=[2, 4], ranks=[3, 2, 1])\n', (4841, 4875), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((4995, 5045), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[2, 4]', 
'ranks': '[1.5, 1.5, 3]'}), '(features=[2, 4], ranks=[1.5, 1.5, 3])\n', (5007, 5045), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((544, 599), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1]', 'ranks': '[1, 2]', 'n_feats': '(10)'}), '(features=[0, 1], ranks=[1, 2], n_feats=10)\n', (556, 599), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((766, 833), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 2, 3]', 'ranks': '[1, 2, 4, 3]', 'n_feats': '(10)'}), '(features=[0, 1, 2, 3], ranks=[1, 2, 4, 3], n_feats=10)\n', (778, 833), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((1046, 1107), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 3]', 'ranks': '[1, 2, 3]', 'n_feats': '(10)'}), '(features=[0, 1, 3], ranks=[1, 2, 3], n_feats=10)\n', (1058, 1107), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((1385, 1452), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 4, 3]', 'ranks': '[1, 2, 3, 4]', 'n_feats': '(10)'}), '(features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10)\n', (1397, 1452), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((1665, 1732), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 4, 3]', 'ranks': '[1, 2, 3, 4]', 'n_feats': '(10)'}), '(features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10)\n', (1677, 1732), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((1945, 2012), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 4, 3]', 
'ranks': '[1, 2, 3, 4]', 'n_feats': '(10)'}), '(features=[0, 1, 4, 3], ranks=[1, 2, 3, 4], n_feats=10)\n', (1957, 2012), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((2357, 2412), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1]', 'ranks': '[1, 2]', 'n_feats': '(10)'}), '(features=[0, 1], ranks=[1, 2], n_feats=10)\n', (2369, 2412), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((2579, 2646), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 4, 2]', 'ranks': '[1, 2, 3, 4]', 'n_feats': '(10)'}), '(features=[0, 1, 4, 2], ranks=[1, 2, 3, 4], n_feats=10)\n', (2591, 2646), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((2859, 2920), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 4]', 'ranks': '[2, 1, 3]', 'n_feats': '(10)'}), '(features=[0, 1, 4], ranks=[2, 1, 3], n_feats=10)\n', (2871, 2920), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((3198, 3253), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1]', 'ranks': '[1, 2]', 'n_feats': '(10)'}), '(features=[0, 1], ranks=[1, 2], n_feats=10)\n', (3210, 3253), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((3420, 3493), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], {'features': '[0, 1, 2, 3, 4]', 'ranks': '[1, 2, 5, 4, 3]', 'n_feats': '(10)'}), '(features=[0, 1, 2, 3, 4], ranks=[1, 2, 5, 4, 3], n_feats=10)\n', (3432, 3493), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n'), ((3706, 3767), 'py_muvr.data_structures.FeatureRanks', 'FeatureRanks', ([], 
{'features': '[0, 1, 4]', 'ranks': '[1, 2, 3]', 'n_feats': '(10)'}), '(features=[0, 1, 4], ranks=[1, 2, 3], n_feats=10)\n', (3718, 3767), False, 'from py_muvr.data_structures import FeatureEvaluationResults, FeatureRanks, InputDataset, OuterLoopResults\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import numpy as np
from maro.rl import AbsAgent, ColumnBasedStore
class DQNAgent(AbsAgent):
    """DQN agent built on AbsAgent.

    Samples mini-batches from an experience pool using stored TD errors
    ("loss") as sampling weights, trains the wrapped algorithm on them, and
    writes the refreshed TD errors back into the pool.

    Args:
        name (str): Agent's name.
        algorithm: Concrete algorithm instance (inherits from AbstractAlgorithm).
        experience_pool (ColumnBasedStore): Store of shaped experiences.
        min_experiences_to_train: Minimum pool size before training starts.
        num_batches: Number of mini-batches per call to ``train``.
        batch_size: Mini-batch size.
    """

    def __init__(
        self,
        name: str,
        algorithm,
        experience_pool: ColumnBasedStore,
        min_experiences_to_train,
        num_batches,
        batch_size
    ):
        super().__init__(name, algorithm, experience_pool=experience_pool)
        self._min_experiences_to_train = min_experiences_to_train
        self._num_batches = num_batches
        self._batch_size = batch_size

    def train(self):
        """Run ``num_batches`` weighted training steps, then refresh TD errors.

        No-op until the pool holds at least ``min_experiences_to_train`` items.
        """
        if len(self._experience_pool) < self._min_experiences_to_train:
            return
        for _ in range(self._num_batches):
            indexes, sample = self._experience_pool.sample_by_key("loss", self._batch_size)
            batch = [np.asarray(sample[col]) for col in ("state", "action", "reward", "next_state")]
            loss = self._algorithm.train(*batch)
            # Store updated TD errors so future sampling reflects them.
            self._experience_pool.update(indexes, {"loss": loss})

    def dump_experience_pool(self, dir_path: str):
        """Pickle the experience pool into ``dir_path`` under this agent's name."""
        os.makedirs(dir_path, exist_ok=True)
        with open(os.path.join(dir_path, self._name), "wb") as fp:
            pickle.dump(self._experience_pool, fp)
| [
"pickle.dump",
"os.path.join",
"numpy.asarray",
"os.makedirs"
] | [((2129, 2165), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (2140, 2165), False, 'import os\n'), ((1693, 1720), 'numpy.asarray', 'np.asarray', (["sample['state']"], {}), "(sample['state'])\n", (1703, 1720), True, 'import numpy as np\n'), ((1742, 1770), 'numpy.asarray', 'np.asarray', (["sample['action']"], {}), "(sample['action'])\n", (1752, 1770), True, 'import numpy as np\n'), ((1792, 1820), 'numpy.asarray', 'np.asarray', (["sample['reward']"], {}), "(sample['reward'])\n", (1802, 1820), True, 'import numpy as np\n'), ((1846, 1878), 'numpy.asarray', 'np.asarray', (["sample['next_state']"], {}), "(sample['next_state'])\n", (1856, 1878), True, 'import numpy as np\n'), ((2245, 2283), 'pickle.dump', 'pickle.dump', (['self._experience_pool', 'fp'], {}), '(self._experience_pool, fp)\n', (2256, 2283), False, 'import pickle\n'), ((2184, 2218), 'os.path.join', 'os.path.join', (['dir_path', 'self._name'], {}), '(dir_path, self._name)\n', (2196, 2218), False, 'import os\n')] |
'''
File name: nodes.py
Author: <NAME>
Date created: 10/31/2017
Date last modified: 10/31/2017
Python Version: 2.7
Description: Script to compute connectome
Project: Psychosis
'''
from __future__ import division
from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from sklearn.covariance import GraphLassoCV
from sklearn.decomposition import FastICA
from matplotlib import pyplot as plt
from scipy.signal import lfilter
from operator import itemgetter
from collections import Counter
from datetime import datetime
from itertools import groupby
from nilearn import datasets
import nilearn.signal
import nibabel as nib
import nilearn.image
import pandas as pd
import numpy as np
import argparse
import nilearn
import sys
import os
# Environment-driven paths: code directory, subject ID, and the subject's
# cleaned-data directory (CONDIR/sub-<subject>).
CODEDIR = os.environ['CODEDIR']
subject = os.environ.get('SUBJECT')
cleandir = os.path.join(os.environ.get('CONDIR'),"sub-%s"%subject)
# Session keys: unique entries containing 'task' in the clean directory.
keys = os.listdir(cleandir)
#keys = np.array([x.split("_bold") for x in keys if 'task' in x]).flatten()
keys = np.unique([x for x in keys if 'task' in x]).tolist()
# Preprocessed results (MNINonLinear/Results layout; presumably HCP-style
# pipeline output -- verify against whatever wrote PREPDIR).
prepdir = os.environ.get('PREPDIR')
subprep = os.path.join(prepdir,"sub-"+subject,"MNINonLinear/Results")
print(datetime.now().strftime("%a %b %d %H:%M:%S"))
print("creating connectomes")
for gsr in ["_gsr",""]:
for key in keys:
print("extracting session "+key)
prepfile = os.path.join(subprep,key,key+".nii.gz") #original file
totaltp = nib.load(prepfile).shape[3]
if totaltp <= 10:
continue
imgfile = os.path.join(cleandir,key,key+'_removed_first10_despiked_masked_mvmreg%s_cmpc_bp.nii.gz'%gsr)
##################
# 1 Gordon atlas #
##################
atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/Parcels_MNI_111.nii')
subcort_atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/HarvardOxford-sub-prob-1mm.nii.gz')
cerebellum_atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/Cerebellum-MNIfnirt-prob-1mm.nii.gz')
# extract signals
masker = NiftiLabelsMasker(labels_img=atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
subcortmasker = NiftiMapsMasker(maps_img=subcort_atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
cerebellummasker = NiftiMapsMasker(maps_img=cerebellum_atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
FDfile = os.path.join(cleandir,key,key+"_removed_first10_despiked_mvmreg.txt")
FD = pd.read_csv(FDfile,"\t",header=None)
FD = FD[[24,25]]
FD.columns = ['dvars','FD']
rmid = np.where(FD['FD'] > 0.5)[0]
rmid = np.unique(np.concatenate((rmid,rmid+1,rmid-1)))
short = np.append(False,np.logical_and(np.diff(rmid)>1,np.diff(rmid)<5))
#gives Bool for indices when closer than 5 frames (but evidently more than 1)
allrmid = [range(rmid[i-1],rmid[i])[1:] for i,val in enumerate(short) if val==True]
allrmid = np.sort([item for sublist in allrmid for item in sublist]+rmid.tolist())
ntp = nib.load(imgfile).shape[3]-len(allrmid)
percrem = len(allrmid)/nib.load(imgfile).shape[3]
rmidfile = os.path.join(cleandir,key,key+"_rmid.txt")
np.savetxt(rmidfile,allrmid)
percremfile = os.path.join(cleandir,key,key+"_percrem.txt")
np.savetxt(percremfile,np.array([len(allrmid),ntp,percrem]))
if percrem > 0.2:
continue
# if len(allrmid)>400:
# continue
time_series = masker.fit_transform(imgfile)
time_series_subcort = subcortmasker.fit_transform(imgfile)
time_series_cerebellum = cerebellummasker.fit_transform(imgfile)
time_series = np.concatenate((time_series,time_series_subcort,time_series_cerebellum),axis=1)
time_series_scrubbed = np.delete(time_series,allrmid,axis=0)
# Gordon_figure(correlation_matrix,limits=[-1,1])
# plt.show()
# save parcellated time series
outfile = os.path.join(cleandir,key,key+"_Gordon_ts_scrubbed%s.csv"%gsr)
np.savetxt(outfile,time_series_scrubbed)
outfile = os.path.join(cleandir,key,key+"_Gordon_ts%s.csv"%gsr)
np.savetxt(outfile,time_series)
# static correlation
outfile = os.path.join(cleandir,key,key+"_Gordon_correlation%s.csv"%gsr)
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_scrubbed])[0]
correlation_std = 1/np.sqrt(ntp-3)
correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
np.fill_diagonal(correlation_z,0)
np.savetxt(outfile,correlation_z)
# static correlation
outfile = os.path.join(cleandir,key,key+"_Gordon_partial_correlation%s.csv"%gsr)
correlation_measure = ConnectivityMeasure(kind='partial correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_scrubbed])[0]
correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
np.fill_diagonal(correlation_z,0)
np.savetxt(outfile,correlation_z)
##########################
# 2 cartography - Gordon #
##########################
# windows = 15
# numcon = time_series_scrubbed.shape[0]-windows
#
# time_series_dynamic = np.zeros([333,333,numcon])
# for tps in range(numcon):
# time_series_cut = time_series_scrubbed[tps:(tps+windows)]
# correlation_measure = ConnectivityMeasure(kind='correlation')
# time_series_dynamic[:,:,tps] = correlation_measure.fit_transform([time_series_cut])[0]
#
# time_series_2d = time_series_dynamic.reshape((int(time_series_dynamic.shape[0]**2),time_series_dynamic.shape[2]))
#
# outfile = os.path.join(OUTDIR,key+"_dynamics.csv")
# np.savetxt(outfile,time_series_2d)
# plt.imshow(correlation_z,interpolation="nearest",cmap="hot")
#
# n_components = 5
# ica = FastICA(n_components=n_components, random_state=42)
# signal = ica.fit_transform(time_series_2d)
# mixing = ica.mixing_
# signal = signal.reshape((time_series_dynamic.shape[0],time_series_dynamic.shape[0],5))
#
###############
# 3 AAL atlas #
###############
# atlas = datasets.fetch_atlas_msdl()
# atlasfile = atlas['maps']
# masker = NiftiMapsMasker(maps_img=atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
# time_series = masker.fit_transform(imgfile)
#
# time_series = np.concatenate((time_series,time_series_subcort,time_series_cerebellum),axis=1)
#
# time_series_scrubbed = np.delete(time_series,allrmid,axis=0)
#
# # save parcellated time series
# outfile = os.path.join(OUTDIR,key+"_Msdl_ts_scrubbed.csv")
# np.savetxt(outfile,time_series_scrubbed)
# outfile = os.path.join(OUTDIR,key+"_Msdl_ts.csv")
# np.savetxt(outfile,time_series)
#
# # static correlation
# outfile = os.path.join(OUTDIR,key+"_MSDL_correlation.csv")
# correlation_measure = ConnectivityMeasure(kind='correlation')
# correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
# np.fill_diagonal(correlation_z,0)
# np.savetxt(outfile,correlation_z)
# # static correlation
# outfile = os.path.join(OUTDIR,key+"_MSDL_partial_correlation.csv")
#
# correlation_measure = ConnectivityMeasure(kind='partial correlation')
# correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
# np.fill_diagonal(correlation_z,0)
# np.savetxt(outfile,correlation_z)
| [
"os.listdir",
"nilearn.input_data.NiftiLabelsMasker",
"numpy.unique",
"pandas.read_csv",
"numpy.sqrt",
"numpy.where",
"numpy.delete",
"nibabel.load",
"numpy.log",
"os.path.join",
"os.environ.get",
"numpy.fill_diagonal",
"numpy.diff",
"datetime.datetime.now",
"numpy.savetxt",
"numpy.con... | [((880, 905), 'os.environ.get', 'os.environ.get', (['"""SUBJECT"""'], {}), "('SUBJECT')\n", (894, 905), False, 'import os\n'), ((981, 1001), 'os.listdir', 'os.listdir', (['cleandir'], {}), '(cleandir)\n', (991, 1001), False, 'import os\n'), ((1149, 1174), 'os.environ.get', 'os.environ.get', (['"""PREPDIR"""'], {}), "('PREPDIR')\n", (1163, 1174), False, 'import os\n'), ((1185, 1248), 'os.path.join', 'os.path.join', (['prepdir', "('sub-' + subject)", '"""MNINonLinear/Results"""'], {}), "(prepdir, 'sub-' + subject, 'MNINonLinear/Results')\n", (1197, 1248), False, 'import os\n'), ((930, 954), 'os.environ.get', 'os.environ.get', (['"""CONDIR"""'], {}), "('CONDIR')\n", (944, 954), False, 'import os\n'), ((1085, 1128), 'numpy.unique', 'np.unique', (["[x for x in keys if 'task' in x]"], {}), "([x for x in keys if 'task' in x])\n", (1094, 1128), True, 'import numpy as np\n'), ((1435, 1478), 'os.path.join', 'os.path.join', (['subprep', 'key', "(key + '.nii.gz')"], {}), "(subprep, key, key + '.nii.gz')\n", (1447, 1478), False, 'import os\n'), ((1602, 1706), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_removed_first10_despiked_masked_mvmreg%s_cmpc_bp.nii.gz' % gsr)"], {}), "(cleandir, key, key + \n '_removed_first10_despiked_masked_mvmreg%s_cmpc_bp.nii.gz' % gsr)\n", (1614, 1706), False, 'import os\n'), ((2281, 2399), 'nilearn.input_data.NiftiLabelsMasker', 'NiftiLabelsMasker', ([], {'labels_img': 'atlasfile', 'standardize': '(True)', 'detrend': '(False)', 'low_pass': 'None', 'high_pass': 'None', 'verbose': '(5)'}), '(labels_img=atlasfile, standardize=True, detrend=False,\n low_pass=None, high_pass=None, verbose=5)\n', (2298, 2399), False, 'from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker\n'), ((2415, 2537), 'nilearn.input_data.NiftiMapsMasker', 'NiftiMapsMasker', ([], {'maps_img': 'subcort_atlasfile', 'standardize': '(True)', 'detrend': '(False)', 'low_pass': 'None', 'high_pass': 'None', 'verbose': '(5)'}), 
'(maps_img=subcort_atlasfile, standardize=True, detrend=False,\n low_pass=None, high_pass=None, verbose=5)\n', (2430, 2537), False, 'from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker\n'), ((2556, 2682), 'nilearn.input_data.NiftiMapsMasker', 'NiftiMapsMasker', ([], {'maps_img': 'cerebellum_atlasfile', 'standardize': '(True)', 'detrend': '(False)', 'low_pass': 'None', 'high_pass': 'None', 'verbose': '(5)'}), '(maps_img=cerebellum_atlasfile, standardize=True, detrend=\n False, low_pass=None, high_pass=None, verbose=5)\n', (2571, 2682), False, 'from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker\n'), ((2691, 2764), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_removed_first10_despiked_mvmreg.txt')"], {}), "(cleandir, key, key + '_removed_first10_despiked_mvmreg.txt')\n", (2703, 2764), False, 'import os\n'), ((2774, 2812), 'pandas.read_csv', 'pd.read_csv', (['FDfile', '"""\t"""'], {'header': 'None'}), "(FDfile, '\\t', header=None)\n", (2785, 2812), True, 'import pandas as pd\n'), ((3460, 3506), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_rmid.txt')"], {}), "(cleandir, key, key + '_rmid.txt')\n", (3472, 3506), False, 'import os\n'), ((3511, 3540), 'numpy.savetxt', 'np.savetxt', (['rmidfile', 'allrmid'], {}), '(rmidfile, allrmid)\n', (3521, 3540), True, 'import numpy as np\n'), ((3562, 3611), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_percrem.txt')"], {}), "(cleandir, key, key + '_percrem.txt')\n", (3574, 3611), False, 'import os\n'), ((4005, 4091), 'numpy.concatenate', 'np.concatenate', (['(time_series, time_series_subcort, time_series_cerebellum)'], {'axis': '(1)'}), '((time_series, time_series_subcort, time_series_cerebellum),\n axis=1)\n', (4019, 4091), True, 'import numpy as np\n'), ((4117, 4156), 'numpy.delete', 'np.delete', (['time_series', 'allrmid'], {'axis': '(0)'}), '(time_series, allrmid, axis=0)\n', (4126, 4156), True, 'import numpy as np\n'), 
((4293, 4361), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_Gordon_ts_scrubbed%s.csv' % gsr)"], {}), "(cleandir, key, key + '_Gordon_ts_scrubbed%s.csv' % gsr)\n", (4305, 4361), False, 'import os\n'), ((4364, 4405), 'numpy.savetxt', 'np.savetxt', (['outfile', 'time_series_scrubbed'], {}), '(outfile, time_series_scrubbed)\n', (4374, 4405), True, 'import numpy as np\n'), ((4423, 4482), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_Gordon_ts%s.csv' % gsr)"], {}), "(cleandir, key, key + '_Gordon_ts%s.csv' % gsr)\n", (4435, 4482), False, 'import os\n'), ((4485, 4517), 'numpy.savetxt', 'np.savetxt', (['outfile', 'time_series'], {}), '(outfile, time_series)\n', (4495, 4517), True, 'import numpy as np\n'), ((4565, 4633), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_Gordon_correlation%s.csv' % gsr)"], {}), "(cleandir, key, key + '_Gordon_correlation%s.csv' % gsr)\n", (4577, 4633), False, 'import os\n'), ((4658, 4697), 'nilearn.connectome.ConnectivityMeasure', 'ConnectivityMeasure', ([], {'kind': '"""correlation"""'}), "(kind='correlation')\n", (4677, 4697), False, 'from nilearn.connectome import ConnectivityMeasure\n'), ((4938, 4972), 'numpy.fill_diagonal', 'np.fill_diagonal', (['correlation_z', '(0)'], {}), '(correlation_z, 0)\n', (4954, 4972), True, 'import numpy as np\n'), ((4980, 5014), 'numpy.savetxt', 'np.savetxt', (['outfile', 'correlation_z'], {}), '(outfile, correlation_z)\n', (4990, 5014), True, 'import numpy as np\n'), ((5062, 5138), 'os.path.join', 'os.path.join', (['cleandir', 'key', "(key + '_Gordon_partial_correlation%s.csv' % gsr)"], {}), "(cleandir, key, key + '_Gordon_partial_correlation%s.csv' % gsr)\n", (5074, 5138), False, 'import os\n'), ((5163, 5210), 'nilearn.connectome.ConnectivityMeasure', 'ConnectivityMeasure', ([], {'kind': '"""partial correlation"""'}), "(kind='partial correlation')\n", (5182, 5210), False, 'from nilearn.connectome import ConnectivityMeasure\n'), ((5408, 5442), 
'numpy.fill_diagonal', 'np.fill_diagonal', (['correlation_z', '(0)'], {}), '(correlation_z, 0)\n', (5424, 5442), True, 'import numpy as np\n'), ((5450, 5484), 'numpy.savetxt', 'np.savetxt', (['outfile', 'correlation_z'], {}), '(outfile, correlation_z)\n', (5460, 5484), True, 'import numpy as np\n'), ((1252, 1266), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1264, 1266), False, 'from datetime import datetime\n'), ((1812, 1837), 'os.environ.get', 'os.environ.get', (['"""CODEDIR"""'], {}), "('CODEDIR')\n", (1826, 1837), False, 'import os\n'), ((1950, 1975), 'os.environ.get', 'os.environ.get', (['"""CODEDIR"""'], {}), "('CODEDIR')\n", (1964, 1975), False, 'import os\n'), ((2113, 2138), 'os.environ.get', 'os.environ.get', (['"""CODEDIR"""'], {}), "('CODEDIR')\n", (2127, 2138), False, 'import os\n'), ((2887, 2911), 'numpy.where', 'np.where', (["(FD['FD'] > 0.5)"], {}), "(FD['FD'] > 0.5)\n", (2895, 2911), True, 'import numpy as np\n'), ((2940, 2982), 'numpy.concatenate', 'np.concatenate', (['(rmid, rmid + 1, rmid - 1)'], {}), '((rmid, rmid + 1, rmid - 1))\n', (2954, 2982), True, 'import numpy as np\n'), ((4816, 4832), 'numpy.sqrt', 'np.sqrt', (['(ntp - 3)'], {}), '(ntp - 3)\n', (4823, 4832), True, 'import numpy as np\n'), ((4859, 4918), 'numpy.log', 'np.log', (['((1 + correlation_matrix) / (1 - correlation_matrix))'], {}), '((1 + correlation_matrix) / (1 - correlation_matrix))\n', (4865, 4918), True, 'import numpy as np\n'), ((5329, 5388), 'numpy.log', 'np.log', (['((1 + correlation_matrix) / (1 - correlation_matrix))'], {}), '((1 + correlation_matrix) / (1 - correlation_matrix))\n', (5335, 5388), True, 'import numpy as np\n'), ((1508, 1526), 'nibabel.load', 'nib.load', (['prepfile'], {}), '(prepfile)\n', (1516, 1526), True, 'import nibabel as nib\n'), ((3025, 3038), 'numpy.diff', 'np.diff', (['rmid'], {}), '(rmid)\n', (3032, 3038), True, 'import numpy as np\n'), ((3041, 3054), 'numpy.diff', 'np.diff', (['rmid'], {}), '(rmid)\n', (3048, 3054), True, 
'import numpy as np\n'), ((3342, 3359), 'nibabel.load', 'nib.load', (['imgfile'], {}), '(imgfile)\n', (3350, 3359), True, 'import nibabel as nib\n'), ((3413, 3430), 'nibabel.load', 'nib.load', (['imgfile'], {}), '(imgfile)\n', (3421, 3430), True, 'import nibabel as nib\n')] |
# -*- coding: utf-8 -*-
'''
Code for ground truth evaluation of sybil detection with photos with Residual Network model
The implementation of the Residual-50 model is based on
https://github.com/flyyufelix/cnn_finetune/blob/master/resnet_50.py
* For ground truth evaluation, large vendors will be split into two pseudo vendors, which will be
  added into the training and testing sets respectively
* Network model was pre-trained on ImageNet, and training data are used to finetune the network weights
* Prediction is made on each image in testing set and results are averaged to obtain the
vendor similarities
To use the code, the data folder need to be changed accordingly
'''
from keras.optimizers import SGD
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import ZeroPadding2D, Flatten, add, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model, load_model
from keras import backend as K
import load_batch_data
import sys
import os
import pickle
from json import load as loadjson
import numpy as np
import gc
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """Residual block whose shortcut path has no convolution.

    Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
        filters: list of three integers, the filter counts of the conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # main path: 1x1 reduce -> kxk -> 1x1 expand
    out = Conv2D(filters1, (1, 1), name=conv_prefix + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2a')(out)
    out = Activation('relu')(out)

    out = Conv2D(filters2, (kernel_size, kernel_size),
                 padding='same', name=conv_prefix + '2b')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2b')(out)
    out = Activation('relu')(out)

    out = Conv2D(filters3, (1, 1), name=conv_prefix + '2c')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2c')(out)

    # identity shortcut: add the unmodified input back in
    out = add([out, input_tensor])
    return Activation('relu')(out)
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """Residual block with a convolution (projection) on the shortcut path.

    Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
        filters: list of three integers, the filter counts of the conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: stride of the first main-path conv and of the shortcut conv

    From stage 3 onwards the first conv on the main path subsamples with
    strides=(2,2); the shortcut must subsample identically so the two
    branches stay the same shape.
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # main path: strided 1x1 reduce -> kxk -> 1x1 expand
    out = Conv2D(filters1, (1, 1), strides=strides,
                 name=conv_prefix + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2a')(out)
    out = Activation('relu')(out)

    out = Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                 name=conv_prefix + '2b')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2b')(out)
    out = Activation('relu')(out)

    out = Conv2D(filters3, (1, 1), name=conv_prefix + '2c')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_prefix + '2c')(out)

    # projection shortcut matching the main path's shape
    shortcut = Conv2D(filters3, (1, 1), strides=strides,
                      name=conv_prefix + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_prefix + '1')(shortcut)

    out = add([out, shortcut])
    return Activation('relu')(out)
def resnet50_model(img_rows, img_cols, color_type=1, num_classes=None):
    """ResNet-50 for Keras, pre-trained on ImageNet and re-headed.

    Model schema follows
    https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
    with ImageNet weights from
    https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels.h5

    Parameters:
        img_rows, img_cols - input resolution
        color_type - 1 for grayscale, 3 for color
        num_classes - number of class labels for the classification task
    """
    # Handle dimension ordering for the active backend; bn_axis is a
    # module global read by identity_block/conv_block.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type))
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols))

    # Stem
    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Residual stages: (stage, filters, identity-block labels, conv-block strides)
    stage_specs = [
        (2, [64, 64, 256], 'bc', (1, 1)),
        (3, [128, 128, 512], 'bcd', (2, 2)),
        (4, [256, 256, 1024], 'bcdef', (2, 2)),
        (5, [512, 512, 2048], 'bc', (2, 2)),
    ]
    for stage, filters, id_labels, strides in stage_specs:
        x = conv_block(x, 3, filters, stage=stage, block='a', strides=strides)
        for label in id_labels:
            x = identity_block(x, 3, filters, stage=stage, block=label)

    # Original 1000-way ImageNet head, needed so the pre-trained weights load
    x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_fc = Flatten()(x_fc)
    x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)
    model = Model(img_input, x_fc)

    # Load ImageNet pre-trained weights for the matching backend
    if K.image_dim_ordering() == 'th':
        weights_path = 'pretrained_models/resnet50_weights_th_dim_ordering_th_kernels.h5'
    else:
        weights_path = 'pretrained_models/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    # Truncate and replace the softmax layer for transfer learning.
    # model.layers.pop() is unusable here since the model is not Sequential;
    # rebuilding from x works because weights live in the layers, not the model.
    x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_newfc = Flatten()(x_newfc)
    x_newfc = Dense(num_classes, activation='softmax', name='fc10')(x_newfc)
    model = Model(img_input, x_newfc)

    # Learning rate lowered to 0.001 for fine-tuning
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # Usage: script.py <setname> <threshold> <step> [root_dir]
    channel = 3
    batch_size = 16
    nb_epoch = 15
    img_rows, img_cols = 224, 224
    setname = sys.argv[1]
    threshold = sys.argv[2]
    step = sys.argv[3]
    if len(sys.argv) <= 4:
        root_dir = "/media/intel/m2/train_test_data"
    else:
        root_dir = sys.argv[4]
    tr_test_path = os.path.join(root_dir, step, setname, threshold, 'labels')
    with open(os.path.join(tr_test_path, 'class_name.json')) as fp:
        class_name = loadjson(fp)
    (num_classes, train_size, test_size, train_batches_count, test_batches_count, Y_test
     ) = load_batch_data.statistics_precal(tr_test_path, batch_size=batch_size)
    train_generator = load_batch_data.load_data(tr_test_path, target_size=(img_rows, img_cols),
                                                data_type='train', num_classes=num_classes,
                                                batch_size=batch_size, shuffle=True)
    # Load fine-tuned model if there is one
    finetuned_models_folder = os.path.join(root_dir, step, 'finetuned_models', setname)
    finetuned_model = os.path.join(finetuned_models_folder, "resnet50_%s.h5" % threshold)
    if os.path.isfile(finetuned_model):
        model = load_model(finetuned_model)
        print('Loaded fine-tuned model from %s' % finetuned_model)
    else:
        # Otherwise start from the ImageNet-pretrained network and fine-tune it
        model = resnet50_model(img_rows, img_cols, channel, num_classes)
        model.fit_generator(generator=train_generator,
                            epochs=nb_epoch,
                            max_queue_size=1,
                            class_weight=None,
                            workers=1,
                            verbose=2,
                            steps_per_epoch=train_batches_count
                            )
        gc.collect()
        try:
            os.makedirs(finetuned_models_folder)
        except OSError:
            # folder already exists
            pass
        model.save(finetuned_model)
    # Make predictions on the held-out split
    test_generator = load_batch_data.load_data(tr_test_path, target_size=(img_rows, img_cols),
                                               data_type='test', num_classes=num_classes,
                                               batch_size=batch_size, shuffle=False)
    predictions_test = model.predict_generator(generator=test_generator,
                                               steps=test_batches_count,
                                               max_queue_size=1,
                                               workers=1,
                                               verbose=2
                                               )
    gc.collect()
    # Score: group per-image softmax outputs by their true class.
    # BUG FIX: the original used Python-2-only xrange, which raises
    # NameError on Python 3 (the rest of the file targets Python 3).
    final_pred = {}
    for i in range(len(Y_test)):
        img_class = list(Y_test[i]).index(1.)  # one-hot label -> class index
        if img_class not in final_pred:
            final_pred[img_class] = []
        final_pred[img_class].append(list(predictions_test[i]))
    pred_save_path = os.path.join(root_dir, step, setname, threshold, 'final_pred', 'Resnet50')
    if not os.path.isdir(pred_save_path):
        os.makedirs(pred_save_path)
    with open(os.path.join(pred_save_path, 'prob.pkl'), 'wb') as fp:
        pickle.dump(final_pred, fp)
    # A vendor is correctly matched when the class with the highest mean
    # probability over all its test images equals the true class.
    test_ct = len(final_pred)
    corr_ct = 0.
    for k in final_pred:
        pred_class = np.argmax(np.array(final_pred[k]).mean(axis=0))
        if k == pred_class:
            corr_ct += 1
        else:
            print("%s <xxx> %s" % (class_name[str(k)], class_name[str(pred_class)]))
    print(test_ct, int(corr_ct), corr_ct * 100. / test_ct)
| [
"keras.layers.Conv2D",
"keras.backend.image_dim_ordering",
"numpy.array",
"keras.optimizers.SGD",
"keras.layers.Activation",
"keras.layers.Dense",
"os.path.isdir",
"keras.models.Model",
"keras.layers.ZeroPadding2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.layers.normalizati... | [((2341, 2363), 'keras.layers.add', 'add', (['[x, input_tensor]'], {}), '([x, input_tensor])\n', (2344, 2363), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((4028, 4046), 'keras.layers.add', 'add', (['[x, shortcut]'], {}), '([x, shortcut])\n', (4031, 4046), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((6472, 6494), 'keras.models.Model', 'Model', (['img_input', 'x_fc'], {}), '(img_input, x_fc)\n', (6477, 6494), False, 'from keras.models import Model, load_model\n'), ((7384, 7409), 'keras.models.Model', 'Model', (['img_input', 'x_newfc'], {}), '(img_input, x_newfc)\n', (7389, 7409), False, 'from keras.models import Model, load_model\n'), ((7461, 7516), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)\n', (7464, 7516), False, 'from keras.optimizers import SGD\n'), ((7956, 8014), 'os.path.join', 'os.path.join', (['root_dir', 'step', 'setname', 'threshold', '"""labels"""'], {}), "(root_dir, step, setname, threshold, 'labels')\n", (7968, 8014), False, 'import os\n'), ((8216, 8286), 'load_batch_data.statistics_precal', 'load_batch_data.statistics_precal', (['tr_test_path'], {'batch_size': 'batch_size'}), '(tr_test_path, batch_size=batch_size)\n', (8249, 8286), False, 'import load_batch_data\n'), ((8310, 8472), 'load_batch_data.load_data', 'load_batch_data.load_data', (['tr_test_path'], {'target_size': '(img_rows, img_cols)', 'data_type': '"""train"""', 'num_classes': 'num_classes', 'batch_size': 'batch_size', 'shuffle': '(True)'}), "(tr_test_path, target_size=(img_rows, img_cols),\n data_type='train', num_classes=num_classes, batch_size=batch_size,\n shuffle=True)\n", (8335, 8472), False, 'import load_batch_data\n'), ((8636, 8693), 'os.path.join', 'os.path.join', (['root_dir', 'step', '"""finetuned_models"""', 'setname'], {}), "(root_dir, step, 
'finetuned_models', setname)\n", (8648, 8693), False, 'import os\n'), ((8716, 8783), 'os.path.join', 'os.path.join', (['finetuned_models_folder', "('resnet50_%s.h5' % threshold)"], {}), "(finetuned_models_folder, 'resnet50_%s.h5' % threshold)\n", (8728, 8783), False, 'import os\n'), ((8791, 8822), 'os.path.isfile', 'os.path.isfile', (['finetuned_model'], {}), '(finetuned_model)\n', (8805, 8822), False, 'import os\n'), ((9659, 9821), 'load_batch_data.load_data', 'load_batch_data.load_data', (['tr_test_path'], {'target_size': '(img_rows, img_cols)', 'data_type': '"""test"""', 'num_classes': 'num_classes', 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(tr_test_path, target_size=(img_rows, img_cols),\n data_type='test', num_classes=num_classes, batch_size=batch_size,\n shuffle=False)\n", (9684, 9821), False, 'import load_batch_data\n'), ((10287, 10299), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10297, 10299), False, 'import gc\n'), ((10578, 10652), 'os.path.join', 'os.path.join', (['root_dir', 'step', 'setname', 'threshold', '"""final_pred"""', '"""Resnet50"""'], {}), "(root_dir, step, setname, threshold, 'final_pred', 'Resnet50')\n", (10590, 10652), False, 'import os\n'), ((1808, 1862), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter1', '(1, 1)'], {'name': "(conv_name_base + '2a')"}), "(nb_filter1, (1, 1), name=conv_name_base + '2a')\n", (1814, 1862), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((1885, 1943), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2a')"}), "(axis=bn_axis, name=bn_name_base + '2a')\n", (1903, 1943), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1955, 1973), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1965, 1973), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((1986, 2081), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter2', 
'(kernel_size, kernel_size)'], {'padding': '"""same"""', 'name': "(conv_name_base + '2b')"}), "(nb_filter2, (kernel_size, kernel_size), padding='same', name=\n conv_name_base + '2b')\n", (1992, 2081), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((2103, 2161), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2b')"}), "(axis=bn_axis, name=bn_name_base + '2b')\n", (2121, 2161), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2173, 2191), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2183, 2191), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((2204, 2258), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'name': "(conv_name_base + '2c')"}), "(nb_filter3, (1, 1), name=conv_name_base + '2c')\n", (2210, 2258), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((2270, 2328), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2c')"}), "(axis=bn_axis, name=bn_name_base + '2c')\n", (2288, 2328), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2372, 2390), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2382, 2390), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((3257, 3328), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter1', '(1, 1)'], {'strides': 'strides', 'name': "(conv_name_base + '2a')"}), "(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a')\n", (3263, 3328), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((3366, 3424), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2a')"}), "(axis=bn_axis, name=bn_name_base + '2a')\n", (3384, 
3424), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3436, 3454), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3446, 3454), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((3467, 3562), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter2', '(kernel_size, kernel_size)'], {'padding': '"""same"""', 'name': "(conv_name_base + '2b')"}), "(nb_filter2, (kernel_size, kernel_size), padding='same', name=\n conv_name_base + '2b')\n", (3473, 3562), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((3584, 3642), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2b')"}), "(axis=bn_axis, name=bn_name_base + '2b')\n", (3602, 3642), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3654, 3672), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3664, 3672), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((3685, 3739), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'name': "(conv_name_base + '2c')"}), "(nb_filter3, (1, 1), name=conv_name_base + '2c')\n", (3691, 3739), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((3751, 3809), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2c')"}), "(axis=bn_axis, name=bn_name_base + '2c')\n", (3769, 3809), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3829, 3899), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'strides': 'strides', 'name': "(conv_name_base + '1')"}), "(nb_filter3, (1, 1), strides=strides, name=conv_name_base + '1')\n", (3835, 3899), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((3951, 4008), 
'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '1')"}), "(axis=bn_axis, name=bn_name_base + '1')\n", (3969, 4008), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4055, 4073), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4065, 4073), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((4736, 4758), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (4756, 4758), True, 'from keras import backend as K\n'), ((4808, 4853), 'keras.layers.Input', 'Input', ([], {'shape': '(img_rows, img_cols, color_type)'}), '(shape=(img_rows, img_cols, color_type))\n', (4813, 4853), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((4904, 4949), 'keras.layers.Input', 'Input', ([], {'shape': '(color_type, img_rows, img_cols)'}), '(shape=(color_type, img_rows, img_cols))\n', (4909, 4949), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((4959, 4980), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(3, 3)'], {}), '((3, 3))\n', (4972, 4980), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((5000, 5048), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(7, 7)'], {'strides': '(2, 2)', 'name': '"""conv1"""'}), "(64, (7, 7), strides=(2, 2), name='conv1')\n", (5006, 5048), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((5060, 5109), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': '"""bn_conv1"""'}), "(axis=bn_axis, name='bn_conv1')\n", (5078, 5109), False, 'from keras.layers.normalization import BatchNormalization\n'), ((5121, 5139), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5131, 5139), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), 
((5151, 5187), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (5163, 5187), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((6302, 6343), 'keras.layers.AveragePooling2D', 'AveragePooling2D', (['(7, 7)'], {'name': '"""avg_pool"""'}), "((7, 7), name='avg_pool')\n", (6318, 6343), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((6358, 6367), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6365, 6367), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((6385, 6433), 'keras.layers.Dense', 'Dense', (['(1000)'], {'activation': '"""softmax"""', 'name': '"""fc1000"""'}), "(1000, activation='softmax', name='fc1000')\n", (6390, 6433), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((6540, 6562), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (6560, 6562), True, 'from keras import backend as K\n'), ((7161, 7202), 'keras.layers.AveragePooling2D', 'AveragePooling2D', (['(7, 7)'], {'name': '"""avg_pool"""'}), "((7, 7), name='avg_pool')\n", (7177, 7202), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((7220, 7229), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7227, 7229), False, 'from keras.layers import ZeroPadding2D, Flatten, add, Activation\n'), ((7253, 7306), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""', 'name': '"""fc10"""'}), "(num_classes, activation='softmax', name='fc10')\n", (7258, 7306), False, 'from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D\n'), ((8104, 8116), 'json.load', 'loadjson', (['fp'], {}), '(fp)\n', (8112, 8116), True, 'from json import load as loadjson\n'), ((8840, 8867), 'keras.models.load_model', 'load_model', (['finetuned_model'], {}), '(finetuned_model)\n', (8850, 8867), False, 
'from keras.models import Model, load_model\n'), ((9462, 9474), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9472, 9474), False, 'import gc\n'), ((10664, 10693), 'os.path.isdir', 'os.path.isdir', (['pred_save_path'], {}), '(pred_save_path)\n', (10677, 10693), False, 'import os\n'), ((10703, 10730), 'os.makedirs', 'os.makedirs', (['pred_save_path'], {}), '(pred_save_path)\n', (10714, 10730), False, 'import os\n'), ((10808, 10835), 'pickle.dump', 'pickle.dump', (['final_pred', 'fp'], {}), '(final_pred, fp)\n', (10819, 10835), False, 'import pickle\n'), ((8029, 8074), 'os.path.join', 'os.path.join', (['tr_test_path', '"""class_name.json"""'], {}), "(tr_test_path, 'class_name.json')\n", (8041, 8074), False, 'import os\n'), ((9500, 9536), 'os.makedirs', 'os.makedirs', (['finetuned_models_folder'], {}), '(finetuned_models_folder)\n', (9511, 9536), False, 'import os\n'), ((10745, 10785), 'os.path.join', 'os.path.join', (['pred_save_path', '"""prob.pkl"""'], {}), "(pred_save_path, 'prob.pkl')\n", (10757, 10785), False, 'import os\n'), ((10954, 10977), 'numpy.array', 'np.array', (['final_pred[k]'], {}), '(final_pred[k])\n', (10962, 10977), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def plot_data(xs, ys, c, lw, label, linestyle, **kwargs):
    """Plot *ys* against *xs* (or *xs* alone when *ys* is None) with a shared style.

    All styling keywords are forwarded to ``plt.plot``.
    """
    style = dict(c=c, lw=lw, linestyle=linestyle, label=label, **kwargs)
    if ys is None:
        plt.plot(xs, **style)
    else:
        plt.plot(xs, ys, **style)
def plot_measurements(xs, ys=None, c='r', lw=2, label='Measurements', linestyle='--', **kwargs):
    """Plot raw measurements (red dashed line by default)."""
    plot_data(xs, ys, c, lw, label, linestyle, **kwargs)
def plot_predictions(xs, ys=None, c='b', lw=2, label='Prediction', linestyle=':', **kwargs):
    """Plot filter predictions (blue dotted line by default).

    Bug fix: the default label was 'Measurements' (copy-paste from
    plot_measurements), which mislabeled the prediction curve in every legend.
    """
    plot_data(xs=xs, ys=ys, c=c, lw=lw, linestyle=linestyle, label=label, **kwargs)
def plot_filter(xs, ys=None, c='g', lw=4, label='Filter', linestyle='-', **kwargs):
    """Plot the filtered estimate (thick green solid line by default)."""
    plot_data(xs, ys, c, lw, label, linestyle, **kwargs)
def plot_track(xs, ys=None, c='k', lw=2, label='Track', linestyle='-', **kwargs):
    """Plot the ground-truth track (black solid line by default)."""
    plot_data(xs, ys, c, lw, label, linestyle, **kwargs)
def generate_measurements(x_0, dx, num_measurements, noise, acceleration=0):
    """Simulate a sequence of noisy 1-D measurements.

    Bug fix: the loop used the Python-2-only ``xrange``, which raises
    ``NameError`` on Python 3; replaced with ``range``.

    :param x_0: true initial value.
    :param dx: change per time step.
    :param num_measurements: number of samples to generate.
    :param noise: scale of the Gaussian noise added to each sample.
    :param acceleration: change applied to ``dx`` after each step.
    :return: list of ``num_measurements`` noisy samples.
    """
    data = []
    for i in range(num_measurements):
        data.append(x_0 + dx * i + np.random.randn() * noise)
        dx += acceleration
    return data
def g_h_filter(measurements, x_0, g, h, dx, dt=1.):
    """
    Performs g-h filter on 1 state variable with a fixed g and h.
    :param measurements: sequence of observed values.
    :param x_0: initial value.
    :param g: g scale factor in g-h filter (weight on the residual for the state).
    :param h: h scale factor in g-h filter (weight on the residual for the rate).
    :param dx: initial change rate.
    :param dt: time step.
    :return: (predictions, filtered estimates) as two numpy arrays.
    """
    estimate = x_0
    rate = dx
    predictions = []
    filtered = []
    for z in measurements:
        # predict forward one step using the current rate
        predicted = estimate + rate * dt
        predictions.append(predicted)
        # residual between the observation and the prediction
        residual = z - predicted
        # correct the rate, then the state
        rate = rate + h * residual / dt
        estimate = predicted + g * residual
        filtered.append(estimate)
    return np.array(predictions), np.array(filtered)
def plot_g_h_results(measurements, predictions, filtered_data, title='', z_label='Scale', ):
    """Plot measurements, predictions and the filtered estimate on one figure."""
    plot_measurements(measurements, label=z_label)
    plot_predictions(predictions)
    plot_filter(filtered_data)
    plt.legend(loc=4)
    plt.title(title)
    axes = plt.gca()
    axes.set_xlim(left=0, right=len(measurements))
    plt.show()
# Scenario table for the demo loop below.  Each dict supplies the true start
# value, rate, sample count and noise level; optional keys override the filter
# gains (g, h), the initial guess (x_0_guess), acceleration, or provide a
# pre-made measurement sequence.
test = [
    {'title': 'test', 'x_0': 160, 'dx': 1, 'num_x': 30, 'noise': 3},  # testing assumptions
    {'title': 'bad initial', 'x_0': 5, 'x_0_guess': 30, 'dx': 1, 'num_x': 100, 'noise': 10},  # bad initial guess
    {'title': 'extreme noise', 'x_0': 5, 'dx': 1, 'num_x': 100, 'noise': 100},  # extreme noise
    {'title': 'acceleration', 'x_0': 10, 'dx': 0, 'num_x': 20, 'noise': 0, 'acceleration': 2, 'g': 0.2, 'h': 0.02},
    # acceleration, shows the lag error or systemic error
    # varying g, greater g favors measurement instead of prediction
    {'title': 'g = 0.1', 'x_0': 5, 'x_0_guess': 0, 'dx': 5, 'num_x': 100, 'noise': 50, 'g': 0.1},  # g 0.1
    {'title': 'g = 0.5', 'x_0': 5, 'x_0_guess': 0, 'dx': 5, 'num_x': 100, 'noise': 50, 'g': 0.5},  # g 0.5
    {'title': 'g = 0.9', 'x_0': 5, 'x_0_guess': 0, 'dx': 5, 'num_x': 100, 'noise': 50, 'g': 0.9},  # g 0.9]
    # varying h, greater h makes the filter react rapidly to transient changes
    {
        'title': 'h = 0.05', 'x_0': 0, 'x_0_guess': 0, 'dx': 0, 'num_x': 50, 'noise': 50, 'h': 0.05,
        'measurements': np.linspace(0, 1, 50)
    },  # g 0.1
    {
        'title': 'h = 0.05', 'x_0': 0, 'x_0_guess': 0, 'dx': 2, 'num_x': 50, 'noise': 50, 'h': 0.05,
        'measurements': np.linspace(0, 1, 50)
    },  # g 0.5
    {
        'title': 'h = 0.5', 'x_0': 0, 'x_0_guess': 0, 'dx': 2, 'num_x': 50, 'noise': 50, 'h': 0.5,
        'measurements': np.linspace(0, 1, 50)
    },  # g 0.9
]
# Run every scenario: simulate (or reuse provided) measurements, fit a
# least-squares reference line, run the g-h filter, and plot the results.
for t in test:
    g = t.get('g', 0.2)
    h = t.get('h', 0.01)
    x_0 = t.get('x_0_guess', t['x_0'])
    measurements = t.get('measurements')
    if measurements is None:
        measurements = generate_measurements(t['x_0'], t['dx'], t['num_x'], t['noise'], t.get('acceleration', 0))
    plt.xlim([0, t['num_x']])
    plot_track([0, t['num_x']], [measurements[0], measurements[t['num_x'] - 1]], label='Actual weight')
    # Bug fix: ``xrange`` is Python 2 only and raises NameError on Python 3.
    xs = range(1, t['num_x'] + 1)
    line = np.poly1d(np.polyfit(xs, measurements, 1))
    plot_data(xs, line(xs), label='least squares', c='y', lw=3, linestyle='-')
    predictions, filtered_measurements = g_h_filter(measurements=measurements, x_0=x_0, dx=t['dx'],
                                                    g=g, h=h, dt=1.)
    plot_g_h_results(measurements, predictions, filtered_measurements, title=t['title'])
# Compare two hand-tuned (g, h) pairs on the same step-and-ramp sequence.
measurements = [5, 6, 7, 8, 9, 9, 9, 9, 9, 10, 11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
predictions, filtered_measurements = g_h_filter(measurements=measurements, x_0=4., dx=1., dt=1., g=.302, h=0.054)
plot_g_h_results(measurements, predictions, filtered_measurements, 'g = 0.302, h = 0.054')
predictions, filtered_measurements = g_h_filter(measurements=measurements, x_0=4., dx=1., dt=1., g=.546, h=0.205)
plot_g_h_results(measurements, predictions, filtered_measurements, 'g = 0.546, h = 0.205')
"numpy.polyfit",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2359, 2376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (2369, 2376), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2397), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2390, 2397), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2468), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2466, 2468), True, 'import matplotlib.pyplot as plt\n'), ((4227, 4252), 'matplotlib.pyplot.xlim', 'plt.xlim', (["[0, t['num_x']]"], {}), "([0, t['num_x']])\n", (4235, 4252), True, 'import matplotlib.pyplot as plt\n'), ((142, 214), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'c': 'c', 'lw': 'lw', 'linestyle': 'linestyle', 'label': 'label'}), '(xs, ys, c=c, lw=lw, linestyle=linestyle, label=label, **kwargs)\n', (150, 214), True, 'import matplotlib.pyplot as plt\n'), ((233, 301), 'matplotlib.pyplot.plot', 'plt.plot', (['xs'], {'c': 'c', 'lw': 'lw', 'linestyle': 'linestyle', 'label': 'label'}), '(xs, c=c, lw=lw, linestyle=linestyle, label=label, **kwargs)\n', (241, 301), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2110), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (2097, 2110), True, 'import numpy as np\n'), ((2112, 2143), 'numpy.array', 'np.array', (['filtered_measurements'], {}), '(filtered_measurements)\n', (2120, 2143), True, 'import numpy as np\n'), ((3558, 3579), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (3569, 3579), True, 'import numpy as np\n'), ((3727, 3748), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (3738, 3748), True, 'import numpy as np\n'), ((3894, 3915), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (3905, 3915), True, 'import numpy as np\n'), ((4411, 4442), 'numpy.polyfit', 'np.polyfit', (['xs', 'measurements', '(1)'], {}), '(xs, measurements, 1)\n', (4421, 4442), True, 'import numpy as np\n'), ((2402, 2411), 'matplotlib.pyplot.gca', 
'plt.gca', ([], {}), '()\n', (2409, 2411), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1188), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1186, 1188), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
np.set_printoptions(precision=3, linewidth=256)
from dyconnmap.fc import pli
if __name__ == "__main__":
    # Load the bundled EEG recording (array shape printed below for reference).
    data = np.load("data/eeg_32chans_10secs.npy")
    print(np.shape(data))
    # Phase Lag Index; [1.0, 4.0] and 128.0 are presumably the frequency band
    # (Hz) and sampling rate — confirm against dyconnmap.fc.pli's signature.
    # pairs=None -> all channel pairs.
    ts, avg = pli(data, [1.0, 4.0], 128.0, pairs=None)
    print(avg)
| [
"numpy.shape",
"numpy.load",
"dyconnmap.fc.pli",
"numpy.set_printoptions"
] | [((44, 91), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'linewidth': '(256)'}), '(precision=3, linewidth=256)\n', (63, 91), True, 'import numpy as np\n'), ((162, 200), 'numpy.load', 'np.load', (['"""data/eeg_32chans_10secs.npy"""'], {}), "('data/eeg_32chans_10secs.npy')\n", (169, 200), True, 'import numpy as np\n'), ((243, 283), 'dyconnmap.fc.pli', 'pli', (['data', '[1.0, 4.0]', '(128.0)'], {'pairs': 'None'}), '(data, [1.0, 4.0], 128.0, pairs=None)\n', (246, 283), False, 'from dyconnmap.fc import pli\n'), ((212, 226), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (220, 226), True, 'import numpy as np\n')] |
"""
Contains utility functions.
Meant to be used with the main virtual environment.
"""
from typing import NamedTuple, List, Type, Callable, Tuple
import numpy as np
import torch.nn as nn
import torch
class Features(NamedTuple):
    """
    Holds information regarding features in a feature file.
    """
    # Path to an archive readable by ``np.load`` (see FusionData, which indexes
    # it with the keys in ``arg_names``).
    feature_file: str
    # Archive keys for the sample array and label array, in that order.
    arg_names: List[str] = ['X', 'y']
    # Size of a single feature embedding vector.
    embed_size: int = 0
    # Number of feature vectors per file.
    vector_count: int = 0
class FusionData:
    """Loads and stacks the sample/label arrays referenced by Feature objects."""

    def __init__(self, features: List[Features], do_reshape: bool = True):
        """
        Stacks the data samples referenced by the given Feature objects.
        The samples can either be features or base model outputs.
        Features can be reshaped according to their vector count
        for calculcating the mean of a single file's classifier outputs.
        Args:
            features: a list of Features
            do_reshape: if True, group samples per file as
                (files, vector_count, embed_size) and keep one label per file.
        """
        self.features = features
        self.all_data = []
        last_idx = len(self.features) - 1
        for idx, feature in enumerate(self.features):
            archive = np.load(feature.feature_file)
            samples = archive[feature.arg_names[0]]
            if do_reshape:
                n_files = len(samples) // feature.vector_count
                samples = np.reshape(
                    samples, (n_files, feature.vector_count, feature.embed_size))
            self.all_data.append(samples)
            # Labels are taken from the last feature file only.
            if idx == last_idx:
                labels = archive[feature.arg_names[1]]
                if do_reshape:
                    labels = labels[::feature.vector_count]
                self.all_data.append(labels)

    def get_data(self) -> List[np.ndarray]:
        """
        Returns:
            a list of arrays consisting of feature data and labels
        """
        return self.all_data

    def get_labels(self) -> np.ndarray:
        """
        Returns:
            an array of labels (last element of the stacked data)
        """
        return self.all_data[-1]
def load_model(model_class: Type[nn.Module], saved_model: str,
               device: str) -> nn.Module:
    """
    Load a model with the given state and set it to evaluate.

    Bug fix: ``torch.load`` is now called with ``map_location=device`` so a
    state dict saved on one device (e.g. a GPU) can be restored on another
    (e.g. CPU-only machines) instead of raising a deserialization error.

    Args:
        model_class: the class of the model
        saved_model: a path to a file containing the saved model state
        device: device to run pytorch on
    Returns:
        a loaded model in eval mode
    """
    model = model_class()
    model.load_state_dict(torch.load(saved_model, map_location=device))
    model.to(device)
    model.eval()
    return model
def create_model_and_features(model_class: Type[nn.Module], saved_model: str,
                              feature_file: str, embed_size: int,
                              vector_count: int, device: str) -> (nn.Module, Features):
    """
    Load a model and create a single Features object.
    Args:
        model_class: the class of the model
        saved_model: a path to a file containing the saved model state
        feature_file: a path to a feature file containing feature vectors
        embed_size: size of a single feature embed
        vector_count: number of feature vectors per file
        device: device to run pytorch on
    Returns:
        a loaded model and a Features object
    """
    loaded = load_model(model_class, saved_model, device)
    descriptor = Features(feature_file=feature_file,
                          arg_names=['X', 'y'],
                          embed_size=embed_size,
                          vector_count=vector_count)
    return loaded, descriptor
def create_multiple_features(feature_files: List[str], embed_size: int,
                             vector_count: int) -> Tuple:
    """
    Create a Features object from every given feature file.
    Args:
        feature_files: a list of paths to feature files containing feature vectors
        embed_size: size of a single feature embed
        vector_count: number of feature vectors per file
    Returns:
        a tuple of the created Features objects, in input order
    """
    return tuple(
        Features(feature_file=path,
                 arg_names=['X', 'y'],
                 embed_size=embed_size,
                 vector_count=vector_count)
        for path in feature_files
    )
class FusionInfo(NamedTuple):
    """
    Contains information of a fusion model required to test it.

    Exactly how the combiner fields interact is decided by the consumer of
    this tuple (not visible here): ``rule_function``/``weights`` describe a
    rule-based combiner, while ``fusion_model`` is a learned combiner.
    """
    models: List[nn.Module]  # base models whose outputs are fused
    all_data: FusionData  # stacked feature data and labels (see FusionData)
    rule_function: Callable = None  # Combiner rule in fusion
    weights: torch.Tensor = None  # Weights in a combiner
    fusion_model: nn.Module = None  # Model used by deep rule based combiner
    plot_cm: bool = False  # Plot confusion matrix
| [
"torch.load",
"numpy.load"
] | [((2354, 2377), 'torch.load', 'torch.load', (['saved_model'], {}), '(saved_model)\n', (2364, 2377), False, 'import torch\n'), ((1004, 1033), 'numpy.load', 'np.load', (['feature.feature_file'], {}), '(feature.feature_file)\n', (1011, 1033), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import math
import numpy as np
class Monitor:
    """Viewing-geometry helper: converts between pixels and degrees of visual
    angle for a screen of known size and viewing distance, and stores the
    normalized locations of the gaze-target points."""

    def __init__(self, w, h, screen, dist, n=5):
        """
        Create Monitor Configuration
        @param w: Screen Width (px)
        @param h: Screen Height (px)
        @param screen: Screen Size (in)
        @param dist: Distance between Subject and Screen (in)
        @param n: number of target points
        """
        self.n = n
        self.w = float(w)
        self.h = float(h)
        diagonal_px = math.sqrt(self.w * self.w + self.h * self.h)
        self.dpi = diagonal_px / float(screen)
        self.D = float(dist)
        # Targets are offset 10 degrees of visual angle from the center.
        deg_offset = 10.0
        offset_px = self.deg_to_px(deg_offset)
        dx = offset_px / float(w)
        dy = offset_px / float(h)
        # Known target locations in (0, 0)-top-left normalized coordinates:
        # center, bottom-right, bottom-left, top-left, top-right.
        self.S = np.array([
            [0.5, 0.5],
            [0.5 + dx, 0.5 + dy],
            [0.5 - dx, 0.5 + dy],
            [0.5 - dx, 0.5 - dy],
            [0.5 + dx, 0.5 - dy],
        ], dtype=float)
        self.n_dict = {'center': 0,
                       'bottom_right': 1,
                       'bottom_left': 2,
                       'top_left': 3,
                       'top_right': 4}

    def deg_to_px(self, deg):
        """Convert degrees of visual angle to an absolute pixel distance."""
        return 2 * self.D * math.tan(math.radians(deg / 2.0)) * self.dpi

    def px_to_deg(self, px):
        """Convert a pixel distance (normalized by the screen diagonal) to
        degrees of visual angle."""
        diagonal_px = math.sqrt(self.w * self.w + self.h * self.h)
        absolute_px = px * diagonal_px
        return 2 * math.degrees(math.atan2(absolute_px / self.dpi, 2 * self.D))
| [
"numpy.array",
"math.sqrt",
"math.atan2",
"math.radians"
] | [((436, 480), 'math.sqrt', 'math.sqrt', (['(self.w * self.w + self.h * self.h)'], {}), '(self.w * self.w + self.h * self.h)\n', (445, 480), False, 'import math\n'), ((842, 969), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.5 + dx, 0.5 + dy], [0.5 - dx, 0.5 + dy], [0.5 - dx, 0.5 -\n dy], [0.5 + dx, 0.5 - dy]]'], {'dtype': 'float'}), '([[0.5, 0.5], [0.5 + dx, 0.5 + dy], [0.5 - dx, 0.5 + dy], [0.5 - dx,\n 0.5 - dy], [0.5 + dx, 0.5 - dy]], dtype=float)\n', (850, 969), True, 'import numpy as np\n'), ((1352, 1396), 'math.sqrt', 'math.sqrt', (['(self.w * self.w + self.h * self.h)'], {}), '(self.w * self.w + self.h * self.h)\n', (1361, 1396), False, 'import math\n'), ((1487, 1524), 'math.atan2', 'math.atan2', (['(px / self.dpi)', '(2 * self.D)'], {}), '(px / self.dpi, 2 * self.D)\n', (1497, 1524), False, 'import math\n'), ((1266, 1289), 'math.radians', 'math.radians', (['(deg / 2.0)'], {}), '(deg / 2.0)\n', (1278, 1289), False, 'import math\n')] |
import numpy as np
import pandas as pd
def create_csv():
    """Strip station/source metadata columns from the merged STEAD catalog and
    write the slimmed table used for prediction.

    NOTE(review): input/output paths are hard-coded to a local drive.
    """
    dropped_columns = [
        'network_code', 'receiver_code', 'receiver_type', 'receiver_latitude',
        'receiver_longitude', 'receiver_elevation_m', 'p_status', 'p_weight',
        'p_travel_sec', 's_status', 's_weight', 'source_id',
        'source_origin_time', 'source_origin_uncertainty_sec',
        'source_latitude', 'source_longitude', 'source_error_sec',
        'source_gap_deg', 'source_horizontal_uncertainty_km',
        'source_depth_uncertainty_km', 'source_magnitude',
        'source_magnitude_type', 'source_magnitude_author',
        'source_mechanism_strike_dip_rake', 'source_distance_deg',
        'back_azimuth_deg', 'snr_db', 'trace_start_time',
    ]
    source = pd.read_csv("/media/wml/新加卷/flushSTEAD/merged.csv")
    source = source.drop(dropped_columns, axis=1)
    source.to_csv("/media/wml/新加卷/flushSTEAD/predict.csv")
def f1(y_true, y_pred):
    """Compute precision, recall and F1 for binary 0/1 arrays.

    Values are clipped to [0, 1] and rounded before counting, so soft scores
    are thresholded the same way the original Keras-style metrics do.
    A small epsilon (1e-07) guards every division against zero denominators.

    Returns:
        (precision, recall, f1) as floats.
    """
    eps = 1e-07
    true_positives = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))
    actual_positives = np.sum(np.round(np.clip(y_true, 0, 1)))
    predicted_positives = np.sum(np.round(np.clip(y_pred, 0, 1)))
    recall = true_positives / (actual_positives + eps)
    precision = true_positives / (predicted_positives + eps)
    return precision, recall, 2 * ((precision * recall) / (precision + recall + eps))
def array_generate(point):
    """Return a 6000-sample 0/1 mask with ones in a +/-50-sample window
    around *point*, clipped at the array boundaries (the window keeps
    point-50 .. point+50 inclusive where it fits)."""
    mask = np.zeros(shape=(6000,))
    start = max(point - 50, 0)
    stop = min(point + 51, 6000)
    mask[start:stop] = 1
    return mask
def calcu():
    """Score saved P/S-pick and coda-end predictions against ground truth.

    Reads a results CSV (hard-coded local path) and, per row, builds 0/1 masks
    around the true and predicted picks, then accumulates precision/recall/F1
    and absolute pick errors.

    NOTE(review): columns are accessed positionally via ``.values[i]`` —
    presumably 1=true P, 2=true S, 3=coda-end string, 4=pred P, 5=pred S,
    6=pred end; verify against the CSV writer.  The accumulated totals are
    only printed by the commented-out block below, so most of the computed
    sums are currently unused.
    """
    df = pd.read_csv("/media/wml/新加卷/flushSTEAD/result1602857324.2880619.csv")
    p_f1_total = 0
    p_pr_total = 0
    p_re_total = 0
    s_f1_total = 0
    s_pr_total = 0
    s_re_total = 0
    end_f1_total = 0
    end_re_total = 0
    end_pr_total = 0
    p_all_array = 0  # accumulated absolute P-pick error (samples)
    s_all_array = 0  # accumulated absolute S-pick error (samples)
    for index in df.index:
        p_array_t = array_generate(int(df.loc[index].values[1]))
        p_array_p = array_generate(df.loc[index].values[4])
        s_array_t = array_generate(int(df.loc[index].values[2]))
        s_array_p = array_generate(df.loc[index].values[5])
        # True event window: from the P pick to the coda end.
        # ``[3:-3]`` strips surrounding characters from the stored string —
        # presumably a bracketed/quoted number; confirm with the writer.
        end_array_t = np.zeros(shape = (6000,))
        end_array_t[int(df.loc[index].values[1]) : int(df.loc[index].values[3][3:-3])] = 1
        end_array_p = np.zeros(shape = (6000,))
        if df.loc[index].values[6] >= df.loc[index].values[4]:
            end_array_p[int(df.loc[index].values[4]) : int(df.loc[index].values[6])] = 1
        p_all_array += abs(df.loc[index].values[4] - int(df.loc[index].values[1]))
        s_all_array += abs(df.loc[index].values[5] - int(df.loc[index].values[2]))
        p_pr, p_re, p_f1 = f1(p_array_t, p_array_p)
        print(p_pr, p_re, p_f1)
        p_pr_total += p_pr
        p_re_total += p_re
        p_f1_total += p_f1
        s_pr, s_re, s_f1 = f1(s_array_t, s_array_p)
        s_pr_total += s_pr
        s_re_total += s_re
        s_f1_total += s_f1
        end_pr, end_re, end_f1 = f1(end_array_t, end_array_p)
        end_pr_total += end_pr
        end_re_total += end_re
        end_f1_total += end_f1
        # if index % 100 == 0:
        #     print(f'round {index} : p_f1_score {p_f1_total/index}, s_f1_score {s_f1_total/index}, end_f1_score {end_f1_total/index}')
        #     print(f'round {index} : p_pr_score {p_pr_total/index}, s_pr_score {s_pr_total/index}, end_pr_score {end_pr_total/index}')
        #     print(f'round {index} : p_re_score {p_re_total/index}, s_re_score {s_re_total/index}, end_re_score {end_re_total/index}')
        #     print(f'round {index} : p_mae_score {p_all_array/index/1000}, s_mae_score {s_all_array/index/1000}')
        #     print()
if __name__ == "__main__":
    # Score saved predictions against ground truth (see calcu above).
    calcu()
| [
"numpy.clip",
"numpy.zeros",
"pandas.read_csv"
] | [((71, 122), 'pandas.read_csv', 'pd.read_csv', (['"""/media/wml/新加卷/flushSTEAD/merged.csv"""'], {}), "('/media/wml/新加卷/flushSTEAD/merged.csv')\n", (82, 122), True, 'import pandas as pd\n'), ((2196, 2219), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6000,)'}), '(shape=(6000,))\n', (2204, 2219), True, 'import numpy as np\n'), ((2427, 2496), 'pandas.read_csv', 'pd.read_csv', (['"""/media/wml/新加卷/flushSTEAD/result1602857324.2880619.csv"""'], {}), "('/media/wml/新加卷/flushSTEAD/result1602857324.2880619.csv')\n", (2438, 2496), True, 'import pandas as pd\n'), ((3014, 3037), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6000,)'}), '(shape=(6000,))\n', (3022, 3037), True, 'import numpy as np\n'), ((3153, 3176), 'numpy.zeros', 'np.zeros', ([], {'shape': '(6000,)'}), '(shape=(6000,))\n', (3161, 3176), True, 'import numpy as np\n'), ((1339, 1369), 'numpy.clip', 'np.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1346, 1369), True, 'import numpy as np\n'), ((1417, 1438), 'numpy.clip', 'np.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (1424, 1438), True, 'import numpy as np\n'), ((1794, 1824), 'numpy.clip', 'np.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1801, 1824), True, 'import numpy as np\n'), ((1873, 1894), 'numpy.clip', 'np.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (1880, 1894), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
try:
import openeye.oechem as oechem
except ImportError:
pass
import warnings
import numpy as np
import itertools
from math import radians, degrees
import copy
from . import utils, chemi
from cmiles.utils import mol_to_smiles, has_atom_map, get_atom_map
from .utils import BOHR_2_ANGSTROM, logger
# warnings.simplefilter('always')
def find_torsions(molecule, restricted=True, terminal=True):
    """
    This function takes an OEMol (atoms must be tagged with index map) and finds the map indices for torsion that need
    to be driven.
    Parameters
    ----------
    molecule : OEMol
        The atoms in the molecule need to be tagged with map indices
    restricted: bool, optional, default True
        If True, will find restricted torsions such as torsions in rings and double bonds.
    terminal: bool, optional, default True
        If True, will find terminal torsions
    Returns
    -------
    needed_torsion_scans: dict
        a dictionary that maps internal, terminal and restricted torsions to map indices of torsion atoms
        (map indices are shifted to 0-based: GetMapIdx() - 1)
    """
    # Check if molecule has map
    is_mapped = has_atom_map(molecule)
    if not is_mapped:
        utils.logger().warning('Molecule does not have atom map. A new map will be generated. You might need a new tagged SMARTS if the ordering was changed')
        tagged_smiles = mol_to_smiles(molecule, isomeric=True, mapped=True, explicit_hydrogen=True)
        # Generate new molecule with tags
        molecule = chemi.smiles_to_oemol(tagged_smiles)
        utils.logger().warning('If you already have a tagged SMARTS, compare it with the new one to ensure the ordering did not change')
        utils.logger().warning('The new tagged SMARTS is: {}'.format(tagged_smiles))
        # ToDo: save the new tagged SMILES somewhere. Maybe return it?
    needed_torsion_scans = {'internal': {}, 'terminal': {}, 'restricted': {}}
    mol = oechem.OEMol(molecule)
    if restricted:
        smarts = '[*]~[C,c]=,@[C,c]~[*]'  # This should capture double bonds (not capturing rings because OpenEye does not
        # generate skewed conformations. ToDo: use scan in geometric or something else to get this done.
        restricted_tors = _find_torsions_from_smarts(molecule=mol, smarts=smarts)
        if len(restricted_tors) > 0:
            # Keep a single representative torsion per rotatable bond.
            restricted_tors_min = one_torsion_per_rotatable_bond(restricted_tors)
            for i, tor in enumerate(restricted_tors_min):
                tor_name = ((tor[0].GetMapIdx() - 1), (tor[1].GetMapIdx() - 1), (tor[2].GetMapIdx() - 1), (tor[3].GetMapIdx() - 1))
                needed_torsion_scans['restricted']['torsion_{}'.format(str(i))] = tor_name
    if terminal:
        smarts = '[*]~[*]-[X2H1,X3H2,X4H3]-[#1]'  # This smarts should match terminal torsions such as -CH3, -NH2, -NH3+, -OH, and -SH
        h_tors = _find_torsions_from_smarts(molecule=mol, smarts=smarts)
        if len(h_tors) > 0:
            h_tors_min = one_torsion_per_rotatable_bond(h_tors)
            for i, tor in enumerate(h_tors_min):
                tor_name = ((tor[0].GetMapIdx() -1 ), (tor[1].GetMapIdx() - 1), (tor[2].GetMapIdx() - 1), (tor[3].GetMapIdx() - 1))
                needed_torsion_scans['terminal']['torsion_{}'.format(str(i))] = tor_name
    # Internal (mid-molecule) torsions come from OpenEye's torsion enumeration.
    mid_tors = [[tor.a, tor.b, tor.c, tor.d ] for tor in oechem.OEGetTorsions(mol)]
    if mid_tors:
        mid_tors_min = one_torsion_per_rotatable_bond(mid_tors)
        for i, tor in enumerate(mid_tors_min):
            tor_name = ((tor[0].GetMapIdx() - 1), (tor[1].GetMapIdx() - 1), (tor[2].GetMapIdx() - 1), (tor[3].GetMapIdx() - 1))
            needed_torsion_scans['internal']['torsion_{}'.format(str(i))] = tor_name
    # Check that there are no duplicate torsions in mid and h_torsions
    list_tor = list(needed_torsion_scans['internal'].values()) + list(needed_torsion_scans['terminal'].values())
    set_tor = set(list_tor)
    if not len(set_tor) == len(list_tor):
        raise Warning("There is a torsion defined in both mid and terminal torsions. This should not happen. Check "
                      "your molecule and the atom mapping")
    return needed_torsion_scans
def _find_torsions_from_smarts(molecule, smarts):
    """
    Do a substructure search with the provided SMARTS to find torsions that match it.
    Parameters
    ----------
    molecule: OEMol
        molecule to search on
    smarts: str
        SMARTS pattern to search for (must map exactly four atoms per match)
    Returns
    -------
    tors: list
        list of torsions (4-atom lists of target atoms) that match the SMARTS string
    """
    #ToDO use MDL aromaticity model
    qmol=oechem.OEQMol()
    if not oechem.OEParseSmarts(qmol, smarts):
        # NOTE(review): parse failure is only logged; the search below then
        # runs with an empty query.
        utils.logger().warning('OEParseSmarts failed')
    ss = oechem.OESubSearch(qmol)
    tors = []
    oechem.OEPrepareSearch(molecule, ss)
    unique = True  # request only symmetry-unique matches
    for match in ss.Match(molecule, unique):
        tor = []
        for ma in match.GetAtoms():
            tor.append(ma.target)
        tors.append(tor)
    return tors
def one_torsion_per_rotatable_bond(torsion_list):
    """
    Keep only one torsion per rotatable bond.

    Torsions are grouped by their central bond; within a group the torsion
    whose two terminal atoms have the largest summed atomic number is kept
    (heavier substituents preferred over hydrogens).

    Parameters
    ----------
    torsion_list: list
        list of torsion in molecule (each a 4-atom list)
    Returns
    -------
    list of only one torsion per rotatable bonds
    """
    # Record (row index, idx of atom b, idx of atom c) for every torsion,
    # then sort by the central bond so equal bonds are adjacent.
    central_bonds = np.zeros((len(torsion_list), 3), dtype=int)
    for i, tor in enumerate(torsion_list):
        central_bonds[i][0] = i
        central_bonds[i][1] = tor[1].GetIdx()
        central_bonds[i][2] = tor[2].GetIdx()
    grouped = central_bonds[central_bonds[:, 2].argsort()]
    sorted_tors = [torsion_list[i] for i in grouped[:, 0]]
    # Keep only one torsion per rotatable bond
    tors = []
    # Seed the comparison with a placeholder built from the first torsion's
    # first atom; ``first_pass`` prevents this placeholder from being emitted.
    best_tor = [sorted_tors[0][0], sorted_tors[0][0], sorted_tors[0][0], sorted_tors[0][0]]
    best_tor_order = best_tor[0].GetAtomicNum() + best_tor[3].GetAtomicNum()
    first_pass = True
    for tor in sorted_tors:
        utils.logger().debug("Map Idxs: {} {} {} {}".format(tor[0].GetMapIdx(), tor[1].GetMapIdx(), tor[2].GetMapIdx(), tor[3].GetMapIdx()))
        utils.logger().debug("Atom Numbers: {} {} {} {}".format(tor[0].GetAtomicNum(), tor[1].GetAtomicNum(), tor[2].GetAtomicNum(), tor[3].GetAtomicNum()))
        if tor[1].GetMapIdx() != best_tor[1].GetMapIdx() or tor[2].GetMapIdx() != best_tor[2].GetMapIdx():
            # New central bond: flush the best torsion of the previous group.
            new_tor = True
            if not first_pass:
                utils.logger().debug("Adding to list: {} {} {} {}".format(best_tor[0].GetMapIdx(), best_tor[1].GetMapIdx(), best_tor[2].GetMapIdx(), best_tor[3].GetMapIdx()))
                tors.append(best_tor)
            first_pass = False
            best_tor = tor
            best_tor_order = tor[0].GetAtomicNum() + tor[3].GetAtomicNum()
            utils.logger().debug("new_tor with central bond across atoms: {} {}".format(tor[1].GetMapIdx(), tor[2].GetMapIdx()))
        else:
            # Same central bond: keep the torsion with heavier end atoms.
            utils.logger().debug("Not a new_tor but now with end atoms: {} {}".format(tor[0].GetMapIdx(), tor[3].GetMapIdx()))
            tor_order = tor[0].GetAtomicNum() + tor[3].GetAtomicNum()
            if tor_order > best_tor_order:
                best_tor = tor
                best_tor_order = tor_order
    # Flush the final group's best torsion.
    utils.logger().debug("Adding to list: {} {} {} {}".format(best_tor[0].GetMapIdx(), best_tor[1].GetMapIdx(), best_tor[2].GetMapIdx(), best_tor[3].GetMapIdx()))
    tors.append(best_tor)
    utils.logger().info("List of torsion to drive:")
    for tor in tors:
        utils.logger().info("Idx: {} {} {} {}".format(tor[0].GetMapIdx(), tor[1].GetMapIdx(), tor[2].GetMapIdx(), tor[3].GetMapIdx()))
        utils.logger().info("Atom numbers: {} {} {} {}".format(tor[0].GetAtomicNum(), tor[1].GetAtomicNum(), tor[2].GetAtomicNum(), tor[3].GetAtomicNum()))
    return tors
def define_torsiondrive_jobs(needed_torsion_drives, internal_torsion_resolution=30, terminal_torsion_resolution=0,
                             scan_internal_terminal_combination=0, scan_dimension=2):
    """
    define crank jobs with torsions to drive and resolution to drive them at.

    Bug fixes:
    - the combined (internal+terminal) branch wrote its dihedrals under a
      misspelled key ``'diherals'``; it now uses ``'dihedrals'``, consistent
      with the non-combined branch.
    - that branch also rebound ``internal_torsions``/``terminal_torsions``
      (the input dicts) to lists inside the loop, so every iteration after
      the first indexed a list with a string key and raised ``TypeError``;
      fresh local names are used instead.

    Parameters
    ----------
    needed_torsion_drives: dict
        dictionary with 'internal' and 'terminal' sub-dicts mapping
        'torsion_i' names to atom-index tuples
    internal_torsion_resolution: int, optional. Default 30
        interval in degrees for torsion scan. If 0, internal torsions will not be driven
    terminal_torsion_resolution: int, optional. Default 0
        interval in degrees for torsion scans. If 0, terminal torsions will not be driven
    scan_internal_terminal_combination: int, optional. Default 0
        flag if internal and terminal torsions should be combined for higher dimension. If 0, only same-kind torsions
        are combined. If 1, terminal and internal torsions will be scanned together.
    scan_dimension: int, optional. Default 2
        dimension of torsion scan. Combinations of torsions at the specified dimension will be generated as separate crank jobs
    Returns
    -------
    crank_jobs: dict
        maps 'crank_job_i' to {'dihedrals': [...], 'grid_spacing': [...]}
    """
    if not internal_torsion_resolution and not terminal_torsion_resolution:
        utils.logger().warning("Resolution for internal and terminal torsions are 0. No torsions will be driven")
    if scan_internal_terminal_combination and (not internal_torsion_resolution or not terminal_torsion_resolution):
        raise Warning("If you are not scanning internal or terminal torsions, you must set scan_internal_terminal_"
                      "combinations to 0")
    internal_torsions = needed_torsion_drives['internal']
    terminal_torsions = needed_torsion_drives['terminal']
    internal_dimension = len(internal_torsions)
    terminal_dimension = len(terminal_torsions)
    torsion_dimension = internal_dimension + terminal_dimension

    crank_job = 0
    crank_jobs = dict()

    if not scan_internal_terminal_combination:
        if internal_torsion_resolution:
            for comb in itertools.combinations(internal_torsions, scan_dimension):
                dihedrals = [internal_torsions[torsion] for torsion in comb]
                grid = [internal_torsion_resolution] * len(dihedrals)
                crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
                crank_job += 1
            # Fewer torsions than the scan dimension: emit one job with all of them.
            if internal_dimension < scan_dimension and internal_dimension > 0:
                dihedrals = [internal_torsions[torsion] for torsion in internal_torsions]
                grid = [internal_torsion_resolution] * len(dihedrals)
                crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
                crank_job += 1
        if terminal_torsion_resolution:
            for comb in itertools.combinations(terminal_torsions, scan_dimension):
                dihedrals = [terminal_torsions[torsion] for torsion in comb]
                grid = [terminal_torsion_resolution] * scan_dimension
                crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
                crank_job += 1
            if terminal_dimension < scan_dimension and terminal_dimension > 0:
                dihedrals = [terminal_torsions[torsion] for torsion in terminal_torsions]
                grid = [terminal_torsion_resolution] * len(dihedrals)
                crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
                crank_job += 1
    else:
        # combine both internal and terminal torsions; indices < internal_dimension
        # refer to internal torsions, the rest to terminal torsions.
        all_torsion_idx = np.arange(0, torsion_dimension)
        for comb in itertools.combinations(all_torsion_idx, scan_dimension):
            internal_selection = [internal_torsions['torsion_{}'.format(i)] for i in comb if i < internal_dimension]
            terminal_selection = [terminal_torsions['torsion_{}'.format(i - internal_dimension)] for i in comb if i >= internal_dimension]
            grid = [internal_torsion_resolution] * len(internal_selection)
            grid.extend([terminal_torsion_resolution] * len(terminal_selection))
            dihedrals = internal_selection + terminal_selection
            crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
            crank_job += 1
        if torsion_dimension < scan_dimension:
            internal_selection = [internal_torsions['torsion_{}'.format(i)] for i in all_torsion_idx if i < internal_dimension]
            terminal_selection = [terminal_torsions['torsion_{}'.format(i - internal_dimension)] for i in all_torsion_idx if i >= internal_dimension]
            grid = [internal_torsion_resolution] * len(internal_selection)
            grid.extend([terminal_torsion_resolution] * len(terminal_selection))
            dihedrals = internal_selection + terminal_selection
            crank_jobs['crank_job_{}'.format(crank_job)] = {'dihedrals': dihedrals, 'grid_spacing': grid}
            crank_job += 1
    return crank_jobs
def define_restricted_drive(qc_molecule, restricted_dihedrals, steps=6, maximum_rotation=30, scan_dimension=1):
    """
    Build constrained-optimization jobs that sweep each restricted dihedral
    from its current angle up to +/- ``maximum_rotation`` degrees in ``steps``
    steps (two jobs per torsion, one for each direction).

    Parameters
    ----------
    qc_molecule: molecule in QC_JSON format. This comes with a geometry, connectivity table, identifiers that also has
    a mapped SMILES so it can be checked.
    restricted_dihedrals: dict mapping torsion names to 4-tuples of atom indices
    steps: number of scan points per direction
    maximum_rotation: sweep extent in degrees from the current angle
    scan_dimension: currently unused (1-D scans only; see ToDo below)
    Returns
    -------
    optimization_jobs: dict of QCFractal-style optimization inputs with scan constraints
    """
    #ToDo extend to multi-dimensional scans
    #natoms = len(qc_molecule['symbols'])
    # Convert to 3D shape for
    #coords = np.array(qc_molecule['geometry'], dtype=float).reshape(natoms, 3) * utils.BOHR_2_ANGSTROM
    connectivity = np.asarray(qc_molecule['connectivity'])
    # Check dihedral indices are connected
    bond_tuples = list(zip(connectivity[:, :2].T[0], connectivity[:, :2].T[1]))
    optimization_jobs = {}
    i = 0
    for torsion in restricted_dihedrals:
        for a1, a2 in zip(restricted_dihedrals[torsion], restricted_dihedrals[torsion][1:]):
            if (a1, a2) not in bond_tuples and (a2, a1) not in bond_tuples:
                # NOTE(review): the '{}' placeholder is never filled (no
                # .format), and this ``continue`` only skips the bond-check
                # loop iteration — the torsion is still processed below.
                utils.logger().warning("torsion {} is not bonded. Skipping this torsion")
                continue
        # measure dihedral angle
        dihedral_angle = measure_dihedral_angle(restricted_dihedrals[torsion], qc_molecule['geometry'])
        t_tuple = restricted_dihedrals[torsion]
        angle = round(dihedral_angle)
        # Forward sweep: angle -> angle + maximum_rotation.
        optimization_jobs['{}_{}'.format(t_tuple, i)] = {
            'type': 'optimization_input',
            'initial_molecule': qc_molecule,
            'dihedrals': [restricted_dihedrals[torsion]],
            'constraints': {'scan': [('dihedral', str(t_tuple[0]), str(t_tuple[1]), str(t_tuple[2]),
                                      str(t_tuple[3]), str(angle), str(angle + maximum_rotation),
                                      str(steps))]}}
        # Backward sweep: angle -> angle - maximum_rotation.
        optimization_jobs['{}_{}'.format(t_tuple, i+1)] = {
            'type': 'optimization_input',
            'initial_molecule': qc_molecule,
            'dihedrals': [restricted_dihedrals[torsion]],
            'constraints': {'scan': [('dihedral', str(t_tuple[0]), str(t_tuple[1]), str(t_tuple[2]),
                                      str(t_tuple[3]), str(angle), str(angle - maximum_rotation),
                                      str(steps))]}}
    return optimization_jobs
def generate_constraint_opt_input(qc_molecule, dihedrals, maximum_rotation=30, interval=5, filename=None):
    """
    Build QCFractal constrained-optimization inputs that sweep each dihedral
    from (current angle - maximum_rotation) to (current angle + maximum_rotation)
    in steps of `interval` degrees, generating one conformer per angle.

    Parameters
    ----------
    qc_molecule: dict
        molecule in QC_JSON format; must carry a geometry and a mapped SMILES
        under identifiers['canonical_isomeric_explicit_hydrogen_mapped_smiles']
    dihedrals: dict
        maps a torsion name to the 4 atom (map) indices to scan
    maximum_rotation: int, optional, default 30
        half-width of the scan window, in degrees
    interval: int, optional, default 5
        angular step between generated conformers, in degrees
    filename: str, optional, default None
        if given, each conformer is also written to '{filename}_{i}.pdb'
    Returns
    -------
    optimization_jobs: dict
        QCFractal optimization jobs input, keyed '{dihedral_indices}_{angle_in_degrees}'
    """
    optimization_jobs = {}
    tagged_smiles = qc_molecule['identifiers']['canonical_isomeric_explicit_hydrogen_mapped_smiles']
    mol = oechem.OEMol()
    oechem.OESmilesToMol(mol, tagged_smiles)
    atom_map = get_atom_map(mol, tagged_smiles)
    # reorder the flat QC geometry into this molecule's atom order
    coords = chemi.from_mapped_xyz_to_mol_idx_order(qc_molecule['geometry'], atom_map)
    # convert coord to Angstrom
    coords = coords * BOHR_2_ANGSTROM
    # NOTE(review): .next() is the OpenEye conformer-iterator call style — confirm
    # it matches the installed toolkit version
    conf = mol.GetConfs().next()
    conf.SetCoords(oechem.OEFloatArray(coords))
    # new molecule for setting dihedral angles
    mol_2 = oechem.OEMol(mol)
    conf_2 = mol_2.GetConfs().next()
    coords_2 = oechem.OEFloatArray(conf_2.GetMaxAtomIdx()*3)
    conf.GetCoords(coords_2)
    mol_2.DeleteConfs()
    # work in radians internally; converted back to degrees for the job spec
    interval = radians(interval)
    max_rot = radians(maximum_rotation)
    for dihedral in dihedrals:
        j = 0
        dih_idx = dihedrals[dihedral]
        tor = []
        for i in dih_idx:
            # map indices on the OEMol are 1-based
            a = mol.GetAtom(oechem.OEHasMapIdx(i+1))
            tor.append(a)
        dih_angle = oechem.OEGetTorsion(conf, tor[0], tor[1], tor[2], tor[3])
        for i, angle in enumerate(np.arange(dih_angle-max_rot, dih_angle+max_rot, interval)):
            newconf = mol.NewConf(coords_2)
            oechem.OESetTorsion(newconf, tor[0], tor[1], tor[2], tor[3], angle)
            new_angle = oechem.OEGetTorsion(newconf, tor[0], tor[1], tor[2], tor[3])
            # if new_angle == dih_angle:
            #     j += 1
            #     if j > 1:
            #         # One equivalent angle should be generated.
            #         logger().warning("Openeye did not generate a new conformer for torsion and angle {} {}. Will not generate"
            #                          "qcfractal optimizaiton input".format(dih_idx, angle))
            #         break
            if filename:
                pdb = oechem.oemolostream("{}_{}.pdb".format(filename, i))
                oechem.OEWritePDBFile(pdb, newconf)
            symbols, geometry = chemi.to_mapped_geometry(newconf, atom_map)
            # deep-copy so each job carries its own geometry
            qc_molecule = copy.deepcopy(qc_molecule)
            qc_molecule['geometry'] = geometry
            qc_molecule['symbols'] = symbols
            degree = degrees(angle)
            optimization_jobs['{}_{}'.format(dih_idx, int(round(degree)))] = {
                'type': 'optimization_input',
                'initial_molecule': qc_molecule,
                'dihedral': dih_idx,
                'constraints': {
                    "set": [{
                        "type": "dihedral",
                        "indices": dih_idx,
                        "value": degree
                    }]
                }
            }
    return optimization_jobs
def measure_dihedral_angle(dihedral, coords):
    """
    Calculate the dihedral angle, in degrees, defined by four atoms.

    Parameters
    ----------
    dihedral: sequence of 4 atom indices
    coords: flat sequence of coordinates in Bohr (len = 3 * n_atoms)

    Returns
    -------
    float
        signed dihedral angle in degrees
    """
    # flat Bohr coordinates -> (n_atoms, 3) array in Angstrom
    xyz = np.array(coords, dtype=float).reshape(int(len(coords)/3), 3) * utils.BOHR_2_ANGSTROM
    p0 = xyz[dihedral[0]]
    p1 = xyz[dihedral[1]]
    p2 = xyz[dihedral[2]]
    p3 = xyz[dihedral[3]]
    # the three bond vectors along the dihedral
    b1, b2, b3 = p1 - p0, p2 - p1, p3 - p2
    # atan2 formulation of the torsion angle (numerically stable, keeps the sign)
    y = np.linalg.norm(b2) * np.dot(b1, np.cross(b2, b3))
    x = np.dot(np.cross(b1, b2), np.cross(b2, b3))
    return np.arctan2(y, x) * 180 / np.pi
def find_equivelant_torsions(mapped_mol, restricted=False, central_bonds=None):
    """
    Find all torsions around each central bond.
    Parameters
    ----------
    mapped_mol: oemol. Must contain map indices
    restricted: bool, optional, default False
        If True, will also find restricted torsions
    central_bonds: list of tuple of ints, optional, default None
        If provided, only torsions around those central bonds will be given. If None, all torsions in molecule will be found
    Returns
    -------
    eq_torsions: dict
        maps central bond (tuple of two 0-based atom indices) to all equivalent torsions
    """
    #ToDo check that mol has mapping
    mol = oechem.OEMol(mapped_mol)
    if not has_atom_map(mol):
        raise ValueError("OEMol must have map indices")
    # terminal torsions end in a hydrogen on a CH1/CH2/CH3-like atom
    terminal_smarts = '[*]~[*]-[X2H1,X3H2,X4H3]-[#1]'
    terminal_torsions = _find_torsions_from_smarts(mol, terminal_smarts)
    mid_torsions = [[tor.a, tor.b, tor.c, tor.d] for tor in oechem.OEGetTorsions(mapped_mol)]
    all_torsions = terminal_torsions + mid_torsions
    if restricted:
        # torsions across double or ring bonds that should only be driven over a restricted range
        restricted_smarts = '[*]~[C,c]=,@[C,c]~[*]'
        restricted_torsions = _find_torsions_from_smarts(mol, restricted_smarts)
        all_torsions = all_torsions + restricted_torsions
    tor_idx = []
    for tor in all_torsions:
        # convert 1-based map indices to 0-based atom indices
        tor_name = (tor[0].GetMapIdx()-1, tor[1].GetMapIdx()-1, tor[2].GetMapIdx()-1, tor[3].GetMapIdx()-1)
        tor_idx.append(tor_name)
    if central_bonds:
        if not isinstance(central_bonds, list):
            central_bonds = [central_bonds]
    if not central_bonds:
        # nothing requested: group over every central bond present in the molecule
        central_bonds = set((tor[1], tor[2]) for tor in tor_idx)
    # a torsion matches a central bond in either direction
    eq_torsions = {cb : [tor for tor in tor_idx if cb == (tor[1], tor[2]) or cb ==(tor[2], tor[1])] for cb in
                   central_bonds}
    return eq_torsions
def get_initial_crank_state(fragment):
    """
    Generate initial crank state JSON for each crank job in fragment.

    Parameters
    ----------
    fragment: dict
        A fragment from JSON crank jobs; must carry 'molecule'
        (with 'geometry' and 'symbols'), 'needed_torsion_drives'
        (with 'internal' and 'terminal' maps) and 'crank_torsion_drives'.

    Returns
    -------
    crank_initial_states: dict
        dictionary containing JSON specs for initial states for all crank jobs in a fragment.
    """
    initial_states = {}
    geometry = fragment['molecule']['geometry']
    needed = fragment['needed_torsion_drives']
    jobs = fragment['crank_torsion_drives']
    for job_name in jobs:
        dihedrals = []
        grid_spacing = []
        # internal torsions first, then terminal, matching the job spec layout
        for kind in ('internal', 'terminal'):
            job_torsions = jobs[job_name]['{}_torsions'.format(kind)]
            for torsion_name, spacing in job_torsions.items():
                # convert 1-based indexing to 0-based indexing
                dihedrals.append([j - 1 for j in needed[kind][torsion_name]])
                grid_spacing.append(spacing)
        initial_states[job_name] = {
            'dihedrals': dihedrals,
            'grid_spacing': grid_spacing,
            'elements': fragment['molecule']['symbols'],
            # ToDo add ability to start with many geometries
            'init_coords': [geometry],
            'grid_status': {},
        }
    return initial_states
| [
"openeye.oechem.OESetTorsion",
"openeye.oechem.OEGetTorsions",
"numpy.array",
"numpy.arctan2",
"numpy.linalg.norm",
"copy.deepcopy",
"numpy.arange",
"cmiles.utils.mol_to_smiles",
"openeye.oechem.OEQMol",
"numpy.cross",
"numpy.asarray",
"openeye.oechem.OEHasMapIdx",
"openeye.oechem.OEPrepareS... | [((1135, 1157), 'cmiles.utils.has_atom_map', 'has_atom_map', (['molecule'], {}), '(molecule)\n', (1147, 1157), False, 'from cmiles.utils import mol_to_smiles, has_atom_map, get_atom_map\n'), ((1919, 1941), 'openeye.oechem.OEMol', 'oechem.OEMol', (['molecule'], {}), '(molecule)\n', (1931, 1941), True, 'import openeye.oechem as oechem\n'), ((4608, 4623), 'openeye.oechem.OEQMol', 'oechem.OEQMol', ([], {}), '()\n', (4621, 4623), True, 'import openeye.oechem as oechem\n'), ((4735, 4759), 'openeye.oechem.OESubSearch', 'oechem.OESubSearch', (['qmol'], {}), '(qmol)\n', (4753, 4759), True, 'import openeye.oechem as oechem\n'), ((4778, 4814), 'openeye.oechem.OEPrepareSearch', 'oechem.OEPrepareSearch', (['molecule', 'ss'], {}), '(molecule, ss)\n', (4800, 4814), True, 'import openeye.oechem as oechem\n'), ((13732, 13771), 'numpy.asarray', 'np.asarray', (["qc_molecule['connectivity']"], {}), "(qc_molecule['connectivity'])\n", (13742, 13771), True, 'import numpy as np\n'), ((15848, 15862), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (15860, 15862), True, 'import openeye.oechem as oechem\n'), ((15867, 15907), 'openeye.oechem.OESmilesToMol', 'oechem.OESmilesToMol', (['mol', 'tagged_smiles'], {}), '(mol, tagged_smiles)\n', (15887, 15907), True, 'import openeye.oechem as oechem\n'), ((15923, 15955), 'cmiles.utils.get_atom_map', 'get_atom_map', (['mol', 'tagged_smiles'], {}), '(mol, tagged_smiles)\n', (15935, 15955), False, 'from cmiles.utils import mol_to_smiles, has_atom_map, get_atom_map\n'), ((16256, 16273), 'openeye.oechem.OEMol', 'oechem.OEMol', (['mol'], {}), '(mol)\n', (16268, 16273), True, 'import openeye.oechem as oechem\n'), ((16441, 16458), 'math.radians', 'radians', (['interval'], {}), '(interval)\n', (16448, 16458), False, 'from math import radians, degrees\n'), ((16473, 16498), 'math.radians', 'radians', (['maximum_rotation'], {}), '(maximum_rotation)\n', (16480, 16498), False, 'from math import radians, degrees\n'), 
((18944, 18962), 'numpy.arctan2', 'np.arctan2', (['t1', 't2'], {}), '(t1, t2)\n', (18954, 18962), True, 'import numpy as np\n'), ((19676, 19700), 'openeye.oechem.OEMol', 'oechem.OEMol', (['mapped_mol'], {}), '(mapped_mol)\n', (19688, 19700), True, 'import openeye.oechem as oechem\n'), ((1363, 1438), 'cmiles.utils.mol_to_smiles', 'mol_to_smiles', (['molecule'], {'isomeric': '(True)', 'mapped': '(True)', 'explicit_hydrogen': '(True)'}), '(molecule, isomeric=True, mapped=True, explicit_hydrogen=True)\n', (1376, 1438), False, 'from cmiles.utils import mol_to_smiles, has_atom_map, get_atom_map\n'), ((4635, 4669), 'openeye.oechem.OEParseSmarts', 'oechem.OEParseSmarts', (['qmol', 'smarts'], {}), '(qmol, smarts)\n', (4655, 4669), True, 'import openeye.oechem as oechem\n'), ((11685, 11716), 'numpy.arange', 'np.arange', (['(0)', 'torsion_dimension'], {}), '(0, torsion_dimension)\n', (11694, 11716), True, 'import numpy as np\n'), ((11737, 11792), 'itertools.combinations', 'itertools.combinations', (['all_torsion_idx', 'scan_dimension'], {}), '(all_torsion_idx, scan_dimension)\n', (11759, 11792), False, 'import itertools\n'), ((16167, 16194), 'openeye.oechem.OEFloatArray', 'oechem.OEFloatArray', (['coords'], {}), '(coords)\n', (16186, 16194), True, 'import openeye.oechem as oechem\n'), ((16724, 16781), 'openeye.oechem.OEGetTorsion', 'oechem.OEGetTorsion', (['conf', 'tor[0]', 'tor[1]', 'tor[2]', 'tor[3]'], {}), '(conf, tor[0], tor[1], tor[2], tor[3])\n', (16743, 16781), True, 'import openeye.oechem as oechem\n'), ((18834, 18852), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (18848, 18852), True, 'import numpy as np\n'), ((18898, 18914), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (18906, 18914), True, 'import numpy as np\n'), ((18916, 18932), 'numpy.cross', 'np.cross', (['v2', 'v3'], {}), '(v2, v3)\n', (18924, 18932), True, 'import numpy as np\n'), ((19712, 19729), 'cmiles.utils.has_atom_map', 'has_atom_map', (['mol'], {}), '(mol)\n', (19724, 
19729), False, 'from cmiles.utils import mol_to_smiles, has_atom_map, get_atom_map\n'), ((3346, 3371), 'openeye.oechem.OEGetTorsions', 'oechem.OEGetTorsions', (['mol'], {}), '(mol)\n', (3366, 3371), True, 'import openeye.oechem as oechem\n'), ((10088, 10145), 'itertools.combinations', 'itertools.combinations', (['internal_torsions', 'scan_dimension'], {}), '(internal_torsions, scan_dimension)\n', (10110, 10145), False, 'import itertools\n'), ((10874, 10931), 'itertools.combinations', 'itertools.combinations', (['terminal_torsions', 'scan_dimension'], {}), '(terminal_torsions, scan_dimension)\n', (10896, 10931), False, 'import itertools\n'), ((16816, 16877), 'numpy.arange', 'np.arange', (['(dih_angle - max_rot)', '(dih_angle + max_rot)', 'interval'], {}), '(dih_angle - max_rot, dih_angle + max_rot, interval)\n', (16825, 16877), True, 'import numpy as np\n'), ((16932, 16999), 'openeye.oechem.OESetTorsion', 'oechem.OESetTorsion', (['newconf', 'tor[0]', 'tor[1]', 'tor[2]', 'tor[3]', 'angle'], {}), '(newconf, tor[0], tor[1], tor[2], tor[3], angle)\n', (16951, 16999), True, 'import openeye.oechem as oechem\n'), ((17024, 17084), 'openeye.oechem.OEGetTorsion', 'oechem.OEGetTorsion', (['newconf', 'tor[0]', 'tor[1]', 'tor[2]', 'tor[3]'], {}), '(newconf, tor[0], tor[1], tor[2], tor[3])\n', (17043, 17084), True, 'import openeye.oechem as oechem\n'), ((17746, 17772), 'copy.deepcopy', 'copy.deepcopy', (['qc_molecule'], {}), '(qc_molecule)\n', (17759, 17772), False, 'import copy\n'), ((17886, 17900), 'math.degrees', 'degrees', (['angle'], {}), '(angle)\n', (17893, 17900), False, 'from math import radians, degrees\n'), ((18864, 18880), 'numpy.cross', 'np.cross', (['v2', 'v3'], {}), '(v2, v3)\n', (18872, 18880), True, 'import numpy as np\n'), ((19974, 20006), 'openeye.oechem.OEGetTorsions', 'oechem.OEGetTorsions', (['mapped_mol'], {}), '(mapped_mol)\n', (19994, 20006), True, 'import openeye.oechem as oechem\n'), ((16653, 16678), 'openeye.oechem.OEHasMapIdx', 'oechem.OEHasMapIdx', 
(['(i + 1)'], {}), '(i + 1)\n', (16671, 16678), True, 'import openeye.oechem as oechem\n'), ((17608, 17643), 'openeye.oechem.OEWritePDBFile', 'oechem.OEWritePDBFile', (['pdb', 'newconf'], {}), '(pdb, newconf)\n', (17629, 17643), True, 'import openeye.oechem as oechem\n'), ((18589, 18618), 'numpy.array', 'np.array', (['coords'], {'dtype': 'float'}), '(coords, dtype=float)\n', (18597, 18618), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Ex08.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sYCTiZxq-Z1zibubnLeqBRyFc7-7mcV_
"""
import numpy as np
"""# N x N Grid world"""
class Environment:
    """N x M grid world evaluated with iterative policy evaluation under a
    uniform random policy.

    States are numbered row-major 0 .. rows*cols-1; the two opposite corners
    (state 0 and the last state) are terminal. Actions that would leave the
    grid are treated as invalid: they return the same state with reward 0 and
    are excluded from the expectation in `update_vtable`.
    """
    actions = ['left', 'right', 'up', 'down']

    def __init__(self, grid_size=(4, 4)):
        self.grid_size = grid_size
        self.s = np.arange(self.grid_size[0] * self.grid_size[1])
        self.v = np.zeros(self.grid_size)
        # top-left and bottom-right corners are terminal
        self.terminal_state_indices = [self.s[0], self.s[-1]]
        # we are considering a random policy i.e. uniform probablity for each action

    def isTerminalState(self, state_index):
        """Return True if `state_index` is one of the two terminal corners."""
        return state_index in self.terminal_state_indices

    def step(self, state_index, action_index):
        """Deterministic transition: returns (next_state, reward).

        Off-grid moves return the same state with reward 0; valid moves
        return the neighbouring state with reward -1.
        """
        num_rows, num_cols = self.grid_size
        action = self.actions[action_index]
        if state_index % num_cols == 0 and action == 'left':
            return state_index, 0
        if (state_index + 1) % num_cols == 0 and action == 'right':
            return state_index, 0
        # Fixed off-by-one: the old test (state_index + 1 < num_cols) missed the
        # top-right cell of the top row, letting 'up' walk off the grid to -1.
        if state_index < num_cols and action == 'up':
            return state_index, 0
        if state_index >= (num_rows - 1) * num_cols and action == 'down':
            return state_index, 0
        offsets = {
            'left': -1,
            'right': 1,
            'up': -num_cols,
            'down': num_cols
        }
        return state_index + offsets[action], -1

    def update_vtable(self, state_index):
        """One Bellman backup for `state_index` under the uniform random policy,
        averaging only over actions that stay on the grid. Result rounded to 1 dp."""
        num_rows, num_cols = self.grid_size
        num_valid_actions = 0
        v_sum = 0
        for action_index in range(len(self.actions)):
            next_state, reward = self.step(state_index, action_index)
            if next_state == state_index:
                # the agent would leave the grid, so this action is not possible
                continue
            # row/column derived with num_cols so non-square grids also work
            # (the old code divided and took the modulus by num_rows)
            v_sum += reward + self.v[next_state // num_cols, next_state % num_cols]
            num_valid_actions += 1
        # counting the number of valid actions gives the correct expectation value
        return np.round((1 / num_valid_actions) * v_sum, 1)

    def run_iterative_Policy_Eval(self, num_iterations):
        """Run `num_iterations` synchronous sweeps; per-sweep tables kept in self.v_copy."""
        self.v_copy = np.zeros((num_iterations, self.v.shape[0], self.v.shape[1]))
        num_cols = self.grid_size[1]
        for i in range(num_iterations):
            for state_ind in self.s:
                v_row_index, v_col_index = state_ind // num_cols, state_ind % num_cols
                if not self.isTerminalState(state_ind):
                    self.v_copy[i, v_row_index, v_col_index] = self.update_vtable(state_ind)
            self.v = self.v_copy[i]
"""# 4 X 4 Grid Environment"""
A = Environment()
print('Num States')
print(A.s)
print('termainal States')
print(A.terminal_state_indices)
print('Num Actions')
print(A.actions)
print('initial Value function')
A.v
A.run_iterative_Policy_Eval(5)
print('Final Value function')
A.v
for i in range(A.v_copy.shape[0]):
print('----- Value Iteration after step ',i, '-------------------')
print(A.v_copy[i])
"""# 10 x 10 Grid Experiment"""
B = Environment(grid_size= (10,10))
print('Num States')
print(B.s)
print('termainal States')
print(B.terminal_state_indices)
print('Num Actions')
print(B.actions)
print('initial Value function')
B.v
B.run_iterative_Policy_Eval(25)
print('Final Value function')
B.v
for i in range(B.v_copy.shape[0]):
print('----- Value Iteration after step ',i, '-------------------')
print(B.v_copy[i])
"""# Simplified Environment Impelmentation(4X4 Grid )"""
class Environment2:
    """Simplified 4x4 grid world with a flat (1-D) value table.

    Same dynamics as `Environment`: states 0..15 row-major, corners 0 and 15
    terminal, off-grid actions are invalid (same state, reward 0, excluded
    from the expectation), valid moves cost -1.
    """
    num_columns = 4
    grid_size = num_columns * num_columns
    actions = ['left', 'right', 'up', 'down']

    def __init__(self):
        self.s = np.arange(self.grid_size)
        self.v = np.zeros(self.grid_size)
        # uniform random policy: equal probability for each action
        self.pi_i = 1 / len(self.actions)

    def isTerminalState(self, state_index):
        """True for the two terminal corners (first and last state)."""
        return state_index == 0 or state_index == self.grid_size - 1

    def step(self, state_index, action_index):
        """Deterministic transition: (next_state, reward); off-grid moves stay put with reward 0."""
        action = self.actions[action_index]
        if state_index % self.num_columns == 0 and action == 'left':
            return state_index, 0
        if (state_index + 1) % self.num_columns == 0 and action == 'right':
            return state_index, 0
        # Fixed off-by-one: (state_index + 1 < num_columns) missed the top-right
        # top-row cell, letting 'up' walk off the grid to -1.
        if state_index < self.num_columns and action == 'up':
            return state_index, 0
        if state_index + 1 > self.grid_size - self.num_columns and action == 'down':
            return state_index, 0
        offsets = {
            'left': -1,
            'right': 1,
            'up': -self.num_columns,
            'down': self.num_columns
        }
        return state_index + offsets[action], -1

    def update_vtable(self, state_index):
        """One Bellman backup averaging only over actions that stay on the grid."""
        num_valid_actions = 0
        v_sum = 0
        for action_index in range(len(self.actions)):
            next_state, reward = self.step(state_index, action_index)
            if next_state == state_index:
                # the agent would leave the grid, so this action is not possible
                continue
            v_sum += reward + self.v[next_state]
            num_valid_actions += 1
        return (1 / num_valid_actions) * v_sum

    def run_iterative_Policy_Eval(self, num_iterations):
        """Run `num_iterations` synchronous sweeps of policy evaluation."""
        for _ in range(num_iterations):
            # Fixed: allocate a fresh buffer every sweep. The old code created
            # v_copy once and assigned self.v = v_copy, so from the second
            # iteration on both names aliased the same array and the sweep
            # silently became in-place — inconsistent with `Environment`.
            v_copy = np.zeros_like(self.v)
            for state_ind in self.s:
                if not self.isTerminalState(state_ind):
                    v_copy[state_ind] = self.update_vtable(state_ind)
            self.v = v_copy
# Demo: two single-sweep evaluations on the simplified environment
F = Environment2()
F.v
F.run_iterative_Policy_Eval(1)
F.v
F.run_iterative_Policy_Eval(1)
F.v | [
"numpy.round",
"numpy.zeros",
"numpy.zeros_like",
"numpy.arange"
] | [((394, 442), 'numpy.arange', 'np.arange', (['(self.grid_size[0] * self.grid_size[1])'], {}), '(self.grid_size[0] * self.grid_size[1])\n', (403, 442), True, 'import numpy as np\n'), ((456, 480), 'numpy.zeros', 'np.zeros', (['self.grid_size'], {}), '(self.grid_size)\n', (464, 480), True, 'import numpy as np\n'), ((2110, 2152), 'numpy.round', 'np.round', (['(1 / num_valid_actions * v_sum)', '(1)'], {}), '(1 / num_valid_actions * v_sum, 1)\n', (2118, 2152), True, 'import numpy as np\n'), ((2243, 2303), 'numpy.zeros', 'np.zeros', (['(num_iterations, self.v.shape[0], self.v.shape[1])'], {}), '((num_iterations, self.v.shape[0], self.v.shape[1]))\n', (2251, 2303), True, 'import numpy as np\n'), ((3692, 3717), 'numpy.arange', 'np.arange', (['self.grid_size'], {}), '(self.grid_size)\n', (3701, 3717), True, 'import numpy as np\n'), ((3731, 3755), 'numpy.zeros', 'np.zeros', (['self.grid_size'], {}), '(self.grid_size)\n', (3739, 3755), True, 'import numpy as np\n'), ((5399, 5420), 'numpy.zeros_like', 'np.zeros_like', (['self.v'], {}), '(self.v)\n', (5412, 5420), True, 'import numpy as np\n')] |
#================================================================
#
# File name : BipedalWalker-v3_PPO
# Author : PyLessons
# Created date: 2020-10-18
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/Reinforcement_Learning
# Description : BipedalWalker-v3 PPO continuous agent
# TensorFlow : 2.3.1
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # -1:cpu, 0:first gpu
import random
import gym
import pylab
import numpy as np
import tensorflow as tf
from tensorboardX import SummaryWriter
#tf.config.experimental_run_functions_eagerly(True) # used for debuging and development
tf.compat.v1.disable_eager_execution(
) # usually using this for fastest performance
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.optimizers import Adam, RMSprop, Adagrad, Adadelta
from tensorflow.keras import backend as K
import copy
from threading import Thread, Lock
from multiprocessing import Process, Pipe
import time
# Enable on-demand GPU memory growth instead of letting TF grab all memory up front
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
    print(f'GPUs {gpus}')
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError:
        # memory growth must be set before the GPU is initialized; ignore if too late
        pass
class Environment(Process):
    """Worker process that owns one gym environment and exchanges
    observations/transitions with the trainer over a pipe.

    NOTE(review): this shadows the gridworld `Environment` class defined
    earlier in this concatenated file.
    """

    def __init__(self,
                 env_idx,
                 child_conn,
                 env_name,
                 state_size,
                 action_size,
                 visualize=False):
        super(Environment, self).__init__()
        self.env = gym.make(env_name)
        self.is_render = visualize
        self.env_idx = env_idx
        self.child_conn = child_conn
        self.state_size = state_size
        self.action_size = action_size

    def run(self):
        super(Environment, self).run()
        # send the initial observation, shaped (1, state_size), to the trainer
        observation = np.reshape(self.env.reset(), [1, self.state_size])
        self.child_conn.send(observation)
        while True:
            # block until the trainer sends the next action
            chosen_action = self.child_conn.recv()
            observation, reward, done, info = self.env.step(chosen_action)
            observation = np.reshape(observation, [1, self.state_size])
            if done:
                # episode over: restart and report the fresh observation instead
                observation = np.reshape(self.env.reset(), [1, self.state_size])
            self.child_conn.send([observation, reward, done, info])
class Actor_Model:
    """Policy network for continuous-action PPO with a clipped surrogate loss.

    The network maps a state to a tanh-bounded action mean; exploration noise
    and the log-std live in the agent, not here.
    """

    def __init__(self, input_shape, action_space, lr, optimizer):
        self.action_space = action_space
        weight_init = tf.random_normal_initializer(stddev=0.01)
        inputs = Input(input_shape)
        hidden = Dense(512, activation="relu", kernel_initializer=weight_init)(inputs)
        hidden = Dense(256, activation="relu", kernel_initializer=weight_init)(hidden)
        hidden = Dense(64, activation="relu", kernel_initializer=weight_init)(hidden)
        outputs = Dense(self.action_space, activation="tanh")(hidden)
        self.Actor = Model(inputs=inputs, outputs=outputs)
        self.Actor.compile(loss=self.ppo_loss_continuous, optimizer=optimizer(lr=lr))

    def ppo_loss_continuous(self, y_true, y_pred):
        # y_true packs [advantage | action | old log-prob] along axis 1
        advantages = y_true[:, :1]
        actions = y_true[:, 1:1 + self.action_space]
        logp_old_ph = y_true[:, 1 + self.action_space]
        LOSS_CLIPPING = 0.2
        logp = self.gaussian_likelihood(actions, y_pred)
        # probability ratio pi_new / pi_old
        ratio = K.exp(logp - logp_old_ph)
        surrogate1 = ratio * advantages
        # clipped surrogate written sign-wise, equivalent to clip(ratio, 1-eps, 1+eps)*adv
        surrogate2 = tf.where(advantages > 0,
                              (1.0 + LOSS_CLIPPING) * advantages,
                              (1.0 - LOSS_CLIPPING) * advantages)
        return -K.mean(K.minimum(surrogate1, surrogate2))

    def gaussian_likelihood(self, actions, pred):  # Keras-backend version for the custom loss
        # fixed log standard deviation of -0.5 per action dimension
        log_std = -0.5 * np.ones(self.action_space, dtype=np.float32)
        pre_sum = -0.5 * (((actions - pred) / (K.exp(log_std) + 1e-8)) ** 2
                          + 2 * log_std + K.log(2 * np.pi))
        return K.sum(pre_sum, axis=1)

    def predict(self, state):
        """Forward pass: action means for a batch of states."""
        return self.Actor.predict(state)
class Critic_Model:
    """State-value network for PPO with a PPO2-style clipped value loss.

    Takes the state plus the old value prediction as a second input so the
    loss can clip the value update around the old estimate.
    """

    def __init__(self, input_shape, action_space, lr, optimizer):
        weight_init = tf.random_normal_initializer(stddev=0.01)
        state_input = Input(input_shape)
        old_values = Input(shape=(1, ))
        hidden = Dense(512, activation="relu", kernel_initializer=weight_init)(state_input)
        hidden = Dense(256, activation="relu", kernel_initializer=weight_init)(hidden)
        hidden = Dense(64, activation="relu", kernel_initializer=weight_init)(hidden)
        value = Dense(1, activation=None)(hidden)
        self.Critic = Model(inputs=[state_input, old_values], outputs=value)
        self.Critic.compile(loss=[self.critic_PPO2_loss(old_values)],
                            optimizer=optimizer(lr=lr))

    def critic_PPO2_loss(self, values):
        """Build a loss closure over the old value predictions `values`."""
        def loss(y_true, y_pred):
            LOSS_CLIPPING = 0.2
            # clip the update around the old estimate and keep the worse of the
            # clipped/unclipped squared errors (pessimistic bound), as in PPO2
            clipped = values + K.clip(y_pred - values, -LOSS_CLIPPING, LOSS_CLIPPING)
            clipped_error = (y_true - clipped)**2
            plain_error = (y_true - y_pred)**2
            return 0.5 * K.mean(K.maximum(clipped_error, plain_error))
            #value_loss = K.mean((y_true - y_pred) ** 2) # standard PPO loss
        return loss

    def predict(self, state):
        """Value estimates for a batch of states; the old-values input is a dummy here."""
        return self.Critic.predict([state, np.zeros((state.shape[0], 1))])
class PPOAgent:
# PPO Main Optimization Algorithm
    def __init__(self, env_name, model_name=""):
        """Set up the gym environment, PPO hyperparameters, the TensorBoard
        writer and the Actor/Critic networks.

        Parameters
        ----------
        env_name : str
            gym environment id; a continuous (Box) action space is assumed.
        model_name : str, optional
            unused; weight file names are derived from env_name.
        """
        # Initialization
        # Environment and PPO parameters
        self.env_name = env_name
        self.env = gym.make(env_name)
        self.action_size = self.env.action_space.shape[0]
        self.state_size = self.env.observation_space.shape
        self.EPISODES = 200000  # total episodes to train through all environments
        self.episode = 0  # used to track the episodes total count of episodes played through all thread environments
        self.max_average = 0  # when average score is above 0 model will be saved
        self.lr = 0.00025
        self.epochs = 10  # training epochs
        self.shuffle = True
        self.Training_batch = 512  # transitions collected per PPO update
        #self.optimizer = RMSprop
        self.optimizer = Adam
        self.replay_count = 0
        # TensorBoard run name encodes environment, optimizer and learning rate
        self.writer = SummaryWriter(comment="_" + self.env_name + "_" +
                                    self.optimizer.__name__ + "_" +
                                    str(self.lr))
        # Instantiate plot memory
        self.scores_, self.episodes_, self.average_ = [], [], [
        ] # used in matplotlib plots
        # Create Actor-Critic network models
        self.Actor = Actor_Model(input_shape=self.state_size,
                                 action_space=self.action_size,
                                 lr=self.lr,
                                 optimizer=self.optimizer)
        self.Critic = Critic_Model(input_shape=self.state_size,
                                   action_space=self.action_size,
                                   lr=self.lr,
                                   optimizer=self.optimizer)
        self.Actor_name = f"{self.env_name}_PPO_Actor.h5"
        self.Critic_name = f"{self.env_name}_PPO_Critic.h5"
        #self.load() # uncomment to continue training from old weights
        # do not change bellow
        # fixed exploration scale: log-std of -0.5 per action dimension
        self.log_std = -0.5 * np.ones(self.action_size, dtype=np.float32)
        self.std = np.exp(self.log_std)
    def act(self, state):
        """Sample an exploratory action for `state`.

        Returns the clipped action and its log-probability (used later as the
        'old' log-prob in the PPO ratio).
        """
        # Use the network to predict the next action to take, using the model
        pred = self.Actor.predict(state)
        low, high = -1.0, 1.0  # -1 and 1 are boundaries of tanh
        # NOTE(review): exploration noise is drawn uniformly, but logp_t below is a
        # Gaussian log-likelihood — confirm this mismatch is intentional.
        action = pred + np.random.uniform(low, high,
                                          size=pred.shape) * self.std
        action = np.clip(action, low, high)
        logp_t = self.gaussian_likelihood(action, pred, self.log_std)
        return action, logp_t
def gaussian_likelihood(self, action, pred, log_std):
# https://github.com/hill-a/stable-baselines/blob/master/stable_baselines/sac/policies.py
pre_sum = -0.5 * (
((action - pred) /
(np.exp(log_std) + 1e-8))**2 + 2 * log_std + np.log(2 * np.pi))
return np.sum(pre_sum, axis=1)
def discount_rewards(self, reward): #gaes is better
# Compute the gamma-discounted rewards over an episode
# We apply the discount and normalize it to avoid big variability of rewards
gamma = 0.99 # discount rate
running_add = 0
discounted_r = np.zeros_like(reward)
for i in reversed(range(0, len(reward))):
running_add = running_add * gamma + reward[i]
discounted_r[i] = running_add
discounted_r -= np.mean(discounted_r) # normalizing the result
discounted_r /= (np.std(discounted_r) + 1e-8
) # divide by standard deviation
return discounted_r
def get_gaes(self,
rewards,
dones,
values,
next_values,
gamma=0.99,
lamda=0.90,
normalize=True):
deltas = [
r + gamma * (1 - d) * nv - v
for r, d, nv, v in zip(rewards, dones, next_values, values)
]
deltas = np.stack(deltas)
gaes = copy.deepcopy(deltas)
for t in reversed(range(len(deltas) - 1)):
gaes[t] = gaes[t] + (1 - dones[t]) * gamma * lamda * gaes[t + 1]
target = gaes + values
if normalize:
gaes = (gaes - gaes.mean()) / (gaes.std() + 1e-8)
return np.vstack(gaes), np.vstack(target)
    def replay(self, states, actions, rewards, dones, next_states, logp_ts):
        """One PPO update: compute GAE advantages/targets from the collected
        batch, fit Actor and Critic for `self.epochs` epochs, and log
        diagnostics (losses, approximate KL, entropy) to TensorBoard."""
        # reshape memory to appropriate shape for training
        states = np.vstack(states)
        next_states = np.vstack(next_states)
        actions = np.vstack(actions)
        logp_ts = np.vstack(logp_ts)
        # Get Critic network predictions
        values = self.Critic.predict(states)
        next_values = self.Critic.predict(next_states)
        # Compute discounted rewards and advantages
        #discounted_r = self.discount_rewards(rewards)
        #advantages = np.vstack(discounted_r - values)
        advantages, target = self.get_gaes(rewards, dones, np.squeeze(values),
                                           np.squeeze(next_values))
        '''
        pylab.plot(adv,'.')
        pylab.plot(target,'-')
        ax=pylab.gca()
        ax.grid(True)
        pylab.subplots_adjust(left=0.05, right=0.98, top=0.96, bottom=0.06)
        pylab.show()
        if str(episode)[-2:] == "00": pylab.savefig(self.env_name+"_"+self.episode+".png")
        '''
        # stack everything to numpy array
        # pack all advantages, predictions and actions to y_true and when they are received
        # in custom loss function we unpack it
        y_true = np.hstack([advantages, actions, logp_ts])
        # training Actor and Critic networks
        a_loss = self.Actor.Actor.fit(states,
                                      y_true,
                                      epochs=self.epochs,
                                      verbose=0,
                                      shuffle=self.shuffle)
        c_loss = self.Critic.Critic.fit([states, values],
                                        target,
                                        epochs=self.epochs,
                                        verbose=0,
                                        shuffle=self.shuffle)
        # calculate loss parameters (should be done in loss, but couldn't find working way how to do that with disabled eager execution)
        pred = self.Actor.predict(states)
        log_std = -0.5 * np.ones(self.action_size, dtype=np.float32)
        logp = self.gaussian_likelihood(actions, pred, log_std)
        # diagnostics: approximate KL(old || new) and policy entropy estimate
        approx_kl = np.mean(logp_ts - logp)
        approx_ent = np.mean(-logp)
        self.writer.add_scalar('Data/actor_loss_per_replay',
                               np.sum(a_loss.history['loss']),
                               self.replay_count)
        self.writer.add_scalar('Data/critic_loss_per_replay',
                               np.sum(c_loss.history['loss']),
                               self.replay_count)
        self.writer.add_scalar('Data/approx_kl_per_replay', approx_kl,
                               self.replay_count)
        self.writer.add_scalar('Data/approx_ent_per_replay', approx_ent,
                               self.replay_count)
        self.replay_count += 1
    def load(self):
        """Restore Actor and Critic weights from their .h5 files."""
        self.Actor.Actor.load_weights(self.Actor_name)
        self.Critic.Critic.load_weights(self.Critic_name)
    def save(self):
        """Persist Actor and Critic weights to their .h5 files."""
        self.Actor.Actor.save_weights(self.Actor_name)
        self.Critic.Critic.save_weights(self.Critic_name)
    # class-level figure shared by all PlotModel calls
    pylab.figure(figsize=(18, 9))
    pylab.subplots_adjust(left=0.05, right=0.98, top=0.96, bottom=0.06)

    def PlotModel(self, score, episode, save=True):
        """Record the episode score, refresh the training plot every 100
        episodes, and save the model whenever the 50-episode moving average
        reaches a new best.

        Returns
        -------
        tuple
            (moving_average, "SAVING" if the model was saved else "")
        """
        self.scores_.append(score)
        self.episodes_.append(episode)
        # 50-episode moving average of the score
        self.average_.append(sum(self.scores_[-50:]) / len(self.scores_[-50:]))
        if str(episode)[-2:] == "00":  # much faster than episode % 100
            pylab.plot(self.episodes_, self.scores_, 'b')
            pylab.plot(self.episodes_, self.average_, 'r')
            pylab.ylabel('Score', fontsize=18)
            pylab.xlabel('Steps', fontsize=18)
            try:
                pylab.grid(True)
                pylab.savefig(self.env_name + ".png")
            except OSError:
                pass
        # saving best models
        if self.average_[-1] >= self.max_average and save:
            self.max_average = self.average_[-1]
            self.save()
            SAVING = "SAVING"
            # decreaate learning rate every saved model
            #self.lr *= 0.99
            #K.set_value(self.Actor.Actor.optimizer.learning_rate, self.lr)
            #K.set_value(self.Critic.Critic.optimizer.learning_rate, self.lr)
        else:
            SAVING = ""
        return self.average_[-1], SAVING
    def run_batch(self):
        """Single-environment training loop.

        Collects `Training_batch` environment steps, then runs one PPO
        replay/update on the batch; repeats until `self.episode` reaches
        `self.EPISODES`. Logs per-episode stats to the TensorBoard writer.
        """
        state = self.env.reset()
        state = np.reshape(state, [1, self.state_size[0]])
        done, score, SAVING = False, 0, ''
        while True:
            # Instantiate or reset games memory
            states, next_states, actions, rewards, dones, logp_ts = [], [], [], [], [], []
            for t in range(self.Training_batch):
                self.env.render()
                # Actor picks an action
                action, logp_t = self.act(state)
                # Retrieve new state, reward, and whether the state is terminal
                next_state, reward, done, _ = self.env.step(action[0])
                # Memorize (state, next_states, action, reward, done, logp_ts) for training
                states.append(state)
                next_states.append(
                    np.reshape(next_state, [1, self.state_size[0]]))
                actions.append(action)
                rewards.append(reward)
                dones.append(done)
                logp_ts.append(logp_t[0])
                # Update current state shape
                state = np.reshape(next_state, [1, self.state_size[0]])
                score += reward
                if done:
                    # episode finished mid-batch: log, then reset the env
                    # and keep filling the same batch
                    self.episode += 1
                    average, SAVING = self.PlotModel(score, self.episode)
                    print(
                        "episode: {}/{}, score: {}, average: {:.2f} {}".format(
                            self.episode, self.EPISODES, score, average,
                            SAVING))
                    self.writer.add_scalar(f'Workers:{1}/score_per_episode',
                                           score, self.episode)
                    self.writer.add_scalar(f'Workers:{1}/learning_rate',
                                           self.lr, self.episode)
                    self.writer.add_scalar(f'Workers:{1}/average_score',
                                           average, self.episode)
                    state, done, score, SAVING = self.env.reset(), False, 0, ''
                    state = np.reshape(state, [1, self.state_size[0]])
            # one PPO update over the collected batch
            self.replay(states, actions, rewards, dones, next_states, logp_ts)
            if self.episode >= self.EPISODES:
                break
        self.env.close()
def run_multiprocesses(self, num_worker=4):
works, parent_conns, child_conns = [], [], []
for idx in range(num_worker):
parent_conn, child_conn = Pipe()
work = Environment(idx, child_conn, self.env_name,
self.state_size[0], self.action_size, True)
work.start()
works.append(work)
parent_conns.append(parent_conn)
child_conns.append(child_conn)
states = [[] for _ in range(num_worker)]
next_states = [[] for _ in range(num_worker)]
actions = [[] for _ in range(num_worker)]
rewards = [[] for _ in range(num_worker)]
dones = [[] for _ in range(num_worker)]
logp_ts = [[] for _ in range(num_worker)]
score = [0 for _ in range(num_worker)]
state = [0 for _ in range(num_worker)]
for worker_id, parent_conn in enumerate(parent_conns):
state[worker_id] = parent_conn.recv()
while self.episode < self.EPISODES:
# get batch of action's and log_pi's
action, logp_pi = self.act(
np.reshape(state, [num_worker, self.state_size[0]]))
for worker_id, parent_conn in enumerate(parent_conns):
parent_conn.send(action[worker_id])
actions[worker_id].append(action[worker_id])
logp_ts[worker_id].append(logp_pi[worker_id])
for worker_id, parent_conn in enumerate(parent_conns):
next_state, reward, done, _ = parent_conn.recv()
states[worker_id].append(state[worker_id])
next_states[worker_id].append(next_state)
rewards[worker_id].append(reward)
dones[worker_id].append(done)
state[worker_id] = next_state
score[worker_id] += reward
if done:
average, SAVING = self.PlotModel(score[worker_id],
self.episode)
print(
"episode: {}/{}, worker: {}, score: {}, average: {:.2f} {}"
.format(self.episode, self.EPISODES, worker_id,
score[worker_id], average, SAVING))
self.writer.add_scalar(
f'Workers:{num_worker}/score_per_episode',
score[worker_id], self.episode)
self.writer.add_scalar(
f'Workers:{num_worker}/learning_rate', self.lr,
self.episode)
self.writer.add_scalar(
f'Workers:{num_worker}/average_score', average,
self.episode)
score[worker_id] = 0
if (self.episode < self.EPISODES):
self.episode += 1
for worker_id in range(num_worker):
if len(states[worker_id]) >= self.Training_batch:
self.replay(states[worker_id], actions[worker_id],
rewards[worker_id], dones[worker_id],
next_states[worker_id], logp_ts[worker_id])
states[worker_id] = []
next_states[worker_id] = []
actions[worker_id] = []
rewards[worker_id] = []
dones[worker_id] = []
logp_ts[worker_id] = []
# terminating processes after a while loop
works.append(work)
for work in works:
work.terminate()
print('TERMINATED:', work)
work.join()
def test(self, test_episodes=100): #evaluate
self.load()
for e in range(101):
state = self.env.reset()
state = np.reshape(state, [1, self.state_size[0]])
done = False
score = 0
while not done:
self.env.render()
action = self.Actor.predict(state)[0]
state, reward, done, _ = self.env.step(action)
state = np.reshape(state, [1, self.state_size[0]])
score += reward
if done:
average, SAVING = self.PlotModel(score, e, save=False)
print("episode: {}/{}, score: {}, average{}".format(
e, test_episodes, score, average))
break
self.env.close()
if __name__ == "__main__":
    # newest gym fixed bugs in 'BipedalWalker-v2' and now it's called 'BipedalWalker-v3'
    env_name = 'BipedalWalker-v3'
    agent = PPOAgent(env_name)
    #agent.run_batch() # train as PPO
    # Multiprocessed training: 16 worker environments feed a single learner.
    agent.run_multiprocesses(
        num_worker=16) # train PPO multiprocessed (fastest)
# agent.test() | [
"numpy.clip",
"tensorflow.keras.backend.log",
"pylab.subplots_adjust",
"numpy.hstack",
"pylab.savefig",
"numpy.log",
"pylab.xlabel",
"tensorflow.keras.layers.Dense",
"copy.deepcopy",
"gym.make",
"tensorflow.keras.layers.Input",
"numpy.mean",
"numpy.reshape",
"pylab.plot",
"tensorflow.ker... | [((710, 748), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (746, 748), True, 'import tensorflow as tf\n'), ((1124, 1175), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1168, 1175), True, 'import tensorflow as tf\n'), ((13822, 13851), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(18, 9)'}), '(figsize=(18, 9))\n', (13834, 13851), False, 'import pylab\n'), ((13856, 13923), 'pylab.subplots_adjust', 'pylab.subplots_adjust', ([], {'left': '(0.05)', 'right': '(0.98)', 'top': '(0.96)', 'bottom': '(0.06)'}), '(left=0.05, right=0.98, top=0.96, bottom=0.06)\n', (13877, 13923), False, 'import pylab\n'), ((1237, 1292), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (1277, 1292), True, 'import tensorflow as tf\n'), ((1623, 1641), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1631, 1641), False, 'import gym\n'), ((1929, 1968), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size]'], {}), '(state, [1, self.state_size])\n', (1939, 1968), True, 'import numpy as np\n'), ((2572, 2590), 'tensorflow.keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (2577, 2590), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((3178, 3215), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'X_input', 'outputs': 'output'}), '(inputs=X_input, outputs=output)\n', (3183, 3215), False, 'from tensorflow.keras.models import Model, load_model\n'), ((4185, 4210), 'tensorflow.keras.backend.exp', 'K.exp', (['(logp - logp_old_ph)'], {}), '(logp - logp_old_ph)\n', (4190, 4210), True, 'from tensorflow.keras import backend as K\n'), ((4257, 4357), 'tensorflow.where', 'tf.where', (['(advantages > 0)', '((1.0 + LOSS_CLIPPING) * advantages)', '((1.0 - LOSS_CLIPPING) * 
advantages)'], {}), '(advantages > 0, (1.0 + LOSS_CLIPPING) * advantages, (1.0 -\n LOSS_CLIPPING) * advantages)\n', (4265, 4357), True, 'import tensorflow as tf\n'), ((4768, 4790), 'tensorflow.keras.backend.sum', 'K.sum', (['pre_sum'], {'axis': '(1)'}), '(pre_sum, axis=1)\n', (4773, 4790), True, 'from tensorflow.keras import backend as K\n'), ((4969, 4987), 'tensorflow.keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (4974, 4987), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((5009, 5026), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5014, 5026), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((5556, 5606), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[X_input, old_values]', 'outputs': 'value'}), '(inputs=[X_input, old_values], outputs=value)\n', (5561, 5606), False, 'from tensorflow.keras.models import Model, load_model\n'), ((6609, 6627), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (6617, 6627), False, 'import gym\n'), ((8411, 8431), 'numpy.exp', 'np.exp', (['self.log_std'], {}), '(self.log_std)\n', (8417, 8431), True, 'import numpy as np\n'), ((8784, 8810), 'numpy.clip', 'np.clip', (['action', 'low', 'high'], {}), '(action, low, high)\n', (8791, 8810), True, 'import numpy as np\n'), ((9220, 9243), 'numpy.sum', 'np.sum', (['pre_sum'], {'axis': '(1)'}), '(pre_sum, axis=1)\n', (9226, 9243), True, 'import numpy as np\n'), ((9535, 9556), 'numpy.zeros_like', 'np.zeros_like', (['reward'], {}), '(reward)\n', (9548, 9556), True, 'import numpy as np\n'), ((9732, 9753), 'numpy.mean', 'np.mean', (['discounted_r'], {}), '(discounted_r)\n', (9739, 9753), True, 'import numpy as np\n'), ((10300, 10316), 'numpy.stack', 'np.stack', (['deltas'], {}), '(deltas)\n', (10308, 10316), True, 'import numpy as np\n'), ((10332, 10353), 'copy.deepcopy', 'copy.deepcopy', (['deltas'], {}), '(deltas)\n', (10345, 10353), False, 'import copy\n'), ((10802, 10819), 
'numpy.vstack', 'np.vstack', (['states'], {}), '(states)\n', (10811, 10819), True, 'import numpy as np\n'), ((10842, 10864), 'numpy.vstack', 'np.vstack', (['next_states'], {}), '(next_states)\n', (10851, 10864), True, 'import numpy as np\n'), ((10883, 10901), 'numpy.vstack', 'np.vstack', (['actions'], {}), '(actions)\n', (10892, 10901), True, 'import numpy as np\n'), ((10920, 10938), 'numpy.vstack', 'np.vstack', (['logp_ts'], {}), '(logp_ts)\n', (10929, 10938), True, 'import numpy as np\n'), ((11905, 11946), 'numpy.hstack', 'np.hstack', (['[advantages, actions, logp_ts]'], {}), '([advantages, actions, logp_ts])\n', (11914, 11946), True, 'import numpy as np\n'), ((12864, 12887), 'numpy.mean', 'np.mean', (['(logp_ts - logp)'], {}), '(logp_ts - logp)\n', (12871, 12887), True, 'import numpy as np\n'), ((12909, 12923), 'numpy.mean', 'np.mean', (['(-logp)'], {}), '(-logp)\n', (12916, 12923), True, 'import numpy as np\n'), ((15152, 15194), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size[0]]'], {}), '(state, [1, self.state_size[0]])\n', (15162, 15194), True, 'import numpy as np\n'), ((2237, 2276), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size]'], {}), '(state, [1, self.state_size])\n', (2247, 2276), True, 'import numpy as np\n'), ((3109, 3152), 'tensorflow.keras.layers.Dense', 'Dense', (['self.action_space'], {'activation': '"""tanh"""'}), "(self.action_space, activation='tanh')\n", (3114, 3152), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((4574, 4618), 'numpy.ones', 'np.ones', (['self.action_space'], {'dtype': 'np.float32'}), '(self.action_space, dtype=np.float32)\n', (4581, 4618), True, 'import numpy as np\n'), ((5504, 5529), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'None'}), '(1, activation=None)\n', (5509, 5529), False, 'from tensorflow.keras.layers import Input, Dense\n'), ((8348, 8391), 'numpy.ones', 'np.ones', (['self.action_size'], {'dtype': 'np.float32'}), '(self.action_size, 
dtype=np.float32)\n', (8355, 8391), True, 'import numpy as np\n'), ((9805, 9825), 'numpy.std', 'np.std', (['discounted_r'], {}), '(discounted_r)\n', (9811, 9825), True, 'import numpy as np\n'), ((10613, 10628), 'numpy.vstack', 'np.vstack', (['gaes'], {}), '(gaes)\n', (10622, 10628), True, 'import numpy as np\n'), ((10630, 10647), 'numpy.vstack', 'np.vstack', (['target'], {}), '(target)\n', (10639, 10647), True, 'import numpy as np\n'), ((11303, 11321), 'numpy.squeeze', 'np.squeeze', (['values'], {}), '(values)\n', (11313, 11321), True, 'import numpy as np\n'), ((11366, 11389), 'numpy.squeeze', 'np.squeeze', (['next_values'], {}), '(next_values)\n', (11376, 11389), True, 'import numpy as np\n'), ((12736, 12779), 'numpy.ones', 'np.ones', (['self.action_size'], {'dtype': 'np.float32'}), '(self.action_size, dtype=np.float32)\n', (12743, 12779), True, 'import numpy as np\n'), ((13017, 13047), 'numpy.sum', 'np.sum', (["a_loss.history['loss']"], {}), "(a_loss.history['loss'])\n", (13023, 13047), True, 'import numpy as np\n'), ((13192, 13222), 'numpy.sum', 'np.sum', (["c_loss.history['loss']"], {}), "(c_loss.history['loss'])\n", (13198, 13222), True, 'import numpy as np\n'), ((14215, 14260), 'pylab.plot', 'pylab.plot', (['self.episodes_', 'self.scores_', '"""b"""'], {}), "(self.episodes_, self.scores_, 'b')\n", (14225, 14260), False, 'import pylab\n'), ((14273, 14319), 'pylab.plot', 'pylab.plot', (['self.episodes_', 'self.average_', '"""r"""'], {}), "(self.episodes_, self.average_, 'r')\n", (14283, 14319), False, 'import pylab\n'), ((14332, 14366), 'pylab.ylabel', 'pylab.ylabel', (['"""Score"""'], {'fontsize': '(18)'}), "('Score', fontsize=18)\n", (14344, 14366), False, 'import pylab\n'), ((14379, 14413), 'pylab.xlabel', 'pylab.xlabel', (['"""Steps"""'], {'fontsize': '(18)'}), "('Steps', fontsize=18)\n", (14391, 14413), False, 'import pylab\n'), ((17536, 17542), 'multiprocessing.Pipe', 'Pipe', ([], {}), '()\n', (17540, 17542), False, 'from multiprocessing import Process, 
Pipe\n'), ((21198, 21240), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size[0]]'], {}), '(state, [1, self.state_size[0]])\n', (21208, 21240), True, 'import numpy as np\n'), ((2364, 2403), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size]'], {}), '(state, [1, self.state_size])\n', (2374, 2403), True, 'import numpy as np\n'), ((4427, 4444), 'tensorflow.keras.backend.minimum', 'K.minimum', (['p1', 'p2'], {}), '(p1, p2)\n', (4436, 4444), True, 'from tensorflow.keras import backend as K\n'), ((4735, 4751), 'tensorflow.keras.backend.log', 'K.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4740, 4751), True, 'from tensorflow.keras import backend as K\n'), ((5882, 5936), 'tensorflow.keras.backend.clip', 'K.clip', (['(y_pred - values)', '(-LOSS_CLIPPING)', 'LOSS_CLIPPING'], {}), '(y_pred - values, -LOSS_CLIPPING, LOSS_CLIPPING)\n', (5888, 5936), True, 'from tensorflow.keras import backend as K\n'), ((6354, 6383), 'numpy.zeros', 'np.zeros', (['(state.shape[0], 1)'], {}), '((state.shape[0], 1))\n', (6362, 6383), True, 'import numpy as np\n'), ((8668, 8713), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high'], {'size': 'pred.shape'}), '(low, high, size=pred.shape)\n', (8685, 8713), True, 'import numpy as np\n'), ((9186, 9203), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (9192, 9203), True, 'import numpy as np\n'), ((14447, 14463), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (14457, 14463), False, 'import pylab\n'), ((14480, 14517), 'pylab.savefig', 'pylab.savefig', (["(self.env_name + '.png')"], {}), "(self.env_name + '.png')\n", (14493, 14517), False, 'import pylab\n'), ((16178, 16225), 'numpy.reshape', 'np.reshape', (['next_state', '[1, self.state_size[0]]'], {}), '(next_state, [1, self.state_size[0]])\n', (16188, 16225), True, 'import numpy as np\n'), ((18485, 18536), 'numpy.reshape', 'np.reshape', (['state', '[num_worker, self.state_size[0]]'], {}), '(state, [num_worker, self.state_size[0]])\n', (18495, 
18536), True, 'import numpy as np\n'), ((21491, 21533), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size[0]]'], {}), '(state, [1, self.state_size[0]])\n', (21501, 21533), True, 'import numpy as np\n'), ((2730, 2771), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2758, 2771), True, 'import tensorflow as tf\n'), ((2903, 2944), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2931, 2944), True, 'import tensorflow as tf\n'), ((3046, 3087), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (3074, 3087), True, 'import tensorflow as tf\n'), ((5126, 5167), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (5154, 5167), True, 'import tensorflow as tf\n'), ((5299, 5340), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (5327, 5340), True, 'import tensorflow as tf\n'), ((5442, 5483), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (5470, 5483), True, 'import tensorflow as tf\n'), ((6123, 6150), 'tensorflow.keras.backend.maximum', 'K.maximum', (['v_loss1', 'v_loss2'], {}), '(v_loss1, v_loss2)\n', (6132, 6150), True, 'from tensorflow.keras import backend as K\n'), ((15905, 15952), 'numpy.reshape', 'np.reshape', (['next_state', '[1, self.state_size[0]]'], {}), '(next_state, [1, self.state_size[0]])\n', (15915, 15952), True, 'import numpy as np\n'), ((17140, 17182), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size[0]]'], {}), '(state, [1, self.state_size[0]])\n', (17150, 17182), True, 'import numpy as np\n'), ((4692, 4706), 'tensorflow.keras.backend.exp', 'K.exp', (['log_std'], {}), '(log_std)\n', (4697, 4706), True, 'from 
tensorflow.keras import backend as K\n'), ((9142, 9157), 'numpy.exp', 'np.exp', (['log_std'], {}), '(log_std)\n', (9148, 9157), True, 'import numpy as np\n')] |
"""
Developed by <NAME>
Algorithm module
"""
import zdt3
import random
import numpy as np
import parsing
class MOEC:
    """Decomposition-based multi-objective evolutionary algorithm
    (MOEA/D-style: weight/lambda vectors, Tchebycheff aggregation,
    neighbourhood replacement and an external Pareto archive EP).

    Evaluation budget: N x G <= 4000 or 10000, where
    N: n_sp (number of subproblems / population size)
    G: generations
    """
    def __init__(self,n_sp,generations,neighbourhood,de_F,de_CR,de_SIG,crossover_mode,path_to_file,const_mode=None,problem=zdt3.ZDT3(),weights=False):
        """Build subproblems, neighbourhoods, a random population and the
        initial external archive.

        Args:
            n_sp: number of subproblems (population size).
            generations: number of generations to run.
            neighbourhood: neighbourhood size T for each subproblem.
            de_F, de_CR, de_SIG: differential-evolution / crossover params.
            crossover_mode: "AHX", "DE" or anything else (no recombination).
            path_to_file: output path handed to parsing.Parser.
            const_mode: None, "penalty" or other (constraint dominance).
            problem: problem instance exposing func/const/n_obj/n_real/
                n_con/min_real/max_real.
            weights: penalty weights per constraint; defaults to all ones.
        """
        self.n_sp = n_sp
        self.problem = problem
        self.generations = generations
        self.de_F = de_F
        self.de_CR = de_CR
        self.de_SIG = de_SIG
        self.const = const_mode
        self.crossover = crossover_mode
        self.weights = weights if weights else np.array([1 for _ in range(problem.n_con)])
        self.EP = list()
        # evenly spread 2-objective weight vectors (lambda_i + lambda_2 = 1)
        self.lambda_vectors = [np.array([n/(self.n_sp-1),1-(n/(self.n_sp-1))]) for n in range(self.n_sp)]
        self.neighbours = {}
        for i in range(self.n_sp):
            self.neighbours[i] = self.get_neighbours(self.lambda_vectors[i],list(range(self.n_sp)),self.lambda_vectors,neighbourhood)
        self.population = [np.random.uniform(self.problem.min_real,self.problem.max_real) for _ in range(self.n_sp)]
        performances = [self.problem.func(s) for s in self.population]
        # z*: best objective value seen so far per objective (ideal point)
        self.z_star = np.array([min([ind[o] for ind in performances]) for o in range(self.problem.n_obj)])
        self.initialize_EP(performances)
        self.parser = parsing.Parser(path_to_file)
    def get_neighbours(self,v1,sps,lambdas,T):
        """Return the T subproblem indices whose lambda vectors are closest
        (Euclidean distance) to v1."""
        return sorted(sps, key = lambda v: np.linalg.norm(v1-lambdas[v]))[:T]
    def run(self):
        """Main evolutionary loop: for each generation and subproblem,
        recombine, update z*, the archive EP and the neighbourhood, writing
        the population out each generation. Returns the final population."""
        for g in range(self.generations):
            for i in range(self.n_sp):
                y = self.recombination(i,g)
                y_performance = self.problem.func(y)
                self.update_EP(y,y_performance)
                # update the ideal point with the new child's objectives
                for j in range(self.problem.n_obj):
                    if(y_performance[j]<=self.z_star[j]):
                        self.z_star[j]=y_performance[j]
                # replace any neighbour the child is at least as fit for
                for j in self.neighbours[i]:
                    if(self.fitness(y,self.population[j],self.lambda_vectors[j])):
                        self.population[j]=y
            self.parser.write_popm(self.population,self.problem)
        print("Finished")
        return self.population
    def recombination(self,i,gen=None):
        """Create a child vector for subproblem i via AHX or DE crossover
        (or return population[i] unchanged for any other mode), clipped to
        the problem's variable bounds."""
        # the whole neighbourhood of x_j may be drawn, including x_j itself
        # (it CAN be drawn, but does not have to be)
        # WITH replacement
        # the pool also includes the solutions stored in EP
        differential_sp = np.random.choice(self.neighbours[i]+list(range(len(self.EP))),size=3)
        vector_pool = self.population + [f[0] for f in self.EP]
        vectors = [vector_pool[v] for v in differential_sp]
        if(self.crossover == "AHX"):
            # generation-adaptive probabilities p1 (per-gene crossover) and
            # p2 (SBX-child vs DE-mutant choice)
            p1 = 0.7*(1/(1+np.exp(20*((gen/self.generations)-0.25))))+0.3
            p2 = 0.8*(1/(1+np.exp(-20*((gen/self.generations)-0.5))))+0.1
            if(random.random()<self.de_CR):
                de_result=np.zeros(self.problem.n_real)
                # NOTE: this loop variable shadows the parameter i (harmless
                # here since i is not used after this branch)
                for i in range(self.problem.n_real):
                    if(random.random()<p1):
                        # SBX-style spread factor beta from b_rand
                        b_rand = random.random()
                        beta = (2*b_rand)**(1/(1+self.de_SIG)) if b_rand <= 0.5 else (1/(2-2*b_rand))**(1/(1+self.de_SIG))
                        c1 = 0.5*((1+beta)*vectors[0][i]+(1-beta)*vectors[1][i])
                        c2 = 0.5*((1-beta)*vectors[0][i]+(1+beta)*vectors[1][i])
                        v_i = vectors[0][i]+self.de_F*(vectors[1][i]-vectors[2][i])
                        if(random.random()<p2):
                            if(random.random()<0.5):
                                de_result[i]=c2
                            else:
                                de_result[i]=c1
                        else:
                            de_result[i]=v_i
                    else:
                        de_result[i]=vectors[1][i]
            else:
                de_result = vectors[0]
        elif(self.crossover == "DE"):
            de_result = vectors[0]+self.de_F*(vectors[1]-vectors[2])
            # build a new vector copying each position, with probability CR,
            # from de_result or else from population[i]
            de_result = np.array([de_result[p] if random.random()>= self.de_CR else self.population[i][p] for p in range(len(de_result))])
            # with probability 1/n_real, statistically only ONE element gets
            # a Gaussian perturbation
            sigma = lambda p: (self.problem.max_real[p]-self.problem.min_real[p])/self.de_SIG
            de_result = np.array([de_result[p] + np.random.normal(0,sigma(p)) if random.random()>= 1/self.problem.n_real else de_result[p]
                                 for p in range(len(de_result))])
        else:
            de_result = self.population[i]
        # clip the child to the variable bounds
        for e in range(len(de_result)):
            if(de_result[e] > self.problem.max_real[e]):
                de_result[e]=self.problem.max_real[e]
            if(de_result[e] < self.problem.min_real[e]):
                de_result[e]=self.problem.min_real[e]
        return de_result
    def fitness(self,x,y,lamb):
        """True when x is at least as good as y for weight vector lamb,
        under the configured constraint-handling mode."""
        if not self.const:
            return self.tchebycheff(x,lamb) <= self.tchebycheff(y,lamb)
        elif(self.const == "penalty"):
            return self.tchebycheff(x,lamb) + self.penalty(x) <= self.tchebycheff(y,lamb) + self.penalty(y)
        else:
            return self.constraint_dominance(x,y,lamb)
    def tchebycheff(self,x,lamb):
        """Weighted Tchebycheff aggregation of x's objectives w.r.t. z*."""
        return max(lamb[i]*abs(self.problem.func(x)[i]-self.z_star[i]) for i in range(self.problem.n_obj))
    def penalty(self,x):
        """Weighted sum of x's constraint-violation values."""
        v = self.problem.const(x)
        return sum(self.weights*v)
    def constraint_dominance(self,s1,s2,lamb):
        """s1 beats s2 if it violates constraints less; ties are broken by
        Tchebycheff fitness."""
        if(self.penalty(s1) < self.penalty(s2)):
            return True
        else:
            return self.tchebycheff(s1,lamb) <= self.tchebycheff(s2,lamb)
    def initialize_EP(self,performances):
        """Seed the external archive with the non-dominated members of the
        initial population (2-objective dominance check)."""
        for s in range(len(self.population)):
            if(all([performances[s][0]<performances[c][0] or performances[s][1]<performances[c][1] for c in range(len(self.population)) if c!=s])):
                self.EP.append((self.population[s],performances[s]))
    def update_EP(self,y,y_p):
        """Drop archive entries dominated by child y, then add (y, y_p) if
        no remaining entry dominates it."""
        self.EP = [e for e in self.EP if not (y_p[0] < e[1][0] and y_p[1] < e[1][1])]
        if(all([y_p[0]<e[1][0] or y_p[1]<e[1][1] for e in self.EP])):
            self.EP.append((y,y_p))
| [
"numpy.linalg.norm",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"parsing.Parser",
"random.random",
"zdt3.ZDT3"
] | [((320, 331), 'zdt3.ZDT3', 'zdt3.ZDT3', ([], {}), '()\n', (329, 331), False, 'import zdt3\n'), ((1378, 1406), 'parsing.Parser', 'parsing.Parser', (['path_to_file'], {}), '(path_to_file)\n', (1392, 1406), False, 'import parsing\n'), ((745, 801), 'numpy.array', 'np.array', (['[n / (self.n_sp - 1), 1 - n / (self.n_sp - 1)]'], {}), '([n / (self.n_sp - 1), 1 - n / (self.n_sp - 1)])\n', (753, 801), True, 'import numpy as np\n'), ((1047, 1110), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.min_real', 'self.problem.max_real'], {}), '(self.problem.min_real, self.problem.max_real)\n', (1064, 1110), True, 'import numpy as np\n'), ((2923, 2938), 'random.random', 'random.random', ([], {}), '()\n', (2936, 2938), False, 'import random\n'), ((2978, 3007), 'numpy.zeros', 'np.zeros', (['self.problem.n_real'], {}), '(self.problem.n_real)\n', (2986, 3007), True, 'import numpy as np\n'), ((1499, 1530), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - lambdas[v])'], {}), '(v1 - lambdas[v])\n', (1513, 1530), True, 'import numpy as np\n'), ((3084, 3099), 'random.random', 'random.random', ([], {}), '()\n', (3097, 3099), False, 'import random\n'), ((3138, 3153), 'random.random', 'random.random', ([], {}), '()\n', (3151, 3153), False, 'import random\n'), ((2786, 2830), 'numpy.exp', 'np.exp', (['(20 * (gen / self.generations - 0.25))'], {}), '(20 * (gen / self.generations - 0.25))\n', (2792, 2830), True, 'import numpy as np\n'), ((2860, 2904), 'numpy.exp', 'np.exp', (['(-20 * (gen / self.generations - 0.5))'], {}), '(-20 * (gen / self.generations - 0.5))\n', (2866, 2904), True, 'import numpy as np\n'), ((3550, 3565), 'random.random', 'random.random', ([], {}), '()\n', (3563, 3565), False, 'import random\n'), ((3602, 3617), 'random.random', 'random.random', ([], {}), '()\n', (3615, 3617), False, 'import random\n'), ((4278, 4293), 'random.random', 'random.random', ([], {}), '()\n', (4291, 4293), False, 'import random\n'), ((4643, 4658), 'random.random', 'random.random', ([], 
{}), '()\n', (4656, 4658), False, 'import random\n')] |
import pdb
import numpy as np
import theano as th
import theano.tensor as T
from objective import kld
import distributions
# Gradient-descent fit of a Gaussian qx to a mixture px by minimizing
# KLD(px, qx) over the grid X, using Theano symbolic gradients.
mu = th.shared(value = 15.0, name='mu', borrow=True) # Initial value of mu
sigma = th.shared(value = 2.0, name='sigma', borrow=True) # Initial value of sigma
learning_rate = 0.01

X = np.linspace(-10,20,2000) # NOTE !!!! remember: (next line)
# that (low,high) must include initial values of mu defined above.

qx = distributions.qx(mu=mu, sigma=sigma) # gaussian
px = distributions.px(m1=0,m2=10) # mix gaussian

loss = kld(px,qx,X)
#loss = kld(qx,px,X)

g_mu = th.grad(loss, mu)
g_sig = th.grad(loss, sigma)

# plain gradient-descent updates applied to the shared variables each call
updates = [
    (param, param - learning_rate * gparam)
    for param, gparam in zip([mu,sigma],[g_mu, g_sig])
]

f = th.function([], [loss, mu, sigma], updates=updates)

for i in range(1000):
    # each call performs one descent step and returns current loss/params
    loss,mu_val,sigma_val = f()
    # BUG FIX: was `if i % 100:`, which printed on every epoch EXCEPT
    # multiples of 100 (inverted condition); report every 100th epoch.
    if i % 100 == 0:
        print("Epoch %0.5d , KLD = %f" % (i,loss))

print("mu = {}, sigma = {}".format(mu_val, sigma_val))

print("Plotting px {}".format(px.name))
#px.plot()
fig,plt=px.plot(get_fig=True)
plt.hold(True)  # NOTE(review): hold() was removed in matplotlib >= 3.0 — confirm pinned version
print("Plotting qx {}".format(qx.name))
qx = distributions.qx(mu=mu_val, sigma=sigma_val, name="optimized")
qx.plot()
| [
"distributions.px",
"theano.shared",
"theano.function",
"objective.kld",
"distributions.qx",
"numpy.linspace",
"theano.grad"
] | [((129, 174), 'theano.shared', 'th.shared', ([], {'value': '(15.0)', 'name': '"""mu"""', 'borrow': '(True)'}), "(value=15.0, name='mu', borrow=True)\n", (138, 174), True, 'import theano as th\n'), ((208, 255), 'theano.shared', 'th.shared', ([], {'value': '(2.0)', 'name': '"""sigma"""', 'borrow': '(True)'}), "(value=2.0, name='sigma', borrow=True)\n", (217, 255), True, 'import theano as th\n'), ((311, 337), 'numpy.linspace', 'np.linspace', (['(-10)', '(20)', '(2000)'], {}), '(-10, 20, 2000)\n', (322, 337), True, 'import numpy as np\n'), ((443, 479), 'distributions.qx', 'distributions.qx', ([], {'mu': 'mu', 'sigma': 'sigma'}), '(mu=mu, sigma=sigma)\n', (459, 479), False, 'import distributions\n'), ((496, 525), 'distributions.px', 'distributions.px', ([], {'m1': '(0)', 'm2': '(10)'}), '(m1=0, m2=10)\n', (512, 525), False, 'import distributions\n'), ((548, 562), 'objective.kld', 'kld', (['px', 'qx', 'X'], {}), '(px, qx, X)\n', (551, 562), False, 'from objective import kld\n'), ((590, 607), 'theano.grad', 'th.grad', (['loss', 'mu'], {}), '(loss, mu)\n', (597, 607), True, 'import theano as th\n'), ((616, 636), 'theano.grad', 'th.grad', (['loss', 'sigma'], {}), '(loss, sigma)\n', (623, 636), True, 'import theano as th\n'), ((771, 822), 'theano.function', 'th.function', (['[]', '[loss, mu, sigma]'], {'updates': 'updates'}), '([], [loss, mu, sigma], updates=updates)\n', (782, 822), True, 'import theano as th\n'), ((1142, 1204), 'distributions.qx', 'distributions.qx', ([], {'mu': 'mu_val', 'sigma': 'sigma_val', 'name': '"""optimized"""'}), "(mu=mu_val, sigma=sigma_val, name='optimized')\n", (1158, 1204), False, 'import distributions\n')] |
"""
Test utilities Unit tests, part of the glGA SDK ECSS
glGA SDK v2021.0.5 ECSS (Entity Component System in a Scenegraph)
@Coopyright 2020-2021 <NAME>
"""
import unittest
import numpy as np
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
from pyglGA.ECSS.utilities import *
class TestUtilities(unittest.TestCase):
""" main class to test CG utilities and convenience functions """
def test_vec(self):
"""
test_vec function
"""
print("\nTestUtilities:test_vec() START")
a = [1.0,0.0,0.0,1.0]
vec_a = vec(a)
np_a = np.array([1.0,0.0,0.0,1.0],dtype=np.float,order='F')
self.assertEqual(vec_a.tolist(), np_a.tolist())
np.testing.assert_array_equal(vec_a,np_a)
print(vec_a)
print(np_a)
print("TestUtilities:test_vec() END")
def test_normalised(self):
"""
test_normalised function
"""
print("\nTestUtilities:test_normalised() START")
a = [2.0,2.0,0.0,1.0]
vec_a = vec(a)
norm_vec = normalise(vec_a)
norm_a = normalise(a) # in this case the simple list will be converted to numpy array first implicitly
np_a = np.array([2.0,2.0,0.0,1.0],dtype=np.float,order='F')
norm_np = np.array([0.666667,0.666667,0.0,0.333333],dtype=np.float,order='F')
self.assertAlmostEqual(norm_vec.all(), norm_np.all())
self.assertAlmostEqual(norm_a.all(), norm_np.all())
np.testing.assert_array_almost_equal(norm_vec,norm_np,decimal=5)
np.testing.assert_array_almost_equal(norm_a,norm_np,decimal=5)
print(norm_vec)
print(norm_np)
print(norm_a)
print("TestUtilities:test_normalised() END")
def test_lerp(self):
"""Test linear interpolation between two points"""
print("\nTestUtilities:test_lerp() START")
# lerp between 0.0 to 1.0
point0 = lerp(0.0, 1.0, 0.0)
point1 = lerp(0.0, 1.0, 1.0)
pointb = lerp(0.0, 1.0, 0.5)
print(point0)
print(point1)
print(pointb)
self.assertEqual(point0, 0)
self.assertEqual(point1, 1)
self.assertEqual(pointb, 0.5)
print("\TestUtilities:test_lerp() END")
def test_identity(self):
"""
test_identity function
"""
print("\nTestUtilities:test_identity() START")
matI = identity(4)
np_i1 = np.ones((4,4))
np_i4 = np.identity(4)
np_i = np.array([
[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
self.assertEqual(matI.tolist(), np_i4.tolist())
self.assertEqual(matI.tolist(), np_i.tolist())
self.assertNotEqual(matI.tolist(), np_i1.tolist())
print(matI)
print(np_i)
print(np_i1)
print("TestUtilities:test_identity() END")
def test_inverse(self):
"""
test_inverse function,
https://numpy.org/doc/stable/reference/generated/numpy.linalg.inv.html
"""
print("\nTestUtilities:test_rotate() START")
mLat = np.array([
[1,0,0,1],
[0,1,0,2],
[0,0,1,3],
[0,0,0,1]
],dtype=np.float,order='F')
mLatInv = np.array([
[1,0,0,-1],
[0,1,0,-2],
[0,0,1,-3],
[0,0,0,1]
],dtype=np.float,order='F')
utilmLatInv = inverse(mLat)
np.testing.assert_array_almost_equal(utilmLatInv,mLatInv,decimal=5)
print(utilmLatInv)
print(mLatInv)
print("TestUtilities:test_inverse() END")
def test_ortho(self):
"""
test_ortho function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
"""
print("\nTestUtilities:test_ortho() START")
matOrtho = ortho(-100.0, 100.0, -100.0, 100.0, 1.0, 100.0)
np_Ortho = np.array([
[0.01,0.0,0.0,0.0],
[0.0,0.01,0.0,0.0],
[0.0,0.0,-0.020202,-1.0202],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
self.assertAlmostEqual(matOrtho.all(), np_Ortho.all())
print(matOrtho)
print(np_Ortho)
print("TestUtilities:test_ortho() END")
def test_perspective(self):
"""
test_perspective function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
"""
print("\nTestUtilities:test_perspective() START")
matPersp = perspective(90.0, 1, 0.1, 100)
np_Persp = np.array([
[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,-1.002,-0.2002],
[0.0,0.0,-1.0,0.0],
],dtype=np.float,order='F')
matPersp2 = perspective(45.0, 1.33, 0.1, 100)
np_Persp2 = np.array([
[1.815,0.0,0.0,0.0],
[0.0,2.414,0.0,0.0],
[0.0,0.0,-1.002,-0.2002],
[0.0,0.0,-1.0,0.0],
],dtype=np.float,order='F')
#self.assertAlmostEqual(matPersp.all(), np_Persp.all())
np.testing.assert_array_almost_equal(matPersp,np_Persp,decimal=5)
np.testing.assert_array_almost_equal(matPersp2,np_Persp2,decimal=3)
print(matPersp)
print(np_Persp)
print("TestUtilities:test_perspective() END")
def test_frustum(self):
"""
test_frustum function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
"""
print("\nTestUtilities:test_frustum() START")
matPersp = frustum(-10.0, 10.0,-10.0,10.0, 0.1, 100)
np_Persp = np.array([
[0.01,0.0,0.0,0.0],
[0.0,0.01,0.0,0.0],
[0.0,0.0,-1.002,-0.2002],
[0.0,0.0,-1.0,0.0],
],dtype=np.float,order='F')
self.assertAlmostEqual(matPersp.all(), np_Persp.all())
print(matPersp)
print(np_Persp)
print("TestUtilities:test_frustum() END")
def test_translate(self):
"""
test_translate function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
"""
print("\nTestUtilities:test_translate() START")
matTrans = translate(1.0, 2.0, 3.0)
matTrans2 = translate(vec(1.0, 2.0, 3.0))
mT = np.array([
[1.0,0.0,0.0,1.0],
[0.0,1.0,0.0,2.0],
[0.0,0.0,1.0,3.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
self.assertEqual(matTrans.tolist(), mT.tolist())
self.assertEqual(matTrans2.tolist(), mT.tolist())
np.testing.assert_array_equal(matTrans,mT)
np.testing.assert_array_equal(matTrans2,mT)
print(matTrans)
print(matTrans2)
print(mT)
print("TestUtilities:test_translate() END")
def test_scale(self):
"""
test_scale function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
"""
print("\nTestUtilities:test_scale() START")
matTrans = scale(1.0, 2.0, 3.0)
matTrans2 = scale(vec(1.0, 2.0, 3.0))
matTrans3 = scale(10.0) #uniform scaling
mT = np.array([
[1.0,0.0,0.0,0.0],
[0.0,2.0,0.0,0.0],
[0.0,0.0,3.0,0.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
mT3 = np.array([
[10.0,0.0,0.0,0.0],
[0.0,10.0,0.0,0.0],
[0.0,0.0,10.0,0.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
self.assertEqual(matTrans.tolist(), mT.tolist())
self.assertEqual(matTrans2.tolist(), mT.tolist())
self.assertEqual(matTrans3.tolist(), mT3.tolist())
print(matTrans)
print(matTrans2)
print(mT)
print(matTrans3)
print("TestUtilities:test_scale() END")
def test_sincos(self):
"""
test_sincos is sine cosine calculation function,
tested against results from https://glm.g-truc.net/0.9.4/api/a00136.html
from GLM 0.9.5.1 radians are default and not degrees: GLM_FORCE_RADIANS
"""
print("\nTestUtilities:test_sincos() START")
cos0 = 1.0
cos45 = 0.7071067811865476
sin0 = 0.0
sin90 = 1.0
cos90 = 0.0
sine0, cosine0 = sincos(0.0)
sine45, cosine45 = sincos(45, np.pi/4)
sine90, cosine90 = sincos(90, np.pi/2)
sine90d, cosine90d = sincos(90) #cosine90 does not return pure 0
print(f' sine90: {sine90}, sin90: {sin90}')
print(f' cosine90: {cosine90}, cos90: {cos90}')
print(f' cosine90d: {cosine90d}, sine90d: {sine90d}')
print(f' sine0: {sine90}, sin0: {sin0}')
print(f' cosine0: {cosine0}, cos0: {cos0}')
print(f' cosine45: {cosine45}, cos45: {cos45}')
self.assertAlmostEqual(sine0, sin0)
self.assertAlmostEqual(sine90, sin90)
self.assertAlmostEqual(sine90d, sin90)
self.assertAlmostEqual(cosine0, cos0)
self.assertAlmostEqual(cosine90, cos90)
self.assertAlmostEqual(cosine90d, cos90)
self.assertAlmostEqual(cosine45, cos45)
self.assertAlmostEqual(sine45, cos45)
print("TestUtilities:test_sincos() END")
def test_rotate(self):
"""
test_rotate function,
tested against results from https://glm.g-truc.net/0.9.2/api/a00245.html
and theory: https://en.wikipedia.org/wiki/Rotation_matrix
"""
print("\nTestUtilities:test_rotate() START")
axis=(1.0, 1.0, 1.0)
angle = 90.0
matRot = rotate(axis, angle)
mR = np.array([
[0.333333,-0.244017,0.910684,0.0],
[0.910684,0.333333,-0.244017,0.0],
[-0.244017,0.910684,0.333333,0.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F')
#self.assertAlmostEquals(matRot.all(), mR.all(),6)
np.testing.assert_array_almost_equal(matRot,mR,decimal=6)
np.testing.assert_array_almost_equal(matRot,mR)
print(matRot)
print(mR)
print("TestUtilities:test_rotate() END")
def test_lookat(self):
"""
test_lookat function,
tested against results from
https://github.com/g-truc/glm/blob/master/glm/ext/matrix_transform.inl
and https://github.com/Zuzu-Typ/PyGLM/blob/master/wiki/function-reference/stable_extensions/matrix_transform.md#lookAt-function
"""
print("\nTestUtilities:test_rotate() START")
eye = vec(1.0, 1.0, 1.0)
target = vec(10,10,10)
up = vec(0.0, 1.0, 0.0)
matLookat = lookat(eye, target, up)
matLookat2 = lookat((0.0, 0.0, -1.0), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0))
mLat = np.array([
[-0.707107,0.0,0.707107,-0.0],
[-0.408248,0.816497,-0.408248,-0.0],
[-0.57735,-0.57735,-0.577353,1.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F') #glm.lookAtLH
mLat2 = np.array([
[1.0,0.0,0.0,-0.0],
[0.0,1.0,0.0,-0.0],
[0.0,0.0,1.0,1.0],
[0.0,0.0,0.0,1.0],
],dtype=np.float,order='F') #glm.lookAtLH
#self.assertAlmostEquals(matRot.all(), mR.all(),6)
np.testing.assert_array_almost_equal(matLookat,mLat,decimal=5)
np.testing.assert_array_almost_equal(matLookat2,mLat2,decimal=5)
print(matLookat)
print(mLat)
print(matLookat2)
print(mLat2)
print("TestUtilities:test_lookat() END")
def test_quaternion(self):
"""
test_quaternion to test quaternion algebra elements from individual components or vec4
tested against scipy.spatial.transform.Rotation (by default produces normalised quaternions)
and glm.quat: NOTE GLM IS SCALAR-FIRST
"""
print("\nTestUtilities:test_quaternion() START")
quat_a = quaternion(1.0,1.0,1.0,1.0)
vec_a = vec(1.0, 1.0, 1.0)
quat_a_vec = quaternion(vec_a, 1.0)
quat_a_vec_norm = normalise(quat_a_vec)
quat_np_a = np.array([1.0,1.0,1.0,1.0],dtype=np.float,order='F')
rot = R.from_quat(quat_np_a)
quat_b = quaternion(1.0,2.0,3.0,4.0)
quat_b = normalise(quat_b)
rot_b = R.from_quat([1.0, 2.0, 3.0, 4.0])
rot_c = R.from_rotvec(np.pi/2 * np.array([0, 0, 1]))
quat_c = quaternion_from_axis_angle([0.0,0.0,1.0],90.0)
rot_euler = R.from_euler('y', [90], degrees=True)
quat_euler = quaternion_from_euler(0.0,90.0,0.0)
rot_ab = rot * rot_b; #somehow this scipy quat mult yields different results than ours or glm!!!
rot_ab_glm = R.from_quat( [0.365148, 0.182574,0.547723, -0.730297])
quat_ab = quaternion_mul(quat_a_vec_norm, quat_b)
quat_ab_matrix = quaternion_matrix(quat_ab)
quat_slerp = quaternion_slerp(quat_a_vec_norm, quat_b, 0.5)
rot_ab_glm_slerp = R.from_quat( [0.348973, 0.442316, 0.535659, 0.629002])
"""key_rots = np.array([rot, rot_b])
key_times = [0, 1]
slerp = Slerp(key_times,key_rots)
rot_ab_slerp = slerp([0.5])
"""
print("\nquat_a:\t", quat_a)
print("quat_a_vec:\t", quat_a_vec)
print("rot.as_quat():\t", rot.as_quat())
print("quat_a_vec_norm:\t",quat_a_vec_norm)
print("\nrot_b.as_quat():\t", rot_b.as_quat())
print("quat_b:\t", quat_b)
print("\nrot_c.as_quat():\t ", rot_c.as_quat())
print("quat_c:\t ", quat_c)
print("\nrot_euler.as_quat():\t ", rot_euler.as_quat())
print("quat_euler:\t ", quat_euler)
print("\nrot_ab_glm.as_quat():\t ", rot_ab_glm.as_quat())
print("quat_ab:\t ", quat_ab)
print("rot_ab.as_quat():\t ", rot_ab.as_quat())
print("\nquat_ab_matrix: ",quat_ab_matrix)
print("rot_ab_glm.as_matrix(): ",rot_ab_glm.as_matrix())
print("\nquat_slerp: ",normalise(quat_slerp))
print("rot_ab_glm_slerp: ",rot_ab_glm_slerp.as_quat())
#print("rot_ab_slerp: ",rot_ab_slerp.as_quat()) #quat slerp is untested as it gives different results than glm!
np.testing.assert_array_equal(quat_a,quat_np_a)
np.testing.assert_array_equal(quat_a,quat_a_vec)
np.testing.assert_array_equal(rot.as_quat(),quat_a_vec_norm)
np.testing.assert_array_equal(rot_b.as_quat(),quat_b)
np.testing.assert_array_equal(rot_c.as_quat(),quat_c)
np.testing.assert_array_almost_equal(rot_ab_glm.as_quat(),quat_ab)
np.testing.assert_array_almost_equal(rot_ab_glm_slerp.as_quat(),quat_slerp)
print("TestUtilities:test_quaternion() END")
if __name__ == "__main__":
unittest.main(argv=[''], verbosity=3, exit=False) | [
"numpy.identity",
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"scipy.spatial.transform.Rotation.from_euler",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.array",
"unittest.main",
"numpy.testing.assert_array_equal"
] | [((15314, 15363), 'unittest.main', 'unittest.main', ([], {'argv': "['']", 'verbosity': '(3)', 'exit': '(False)'}), "(argv=[''], verbosity=3, exit=False)\n", (15327, 15363), False, 'import unittest\n'), ((637, 694), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 1.0]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([1.0, 0.0, 0.0, 1.0], dtype=np.float, order='F')\n", (645, 694), True, 'import numpy as np\n'), ((763, 805), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['vec_a', 'np_a'], {}), '(vec_a, np_a)\n', (792, 805), True, 'import numpy as np\n'), ((1264, 1321), 'numpy.array', 'np.array', (['[2.0, 2.0, 0.0, 1.0]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([2.0, 2.0, 0.0, 1.0], dtype=np.float, order='F')\n", (1272, 1321), True, 'import numpy as np\n'), ((1335, 1407), 'numpy.array', 'np.array', (['[0.666667, 0.666667, 0.0, 0.333333]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([0.666667, 0.666667, 0.0, 0.333333], dtype=np.float, order='F')\n", (1343, 1407), True, 'import numpy as np\n'), ((1542, 1608), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['norm_vec', 'norm_np'], {'decimal': '(5)'}), '(norm_vec, norm_np, decimal=5)\n', (1578, 1608), True, 'import numpy as np\n'), ((1615, 1679), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['norm_a', 'norm_np'], {'decimal': '(5)'}), '(norm_a, norm_np, decimal=5)\n', (1651, 1679), True, 'import numpy as np\n'), ((2532, 2547), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (2539, 2547), True, 'import numpy as np\n'), ((2563, 2577), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2574, 2577), True, 'import numpy as np\n'), ((2593, 2722), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]], 
dtype=np.float, order='F')\n", (2601, 2722), True, 'import numpy as np\n'), ((3309, 3407), 'numpy.array', 'np.array', (['[[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]], dtype=np\n .float, order='F')\n", (3317, 3407), True, 'import numpy as np\n'), ((3475, 3576), 'numpy.array', 'np.array', (['[[1, 0, 0, -1], [0, 1, 0, -2], [0, 0, 1, -3], [0, 0, 0, 1]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1, 0, 0, -1], [0, 1, 0, -2], [0, 0, 1, -3], [0, 0, 0, 1]], dtype\n =np.float, order='F')\n", (3483, 3576), True, 'import numpy as np\n'), ((3670, 3739), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['utilmLatInv', 'mLatInv'], {'decimal': '(5)'}), '(utilmLatInv, mLatInv, decimal=5)\n', (3706, 3739), True, 'import numpy as np\n'), ((4156, 4298), 'numpy.array', 'np.array', (['[[0.01, 0.0, 0.0, 0.0], [0.0, 0.01, 0.0, 0.0], [0.0, 0.0, -0.020202, -\n 1.0202], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[0.01, 0.0, 0.0, 0.0], [0.0, 0.01, 0.0, 0.0], [0.0, 0.0, -\n 0.020202, -1.0202], [0.0, 0.0, 0.0, 1.0]], dtype=np.float, order='F')\n", (4164, 4298), True, 'import numpy as np\n'), ((4829, 4967), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, -1.002, -0.2002], [\n 0.0, 0.0, -1.0, 0.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, -1.002, -\n 0.2002], [0.0, 0.0, -1.0, 0.0]], dtype=np.float, order='F')\n", (4837, 4967), True, 'import numpy as np\n'), ((5091, 5232), 'numpy.array', 'np.array', (['[[1.815, 0.0, 0.0, 0.0], [0.0, 2.414, 0.0, 0.0], [0.0, 0.0, -1.002, -0.2002\n ], [0.0, 0.0, -1.0, 0.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.815, 0.0, 0.0, 0.0], [0.0, 2.414, 0.0, 0.0], [0.0, 0.0, -1.002,\n -0.2002], [0.0, 0.0, -1.0, 0.0]], dtype=np.float, order='F')\n", (5099, 5232), True, 
'import numpy as np\n'), ((5355, 5422), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matPersp', 'np_Persp'], {'decimal': '(5)'}), '(matPersp, np_Persp, decimal=5)\n', (5391, 5422), True, 'import numpy as np\n'), ((5429, 5498), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matPersp2', 'np_Persp2'], {'decimal': '(3)'}), '(matPersp2, np_Persp2, decimal=3)\n', (5465, 5498), True, 'import numpy as np\n'), ((5921, 6061), 'numpy.array', 'np.array', (['[[0.01, 0.0, 0.0, 0.0], [0.0, 0.01, 0.0, 0.0], [0.0, 0.0, -1.002, -0.2002],\n [0.0, 0.0, -1.0, 0.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[0.01, 0.0, 0.0, 0.0], [0.0, 0.01, 0.0, 0.0], [0.0, 0.0, -1.002, \n -0.2002], [0.0, 0.0, -1.0, 0.0]], dtype=np.float, order='F')\n", (5929, 6061), True, 'import numpy as np\n'), ((6622, 6751), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 3.0], [0.0, \n 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 3.0],\n [0.0, 0.0, 0.0, 1.0]], dtype=np.float, order='F')\n", (6630, 6751), True, 'import numpy as np\n'), ((6925, 6968), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['matTrans', 'mT'], {}), '(matTrans, mT)\n', (6954, 6968), True, 'import numpy as np\n'), ((6976, 7020), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['matTrans2', 'mT'], {}), '(matTrans2, mT)\n', (7005, 7020), True, 'import numpy as np\n'), ((7527, 7656), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]], dtype=np.float, order='F')\n", (7535, 7656), True, 'import numpy as np\n'), ((7712, 7845), 'numpy.array', 'np.array', (['[[10.0, 0.0, 0.0, 0.0], [0.0, 10.0, 
0.0, 0.0], [0.0, 0.0, 10.0, 0.0], [0.0,\n 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[10.0, 0.0, 0.0, 0.0], [0.0, 10.0, 0.0, 0.0], [0.0, 0.0, 10.0, \n 0.0], [0.0, 0.0, 0.0, 1.0]], dtype=np.float, order='F')\n", (7720, 7845), True, 'import numpy as np\n'), ((10077, 10260), 'numpy.array', 'np.array', (['[[0.333333, -0.244017, 0.910684, 0.0], [0.910684, 0.333333, -0.244017, 0.0],\n [-0.244017, 0.910684, 0.333333, 0.0], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[0.333333, -0.244017, 0.910684, 0.0], [0.910684, 0.333333, -\n 0.244017, 0.0], [-0.244017, 0.910684, 0.333333, 0.0], [0.0, 0.0, 0.0, \n 1.0]], dtype=np.float, order='F')\n", (10085, 10260), True, 'import numpy as np\n'), ((10372, 10431), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matRot', 'mR'], {'decimal': '(6)'}), '(matRot, mR, decimal=6)\n', (10408, 10431), True, 'import numpy as np\n'), ((10438, 10486), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matRot', 'mR'], {}), '(matRot, mR)\n', (10474, 10486), True, 'import numpy as np\n'), ((11247, 11426), 'numpy.array', 'np.array', (['[[-0.707107, 0.0, 0.707107, -0.0], [-0.408248, 0.816497, -0.408248, -0.0],\n [-0.57735, -0.57735, -0.577353, 1.0], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[-0.707107, 0.0, 0.707107, -0.0], [-0.408248, 0.816497, -0.408248,\n -0.0], [-0.57735, -0.57735, -0.577353, 1.0], [0.0, 0.0, 0.0, 1.0]],\n dtype=np.float, order='F')\n", (11255, 11426), True, 'import numpy as np\n'), ((11503, 11635), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, -0.0], [0.0, 1.0, 0.0, -0.0], [0.0, 0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0, 1.0]]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([[1.0, 0.0, 0.0, -0.0], [0.0, 1.0, 0.0, -0.0], [0.0, 0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0, 1.0]], dtype=np.float, order='F')\n", (11511, 11635), True, 'import numpy as np\n'), ((11766, 11830), 
'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matLookat', 'mLat'], {'decimal': '(5)'}), '(matLookat, mLat, decimal=5)\n', (11802, 11830), True, 'import numpy as np\n'), ((11837, 11903), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['matLookat2', 'mLat2'], {'decimal': '(5)'}), '(matLookat2, mLat2, decimal=5)\n', (11873, 11903), True, 'import numpy as np\n'), ((12638, 12695), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {'dtype': 'np.float', 'order': '"""F"""'}), "([1.0, 1.0, 1.0, 1.0], dtype=np.float, order='F')\n", (12646, 12695), True, 'import numpy as np\n'), ((12705, 12727), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quat_np_a'], {}), '(quat_np_a)\n', (12716, 12727), True, 'from scipy.spatial.transform import Rotation as R\n'), ((12833, 12866), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (12844, 12866), True, 'from scipy.spatial.transform import Rotation as R\n'), ((13030, 13067), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""y"""', '[90]'], {'degrees': '(True)'}), "('y', [90], degrees=True)\n", (13042, 13067), True, 'from scipy.spatial.transform import Rotation as R\n'), ((13260, 13314), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['[0.365148, 0.182574, 0.547723, -0.730297]'], {}), '([0.365148, 0.182574, 0.547723, -0.730297])\n', (13271, 13314), True, 'from scipy.spatial.transform import Rotation as R\n'), ((13538, 13591), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['[0.348973, 0.442316, 0.535659, 0.629002]'], {}), '([0.348973, 0.442316, 0.535659, 0.629002])\n', (13549, 13591), True, 'from scipy.spatial.transform import Rotation as R\n'), ((14758, 14806), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['quat_a', 'quat_np_a'], {}), '(quat_a, quat_np_a)\n', (14787, 14806), True, 'import numpy as 
np\n'), ((14814, 14863), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['quat_a', 'quat_a_vec'], {}), '(quat_a, quat_a_vec)\n', (14843, 14863), True, 'import numpy as np\n'), ((12916, 12935), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (12924, 12935), True, 'import numpy as np\n')] |
import numpy as np
import mpmath
# Working precision (significant decimal digits) for all mpmath arithmetic
# in this module; 60 digits leaves ample headroom over double precision.
DECIMALS = 60
mpmath.mp.dps = DECIMALS
def jacobi_alpha(n):
    """Recurrence coefficient alpha_n of the shifted-Jacobi-type
    three-term recurrence (mpmath precision).  Returns 0 for n < 0."""
    if n < 0:
        return 0
    h = mpmath.mpf('.5')
    numerator = 2 * n * (n + h) - mpmath.mpf('.25')
    denominator = (2 * n + mpmath.mpf('1.5')) * (2 * n - h)
    return numerator / denominator
def jacobi_beta(n):
    """Recurrence coefficient beta_n of the shifted-Jacobi-type
    three-term recurrence (mpmath precision).  Returns 0 for n <= 0."""
    if n <= 0:
        return 0
    h = mpmath.mpf('.5')
    numerator = n * n * (n - h) * (n - h)
    denominator = (2 * n - h) ** 2 * (2 * n - mpmath.mpf('1.5')) * (2 * n + h)
    return numerator / denominator
# Memo table for jacobi_coefs: degree -> array of monomial coefficients.
_jacobi_coefs_cache = {}
def jacobi_coefs(n):
    """Monomial coefficients (lowest power first) of the degree-n monic
    polynomial generated by the (jacobi_alpha, jacobi_beta) recurrence.
    Results are cached per degree."""
    cached = _jacobi_coefs_cache.get(n)
    if cached is not None:
        return cached
    if n == 0:
        result = np.array([mpmath.mpf(1)])
    elif n == 1:
        result = np.array([-jacobi_alpha(0), mpmath.mpf(1)])
    else:
        prev2 = jacobi_coefs(n - 2)
        prev1 = jacobi_coefs(n - 1)
        # p_n(x) = (x - alpha_{n-1}) p_{n-1}(x) - beta_{n-1} p_{n-2}(x)
        result = (-jacobi_alpha(n - 1)) * prev1
        result[1:] += prev1[:-1]                 # multiply p_{n-1} by x
        result = np.append(result, prev1[-1])
        result[:n - 1] -= jacobi_beta(n - 1) * prev2
    _jacobi_coefs_cache[n] = result
    return result
def fmt1_erfc(t, m, low=0):
    """Arbitrary-precision Boys-like integrals with a lower cutoff.

    Appears to evaluate F_j(t) = int_low^1 x^{2j} exp(-t x^2) dx for
    j = 0..m (TODO confirm against caller): the highest order m is summed
    by an upward power series, lower orders follow from the downward
    recursion, and the final [::-1] orders the result F_0 .. F_m.
    """
    t = mpmath.mpf(t)
    low = mpmath.mpf(low)
    half = mpmath.mpf('.5')
    b = (m + half)
    # boundary contributions at x = 1 and x = low
    e = half * mpmath.exp(-t)
    e1 = half * mpmath.exp(-t * low*low) * mpmath.power(low, 2*m+1)
    x = e
    x1 = e1
    s = e - e1
    bi = b
    div = 1
    delta = s
    # upward power series for the highest order; the 1e-30 cutoff is well
    # inside the mpmath working precision (DECIMALS = 60 digits)
    while abs(delta) > 1e-30:
        bi += 1
        div *= t / bi
        x1 *= low*low
        delta = (x - x1) * div
        s += delta
    f = s / b
    out = [f]
    # downward recursion toward order 0; the low == 0 branch drops the
    # vanishing lower-boundary term
    for i in range(m):
        b -= 1
        if low == 0:
            f = (e + t * f) / b
        else:
            e1 /= low*low
            f = (e - e1 + t * f) / b
        out.append(f)
    return np.array(out)[::-1]
def dble_fmt1_erfc(t, m, low=0):
    """Double-precision counterpart of fmt1_erfc.

    Returns the array [F_0(t), ..., F_m(t)] with the lower integration
    cutoff `low`: the top order is evaluated by an upward power series,
    the remaining orders by the downward recursion.
    """
    order = m + 0.5
    # boundary contributions at x = 1 and x = low
    e_hi = 0.5 * np.exp(-t)
    e_lo = 0.5 * np.exp(-t * low * low) * np.power(low, 2 * m + 1)
    term_hi = e_hi
    term_lo = e_lo
    total = e_hi - e_lo
    denom = order
    factor = 1
    step = total
    # upward power series for the highest order m
    while abs(step) > 1e-30:
        denom += 1
        factor *= t / denom
        term_lo *= low * low
        step = (term_hi - term_lo) * factor
        total += step
    val = total / order
    results = [val]
    # downward recursion toward order 0
    for _ in range(m):
        order -= 1
        if low == 0:
            val = (e_hi + t * val) / order
        else:
            e_lo /= low * low
            val = (e_hi - e_lo + t * val) / order
        results.append(val)
    return np.array(results)[::-1]
def naive_jacobi_mus(n, t, low):
    """Modified moments mu_0..mu_{n-1} by directly contracting each
    polynomial's monomial coefficients with the fmt1_erfc integrals
    (high-precision 'naive' scheme)."""
    integrals = fmt1_erfc(t, n, low)
    return np.array([(jacobi_coefs(k) * integrals[:k + 1])[::-1].sum()
                     for k in range(n)])
def dble_naive_jacobi_mus(n, t, low):
    """Double-precision variant of naive_jacobi_mus; terms are summed in
    order of increasing coefficient magnitude to limit cancellation."""
    integrals = dble_fmt1_erfc(t, n, low)
    def _moment(k):
        coefs = jacobi_coefs(k).astype(float)
        perm = abs(coefs).argsort()   # smallest-magnitude terms first
        return (coefs[perm] * integrals[:k + 1][perm]).sum()
    return np.array([_moment(k) for k in range(n)])
def jacobi_gs(n, x):
    """Evaluate the first n recurrence polynomials g_0..g_{n-1} at x via
    the (jacobi_alpha, jacobi_beta) three-term recurrence; returns a list."""
    prev = mpmath.mpf(0)
    cur = mpmath.mpf(1)
    values = [cur]
    for k in range(1, n):
        nxt = (x - jacobi_alpha(k - 1)) * cur - jacobi_beta(k - 1) * prev
        values.append(nxt)
        prev, cur = cur, nxt
    return values
def jacobi_x(n):
    """Ratio A/B of normalization factors used by jacobi_us (mpmath)."""
    h = mpmath.mpf('.5')
    a_factor = (n + h) / (2 * n + h + 1)
    b_factor = (2 * n + h) / (n + 1)
    return a_factor / b_factor
def jacobi_zs(n, low, t):
    """Boundary terms z_0..z_{n-1} at x = low**2 feeding the jacobi_us
    recursion; combines consecutive recurrence-polynomial values with an
    exp(-t*x)*sqrt(x)/(2t) prefactor."""
    x = low ** 2
    gvals = np.array(jacobi_gs(n + 1, x))
    xvals = np.array([jacobi_x(k) for k in range(n)])
    prefactor = mpmath.exp(-t * x) * mpmath.sqrt(x) / (2 * t)
    return prefactor * (xvals * gvals[:-1] - gvals[1:])
def jacobi_us(n, low, t):
    """Inhomogeneous terms u_0..u_{n-1} entering the jacobi_mus
    three-term recursion: u_0 has a closed form, higher terms follow a
    first-order recursion coupled through the boundary values z_k."""
    half = mpmath.mpf('.5')
    zs = jacobi_zs(n, low, t)
    ns = np.arange(n)
    # As/Bs reproduce jacobi_x(k) vectorized over k = 0..n-1
    As = (ns + half) / (2 * ns + half + 1)
    Bs = (2*ns + half) / (ns + 1)
    xs = As / Bs
    cs = xs * 2*ns / (2*ns + 1)
    # closed form for u_0: difference of the x = 1 and x = low boundary
    # contributions
    u0 = ((-1/(2*t) + 3/(4*t*(half+1)))*mpmath.exp(-t) -
          (-low**3/(2*t) + 3*low/(4*t*(half+1)))*mpmath.exp(-t*low**2))
    us = [u0]
    for i in range(1, n):
        u1 = -zs[i] - u0 * cs[i]
        us.append(u1)
        u0 = u1
    return us
# Full coefficient: rn = (n+.5)/t + .5 * (2*n+1) / (2*(2*n+1.5)*(2*n-.5));
# this helper returns only the t-independent part.
def jacobi_rn_part(n):
    """The t-independent part of the r_n recursion coefficient (mpmath)."""
    h = mpmath.mpf('.5')
    return h * (2 * n + 1) / (2 * (2 * n + mpmath.mpf('1.5')) * (2 * n - h))
def jacobi_sn(n):
    """The s_n coefficient of the jacobi_mus forward recursion (mpmath)."""
    h = mpmath.mpf('.5')
    numerator = mpmath.mpf(n) * (2*n + 1) * (2*n - 1) * (n - h)
    denominator = 4 * (2*n + h) * (2*n - h)**2 * (2*n - mpmath.mpf(1.5))
    return numerator / denominator
def jacobi_mus(n, t, low):
    """Modified moments by forward three-term recursion
    mu_{k+1} = r_k*mu_k + s_k*mu_{k-1} + u_k, seeded with the analytic
    mu_0 (erfc difference) and mu_1.  NOTE(review): this path is disabled
    in shifted_jacobi_moments, which currently uses the naive scheme.
    """
    t = mpmath.mpf(t)
    low = mpmath.mpf(low)
    half = mpmath.mpf('.5')
    # promote the index array to mpf so all arithmetic stays at full precision
    n_idx = mpmath.mpf(1) * np.arange(1, n)
    rn = (2*n_idx+1)/(2*t) - (2*n_idx+1)*(half-1) / (2*(2*n_idx+half+1)*(2*n_idx+half-1))
    sn = n_idx*(2*n_idx+1)*(2*n_idx-1)*(n_idx+half-1)/(4*(2*n_idx+half)*(2*n_idx+half-1)**2*(2*n_idx+half-2))
    us = jacobi_us(n, low, t)
    low2 = low * low
    t_inv = half / t
    # boundary exponentials at x = 1 and x = low
    e0 = mpmath.exp(-t) * t_inv
    et = mpmath.exp(-t * low2) * low * t_inv
    tt = mpmath.sqrt(t)
    alpha0 = jacobi_alpha(0)
    # mu_0 = sqrt(pi)/(2*sqrt(t)) * (erfc(low*sqrt(t)) - erfc(sqrt(t)))
    mu0 = mpmath.sqrt(mpmath.pi) / 2 / tt * (mpmath.erfc(low * tt) - mpmath.erfc(tt))
    mu1 = t_inv * mu0 - e0 + et - alpha0 * mu0;
    mus = [mu0, mu1]
    for i in range(1, n-1):
        mu2 = rn[i-1] * mu1 + sn[i-1] * mu0 + us[i-1]
        mus.append(mu2)
        mu1, mu0 = mu2, mu1
    return np.array(mus)
def shifted_jacobi_moments(n, t, low=0):
    """Return (alphas, betas, moments) for the shifted-Jacobi auxiliary
    polynomials at parameter t with lower cutoff `low`.

    The original body carried dead debug scaffolding (`if 0:` selecting
    jacobi_mus, an unreachable dble_naive_jacobi_mus branch); only the
    high-precision naive scheme ever ran, so it is called directly here.
    The alternative moment generators remain available in the module.
    """
    mus = naive_jacobi_mus(n, t, low)
    alphas = np.array([jacobi_alpha(i) for i in range(n)])
    betas = np.array([jacobi_beta(i) for i in range(n)])
    return alphas, betas, mus
def laguerre_mu0(t, low=0):
    """Zeroth moment: sqrt(pi)/(2*sqrt(t)) * (erfc(low*sqrt(t)) - erfc(sqrt(t)))."""
    rt = mpmath.sqrt(t)
    prefactor = mpmath.sqrt(mpmath.pi) / 2 / rt
    return prefactor * (mpmath.erfc(low * rt) - mpmath.erfc(rt))
def t_scaled_laguerre(T, a, n, x=1):
    """Values L_0..L_{n-1} of the T-scaled generalized Laguerre
    recurrence with parameter a, evaluated at x (mpmath precision)."""
    l0 = mpmath.mpf(1)
    l1 = mpmath.mpf(x) - (1 + a) / T
    values = [l0, l1]
    for k in range(2, n):
        l2 = (x - (2*k + a - 1) / T) * l1 - (k - 1) * (k + a - 1) / T**2 * l0
        values.append(l2)
        l1, l0 = l2, l1
    return np.array(values)
def laguerre_moments(n, T, low=0):
    """Modified moments with respect to T-scaled Laguerre polynomials,
    used for large T (see roots_and_weights).  Returns (alpha, beta,
    moments) in the form wheeler() expects."""
    half = mpmath.mpf('.5')
    eta = low**2
    # difference of the x = 1 and x = eta boundary contributions
    moments = (t_scaled_laguerre(T, half, n-1) * mpmath.exp(-T) / (-2 * T) -
               t_scaled_laguerre(T, half, n-1, eta) * mpmath.sqrt(eta) * mpmath.exp(-T * eta) / (-2 * T))
    # prepend the analytic zeroth moment
    moments = np.append(laguerre_mu0(T, low), moments)
    idx = mpmath.mpf(1) * np.arange(1, n)
    alpha = (idx * 4 - 3) / (2 * T)
    beta = (idx - 1) * (idx * 2 - 3) / (2 * T**2)
    return alpha, beta, moments
def flocke_jacobi_moments(n, T):
    """Jacobi moments via Flocke's backward-recursion recipe
    (J. Chem. Phys. 131, 064107), double precision.

    Miller's algorithm: start the recursion well above the requested
    order with arbitrary seed values, run it downward, then rescale so
    the zeroth moment matches the analytic erf value.
    """
    import scipy.special
    mu_a = 1          # seed, arbitrary scale
    mu_b = 0
    moments = [mu_b, mu_a]
    guard = 20        # extra backward steps, enough for double precision
    for k in reversed(range(1, n + 1 + guard)):
        r = (2 * k + 1) / (2*T) + (2 * k + 1) / ((4 * k - 1) * (4 * k + 3))
        s = (2 * k * (2 * k + 1) * (2 * k - 1)**2 /
             ((4 * k - 3) * (4 * k + 1) * (4 * k - 1)**2))
        mu_new = (mu_b - r * mu_a) / s
        moments.append(mu_new)
        mu_a, mu_b = mu_new, mu_a
        # NOTE(review): if |mu_new| overflows a rescaling pass would be
        # needed here (unimplemented in the original as well).
    moments = np.array(moments)[::-1]
    rt = np.sqrt(T)
    fmt0 = np.pi**.5 / 2. / rt * scipy.special.erf(rt)
    moments = moments[:n] * (fmt0 / mu_new)   # fix the overall scale
    idx = np.arange(n - 1)
    alpha = (2 * idx * (idx + .5) - .25) / ((2 * idx + .5)**2 - 1)
    beta = (idx**2 * (idx - .5)**2) / ((2 * idx - .5)**2 * (2*idx-1.5) * (2*idx+.5))
    return alpha, beta, moments
def wheeler(n, alpha, beta, moments):
    """Wheeler's algorithm (modified Chebyshev): recover the n recurrence
    coefficients (a_k, b_k) of the target weight's orthogonal polynomials
    from 2n modified moments plus the auxiliary-polynomial coefficients
    (alpha, beta)."""
    sig_m = moments    # sigma row k-1
    sig_0 = moments    # sigma row k
    a0 = mpmath.mpf(alpha[0]) + moments[1] / moments[0]
    b0 = mpmath.mpf(0)
    val_a = [a0]
    val_b = [b0]
    for k in range(1, n):
        nc = 2 * n - 2 * k    # number of sigma entries still required
        sig_k = (sig_0[2:2+nc] - (a0 - alpha[k:k+nc]) * sig_0[1:1+nc] -
                 b0 * sig_m[2:2+nc] + beta[k:k+nc] * sig_0[:nc])
        a1 = alpha[k] - sig_0[1] / sig_0[0] + sig_k[1] / sig_k[0]
        b1 = sig_k[0] / sig_0[0]
        val_a.append(a1)
        val_b.append(b1)
        a0, b0 = a1, b1
        sig_0, sig_m = sig_k, sig_0
    return np.array(val_a), np.array(val_b)
def roots_and_weights_partial(n, alpha, beta, moments):
    """Golub–Welsch step: build the symmetric tridiagonal Jacobi matrix
    from the wheeler() coefficients and diagonalize it.  Roots are the
    eigenvalues; weights come from the first eigenvector components
    scaled by the zeroth moment."""
    a, b = wheeler(n, alpha, beta, moments)
    jacobi_matrix = np.diag(a.astype(float))
    off = np.arange(n - 1)
    jacobi_matrix[off, off + 1] = jacobi_matrix[off + 1, off] = b[1:].astype(float)**.5
    eigvals, eigvecs = np.linalg.eigh(jacobi_matrix)
    weights = eigvecs[0]**2 * float(moments[0])
    return eigvals, weights
def roots_and_weights(n, x, low=0):
    """n-point quadrature nodes and weights for parameter x and lower
    cutoff `low`.  Laguerre-based moments are used for large x (> 20),
    shifted-Jacobi moments otherwise; the commented-out Flocke path of
    the original is intentionally not re-enabled.  The final mapping
    r -> r/(1-r) transfers nodes from [0, 1) to [0, inf)."""
    x = mpmath.mpf(x)
    low = mpmath.mpf(low)
    if x > 20:
        alpha, beta, moments = laguerre_moments(2 * n, x, low)
    else:
        alpha, beta, moments = shifted_jacobi_moments(2 * n, x, low)
    roots, weights = roots_and_weights_partial(n, alpha, beta, moments)
    return roots / (1 - roots), weights
if __name__ == '__main__':
    # Ad-hoc self-check: print nodes/weights from the Laguerre, Flocke and
    # shifted-Jacobi moment paths side by side for several (n, T, low)
    # combinations so the schemes can be compared by eye.
    #for i in range(64):
    #    mpmath.nprint(jacobi_alpha(i), 36)
    #    mpmath.nprint(jacobi_beta(i), 36)
    #    mpmath.nprint(jacobi_x(i), 17)
    #    mpmath.nprint(2*i / (2*i + 1) * jacobi_x(i), 17)
    #    mpmath.nprint(jacobi_rn_part(i+1), 36)
    #    mpmath.nprint(jacobi_sn(i+1), 36)
    #gs = jacobi_gs(49, 1)
    #for i in gs:
    #    mpmath.nprint(i, 17)
    #for i in range(64):
    #    print(f'// n = {i}')
    #    for c in jacobi_coefs(i):
    #        mpmath.nprint(c, 36)
    #for i in range(64):
    #    cs = jacobi_coefs(i)
    #    print(', '.join([str(i) for i in abs(cs.astype(float)).argsort()]))
    print(roots_and_weights_partial(2, *laguerre_moments(2*2, 5.7, 0)))
    print(roots_and_weights_partial(2, *flocke_jacobi_moments(2*2, 5.7)))
    print(roots_and_weights_partial(2, *laguerre_moments(2*2, 5.7, 0.1)))
    print(roots_and_weights_partial(2, *shifted_jacobi_moments(2*2, 5.7, 0.1)))
    print(roots_and_weights_partial(3, *laguerre_moments(3*2, 11.7, 0)))
    print(roots_and_weights_partial(3, *flocke_jacobi_moments(3*2, 11.7)))
    print(roots_and_weights_partial(3, *laguerre_moments(3*2, 11.7, 0.2)))
    print(roots_and_weights_partial(3, *shifted_jacobi_moments(3*2, 11.7, 0.2)))
    print(roots_and_weights_partial(4, *laguerre_moments(4*2, 2.7, 0)))
    print(roots_and_weights_partial(4, *flocke_jacobi_moments(4*2, 2.7)))
    print(roots_and_weights_partial(4, *laguerre_moments(4*2, 2.7, 0.2)))
    print(roots_and_weights_partial(4, *shifted_jacobi_moments(4*2, 2.7, 0.2)))
    print(roots_and_weights_partial(5, *laguerre_moments(5*2, 1.7, 0)))
    print(roots_and_weights_partial(5, *flocke_jacobi_moments(5*2, 1.7)))
    print(roots_and_weights_partial(5, *laguerre_moments(5*2, 1.7, 0.2)))
    print(roots_and_weights_partial(5, *shifted_jacobi_moments(5*2, 1.7, 0.2)))
    print(roots_and_weights_partial(6, *laguerre_moments(6*2, 1.1, 0)))
    print(roots_and_weights_partial(6, *flocke_jacobi_moments(6*2, 1.1)))
    print(roots_and_weights_partial(6, *laguerre_moments(6*2, 1.1, 0.2)))
    print(roots_and_weights_partial(6, *shifted_jacobi_moments(6*2, 1.1, 0.2)))
| [
"mpmath.exp",
"mpmath.erfc",
"numpy.sqrt",
"numpy.power",
"numpy.exp",
"numpy.array",
"numpy.append",
"mpmath.power",
"mpmath.sqrt",
"numpy.linalg.eigh",
"numpy.arange",
"mpmath.mpf"
] | [((136, 152), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (146, 152), False, 'import mpmath\n'), ((300, 316), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (310, 316), False, 'import mpmath\n'), ((1008, 1021), 'mpmath.mpf', 'mpmath.mpf', (['t'], {}), '(t)\n', (1018, 1021), False, 'import mpmath\n'), ((1032, 1047), 'mpmath.mpf', 'mpmath.mpf', (['low'], {}), '(low)\n', (1042, 1047), False, 'import mpmath\n'), ((1059, 1075), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (1069, 1075), False, 'import mpmath\n'), ((2468, 2481), 'numpy.array', 'np.array', (['mus'], {}), '(mus)\n', (2476, 2481), True, 'import numpy as np\n'), ((2747, 2760), 'numpy.array', 'np.array', (['mus'], {}), '(mus)\n', (2755, 2760), True, 'import numpy as np\n'), ((2792, 2805), 'mpmath.mpf', 'mpmath.mpf', (['(0)'], {}), '(0)\n', (2802, 2805), False, 'import mpmath\n'), ((2815, 2828), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (2825, 2828), False, 'import mpmath\n'), ((3024, 3040), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (3034, 3040), False, 'import mpmath\n'), ((3382, 3398), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (3392, 3398), False, 'import mpmath\n'), ((3438, 3450), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3447, 3450), True, 'import numpy as np\n'), ((3923, 3939), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (3933, 3939), False, 'import mpmath\n'), ((4037, 4053), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (4047, 4053), False, 'import mpmath\n'), ((4191, 4204), 'mpmath.mpf', 'mpmath.mpf', (['t'], {}), '(t)\n', (4201, 4204), False, 'import mpmath\n'), ((4215, 4230), 'mpmath.mpf', 'mpmath.mpf', (['low'], {}), '(low)\n', (4225, 4230), False, 'import mpmath\n'), ((4242, 4258), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (4252, 4258), False, 'import mpmath\n'), ((4662, 4676), 'mpmath.sqrt', 'mpmath.sqrt', (['t'], {}), '(t)\n', (4673, 4676), False, 
'import mpmath\n'), ((5007, 5020), 'numpy.array', 'np.array', (['mus'], {}), '(mus)\n', (5015, 5020), True, 'import numpy as np\n'), ((5404, 5418), 'mpmath.sqrt', 'mpmath.sqrt', (['t'], {}), '(t)\n', (5415, 5418), False, 'import mpmath\n'), ((5553, 5566), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (5563, 5566), False, 'import mpmath\n'), ((5783, 5795), 'numpy.array', 'np.array', (['ls'], {}), '(ls)\n', (5791, 5795), True, 'import numpy as np\n'), ((5843, 5859), 'mpmath.mpf', 'mpmath.mpf', (['""".5"""'], {}), "('.5')\n", (5853, 5859), False, 'import mpmath\n'), ((6949, 6959), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (6956, 6959), True, 'import numpy as np\n'), ((7064, 7080), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (7073, 7080), True, 'import numpy as np\n'), ((7409, 7422), 'mpmath.mpf', 'mpmath.mpf', (['(0)'], {}), '(0)\n', (7419, 7422), False, 'import mpmath\n'), ((8047, 8063), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (8056, 8063), True, 'import numpy as np\n'), ((8146, 8166), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Tmat'], {}), '(Tmat)\n', (8160, 8166), True, 'import numpy as np\n'), ((8280, 8293), 'mpmath.mpf', 'mpmath.mpf', (['x'], {}), '(x)\n', (8290, 8293), False, 'import mpmath\n'), ((8304, 8319), 'mpmath.mpf', 'mpmath.mpf', (['low'], {}), '(low)\n', (8314, 8319), False, 'import mpmath\n'), ((1110, 1124), 'mpmath.exp', 'mpmath.exp', (['(-t)'], {}), '(-t)\n', (1120, 1124), False, 'import mpmath\n'), ((1168, 1196), 'mpmath.power', 'mpmath.power', (['low', '(2 * m + 1)'], {}), '(low, 2 * m + 1)\n', (1180, 1196), False, 'import mpmath\n'), ((1636, 1649), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (1644, 1649), True, 'import numpy as np\n'), ((1738, 1748), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (1744, 1748), True, 'import numpy as np\n'), ((1788, 1812), 'numpy.power', 'np.power', (['low', '(2 * m + 1)'], {}), '(low, 2 * m + 1)\n', (1796, 1812), True, 'import numpy as np\n'), ((2252, 
2265), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2260, 2265), True, 'import numpy as np\n'), ((4271, 4284), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (4281, 4284), False, 'import mpmath\n'), ((4287, 4302), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (4296, 4302), True, 'import numpy as np\n'), ((4585, 4599), 'mpmath.exp', 'mpmath.exp', (['(-t)'], {}), '(-t)\n', (4595, 4599), False, 'import mpmath\n'), ((5576, 5589), 'mpmath.mpf', 'mpmath.mpf', (['x'], {}), '(x)\n', (5586, 5589), False, 'import mpmath\n'), ((6125, 6138), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (6135, 6138), False, 'import mpmath\n'), ((6141, 6156), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (6150, 6156), True, 'import numpy as np\n'), ((6916, 6933), 'numpy.array', 'np.array', (['moments'], {}), '(moments)\n', (6924, 6933), True, 'import numpy as np\n'), ((7353, 7373), 'mpmath.mpf', 'mpmath.mpf', (['alpha[0]'], {}), '(alpha[0])\n', (7363, 7373), False, 'import mpmath\n'), ((7867, 7882), 'numpy.array', 'np.array', (['val_a'], {}), '(val_a)\n', (7875, 7882), True, 'import numpy as np\n'), ((7884, 7899), 'numpy.array', 'np.array', (['val_b'], {}), '(val_b)\n', (7892, 7899), True, 'import numpy as np\n'), ((178, 195), 'mpmath.mpf', 'mpmath.mpf', (['""".25"""'], {}), "('.25')\n", (188, 195), False, 'import mpmath\n'), ((839, 867), 'numpy.append', 'np.append', (['coefs', 'coefs1[-1]'], {}), '(coefs, coefs1[-1])\n', (848, 867), True, 'import numpy as np\n'), ((1141, 1167), 'mpmath.exp', 'mpmath.exp', (['(-t * low * low)'], {}), '(-t * low * low)\n', (1151, 1167), False, 'import mpmath\n'), ((1765, 1787), 'numpy.exp', 'np.exp', (['(-t * low * low)'], {}), '(-t * low * low)\n', (1771, 1787), True, 'import numpy as np\n'), ((3270, 3288), 'mpmath.exp', 'mpmath.exp', (['(-t * x)'], {}), '(-t * x)\n', (3280, 3288), False, 'import mpmath\n'), ((3287, 3301), 'mpmath.sqrt', 'mpmath.sqrt', (['x'], {}), '(x)\n', (3298, 3301), False, 'import 
mpmath\n'), ((3618, 3632), 'mpmath.exp', 'mpmath.exp', (['(-t)'], {}), '(-t)\n', (3628, 3632), False, 'import mpmath\n'), ((3684, 3709), 'mpmath.exp', 'mpmath.exp', (['(-t * low ** 2)'], {}), '(-t * low ** 2)\n', (3694, 3709), False, 'import mpmath\n'), ((4617, 4638), 'mpmath.exp', 'mpmath.exp', (['(-t * low2)'], {}), '(-t * low2)\n', (4627, 4638), False, 'import mpmath\n'), ((4751, 4772), 'mpmath.erfc', 'mpmath.erfc', (['(low * tt)'], {}), '(low * tt)\n', (4762, 4772), False, 'import mpmath\n'), ((4775, 4790), 'mpmath.erfc', 'mpmath.erfc', (['tt'], {}), '(tt)\n', (4786, 4790), False, 'import mpmath\n'), ((5465, 5486), 'mpmath.erfc', 'mpmath.erfc', (['(low * tt)'], {}), '(low * tt)\n', (5476, 5486), False, 'import mpmath\n'), ((5489, 5504), 'mpmath.erfc', 'mpmath.erfc', (['tt'], {}), '(tt)\n', (5500, 5504), False, 'import mpmath\n'), ((205, 222), 'mpmath.mpf', 'mpmath.mpf', (['"""1.5"""'], {}), "('1.5')\n", (215, 222), False, 'import mpmath\n'), ((565, 578), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (575, 578), False, 'import mpmath\n'), ((4137, 4152), 'mpmath.mpf', 'mpmath.mpf', (['(1.5)'], {}), '(1.5)\n', (4147, 4152), False, 'import mpmath\n'), ((4716, 4738), 'mpmath.sqrt', 'mpmath.sqrt', (['mpmath.pi'], {}), '(mpmath.pi)\n', (4727, 4738), False, 'import mpmath\n'), ((5430, 5452), 'mpmath.sqrt', 'mpmath.sqrt', (['mpmath.pi'], {}), '(mpmath.pi)\n', (5441, 5452), False, 'import mpmath\n'), ((5926, 5940), 'mpmath.exp', 'mpmath.exp', (['(-T)'], {}), '(-T)\n', (5936, 5940), False, 'import mpmath\n'), ((6027, 6047), 'mpmath.exp', 'mpmath.exp', (['(-T * eta)'], {}), '(-T * eta)\n', (6037, 6047), False, 'import mpmath\n'), ((374, 391), 'mpmath.mpf', 'mpmath.mpf', (['"""1.5"""'], {}), "('1.5')\n", (384, 391), False, 'import mpmath\n'), ((642, 655), 'mpmath.mpf', 'mpmath.mpf', (['(1)'], {}), '(1)\n', (652, 655), False, 'import mpmath\n'), ((3976, 3993), 'mpmath.mpf', 'mpmath.mpf', (['"""1.5"""'], {}), "('1.5')\n", (3986, 3993), False, 'import mpmath\n'), ((4065, 
4078), 'mpmath.mpf', 'mpmath.mpf', (['n'], {}), '(n)\n', (4075, 4078), False, 'import mpmath\n'), ((6008, 6024), 'mpmath.sqrt', 'mpmath.sqrt', (['eta'], {}), '(eta)\n', (6019, 6024), False, 'import mpmath\n')] |
# coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for Readers and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import gin.tf
from meta_dataset.data import config
from meta_dataset.data import reader
from meta_dataset.data import sampling
from meta_dataset.data.dataset_spec import DatasetSpecification
from meta_dataset.data.learning_spec import Split
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
# DatasetSpecification to use in tests
# Dataset specification shared by all tests below: 30 classes split
# 15/5/10 across TRAIN/VALID/TEST, with class sizes cycling 10, 20, 30.
DATASET_SPEC = DatasetSpecification(
    name=None,
    classes_per_split={
        Split.TRAIN: 15,
        Split.VALID: 5,
        Split.TEST: 10
    },
    images_per_class=dict(enumerate([10, 20, 30] * 10)),
    class_names=None,
    path=None,
    file_pattern='{}.tfrecords')
# Define defaults and set Gin configuration for EpisodeDescriptionConfig
MIN_WAYS = 5
MAX_WAYS_UPPER_BOUND = 50
MAX_NUM_QUERY = 10
MAX_SUPPORT_SET_SIZE = 500
MAX_SUPPORT_SIZE_CONTRIB_PER_CLASS = 100
MIN_LOG_WEIGHT = np.log(0.5)
MAX_LOG_WEIGHT = np.log(2)
# Bind the defaults globally so every EpisodeDescriptionConfig() constructed
# in the tests picks them up without per-test Gin setup.
gin.bind_parameter('EpisodeDescriptionConfig.num_ways', None)
gin.bind_parameter('EpisodeDescriptionConfig.num_support', None)
gin.bind_parameter('EpisodeDescriptionConfig.num_query', None)
gin.bind_parameter('EpisodeDescriptionConfig.min_ways', MIN_WAYS)
gin.bind_parameter('EpisodeDescriptionConfig.max_ways_upper_bound',
                   MAX_WAYS_UPPER_BOUND)
gin.bind_parameter('EpisodeDescriptionConfig.max_num_query', MAX_NUM_QUERY)
gin.bind_parameter('EpisodeDescriptionConfig.max_support_set_size',
                   MAX_SUPPORT_SET_SIZE)
gin.bind_parameter(
    'EpisodeDescriptionConfig.max_support_size_contrib_per_class',
    MAX_SUPPORT_SIZE_CONTRIB_PER_CLASS)
gin.bind_parameter('EpisodeDescriptionConfig.min_log_weight', MIN_LOG_WEIGHT)
gin.bind_parameter('EpisodeDescriptionConfig.max_log_weight', MAX_LOG_WEIGHT)
gin.bind_parameter('EpisodeDescriptionConfig.ignore_dag_ontology', False)
gin.bind_parameter('EpisodeDescriptionConfig.ignore_bilevel_ontology', False)
def split_into_chunks(batch, chunk_sizes):
  """Splits `batch` into the 3 consecutive chunks described by `chunk_sizes`.

  Args:
    batch: A sequence of length sum(chunk_sizes), usually examples or targets.
    chunk_sizes: A tuple of 3 ints (flush_size, support_size, query_size).

  Returns:
    A tuple of 3 sequences (flush_chunk, support_chunk, query_chunk).
  """
  assert sum(chunk_sizes) == len(batch)
  flush_size, support_size, _ = chunk_sizes
  # Boundaries of the three consecutive slices within the batch.
  support_end = flush_size + support_size
  return (batch[:flush_size],
          batch[flush_size:support_end],
          batch[support_end:])
class DatasetIDGenTest(tf.test.TestCase):
  """Tests `reader.dataset_id_generator`."""
  def setUp(self):
    """Uses the shared DATASET_SPEC and the TRAIN split for all tests."""
    super(DatasetIDGenTest, self).setUp()
    self.dataset_spec = DATASET_SPEC
    self.split = Split.TRAIN
  def check_expected_structure(self, sampler):
    """Checks the stream of dataset indices is as expected.

    Verifies chunk structure ("flush, support, query") and that padding
    (the placeholder dataset ID) only appears at the end of each chunk.

    Args:
      sampler: An EpisodeDescriptionSampler driving the generator.
    """
    chunk_sizes = sampler.compute_chunk_sizes()
    batch_size = sum(chunk_sizes)
    # The placeholder ID is one past the last valid class index of the split.
    placeholder_id = len(self.dataset_spec.get_classes(self.split))
    generator = reader.dataset_id_generator(self.dataset_spec, self.split, None,
                                            sampler)
    for _ in range(3):
      # Re-assemble batch.
      # TODO(lamblinp): update if we change dataset_id_generator to return
      # the whole batch at once
      batch = list(itertools.islice(generator, batch_size))
      self.assertEqual(len(batch), batch_size)
      flush_chunk, support_chunk, query_chunk = split_into_chunks(
          batch, chunk_sizes)
      # flush_chunk is slightly oversized: if we actually had support_chunk_size
      # + query_chunk_size examples remaining, we could have used them.
      # Therefore, the last element of flush_chunk should be padding.
      self.assertEqual(flush_chunk[-1], placeholder_id)
      # TODO(lamblinp): check more about the content of flush_chunk
      # The padding should be at the end of each chunk.
      for chunk in (flush_chunk, support_chunk, query_chunk):
        num_actual_examples = sum(
            class_id != placeholder_id for class_id in chunk)
        self.assertNotIn(placeholder_id, chunk[:num_actual_examples])
        self.assertTrue(
            all(placeholder_id == class_id
                for class_id in chunk[num_actual_examples:]))
  def test_default(self):
    """Default episode configuration (variable ways/shots)."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig())
    self.check_expected_structure(sampler)
  def test_fixed_query(self):
    """Fixed number of query examples per class."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_query=5))
    self.check_expected_structure(sampler)
  def test_no_query(self):
    """Episodes without any query examples.

    Bug fix: this test previously passed `num_query=5`, an exact duplicate of
    `test_fixed_query` that never exercised the no-query case. It now uses
    `num_query=0`, consistent with `EpisodeReaderTest.test_no_query`.
    """
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_query=0))
    self.check_expected_structure(sampler)
  def test_fixed_shots(self):
    """Fixed number of support and query examples per class."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(
            num_support=3, num_query=7))
    self.check_expected_structure(sampler)
  def test_fixed_ways(self):
    """Fixed number of ways (classes per episode)."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_ways=12))
    self.check_expected_structure(sampler)
  def test_fixed_episodes(self):
    """Fully-specified episodes: fixed ways, support and query sizes."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(
            num_ways=12, num_support=3, num_query=7))
    self.check_expected_structure(sampler)
def construct_fake_datasets(class_ids,
                            examples_per_class,
                            repeat=True,
                            shuffle=True,
                            shuffle_seed=None):
  """Construct a list of in-memory fake datasets.

  Args:
    class_ids: A list of ints, one for each dataset to build.
    examples_per_class: A list of int, how many examples there are in each
      dataset.
    repeat: A Boolean indicating whether each of the datasets should be repeated
      (to provide an infinite stream).
    shuffle: A Boolean indicating whether each dataset should be shuffled.
    shuffle_seed: Optional, an int containing the seed passed to
      tf.data.Dataset.shuffle.

  Returns:
    A list of tf.data.Dataset. Each one contains a series of pairs:
    (a string formatted like '<class_id>.<example_id>', an int: class_id).
  """
  datasets = []
  for class_id, num_examples in zip(class_ids, examples_per_class):
    # Example strings encode both the class and the example index, so tests
    # can recover them with a single split on '.'.
    example_strings = [
        '{}.{}'.format(class_id, ex_id) for ex_id in range(num_examples)
    ]
    string_dataset = tf.data.Dataset.from_tensor_slices(example_strings)
    if shuffle:
      string_dataset = string_dataset.shuffle(
          buffer_size=num_examples,
          seed=shuffle_seed,
          reshuffle_each_iteration=True)
    if repeat:
      string_dataset = string_dataset.repeat()
    # The label stream is a constant, repeated forever.
    label_dataset = tf.data.Dataset.from_tensors(class_id).repeat()
    datasets.append(tf.data.Dataset.zip((string_dataset, label_dataset)))
  return datasets
class FakeEpisodeReader(reader.EpisodeReader):
  """Subclass of EpisodeReader whose class datasets live in memory."""
  def construct_class_datasets(self,
                               pool=None,
                               repeat=True,
                               shuffle=True,
                               shuffle_seed=None):
    """Builds small fake datasets instead of reading tfrecords files."""
    class_ids = [self.class_set[i] for i in range(self.num_classes)]
    examples_per_class = [
        self.dataset_spec.get_total_images_per_class(cid) for cid in class_ids
    ]
    # Shuffling is driven by the reader's buffer size, overriding the argument.
    shuffle = self.shuffle_buffer_size > 0
    return construct_fake_datasets(class_ids, examples_per_class, repeat,
                                   shuffle, shuffle_seed)
class EpisodeReaderTest(tf.test.TestCase):
  """Tests behaviour of Reader.
  To avoid reading from the filesystem, we actually test a subclass,
  FakeEpisodeReader, that overrides Reader.construct_class_datasets,
  replacing it with a method building small, in-memory datasets instead.
  """
  def setUp(self):
    """Common configuration shared by all test methods."""
    super(EpisodeReaderTest, self).setUp()
    self.dataset_spec = DATASET_SPEC
    self.split = Split.TRAIN
    self.shuffle_buffer_size = 30
    self.read_buffer_size_bytes = None
    self.num_prefetch = 0
  def generate_episodes(self,
                        sampler,
                        num_episodes,
                        shuffle=True,
                        shuffle_seed=None):
    """Runs the full input pipeline and returns `num_episodes` episodes.

    Args:
      sampler: An EpisodeDescriptionSampler describing the episodes.
      shuffle: If False, uses a zero-sized shuffle buffer (no shuffling).
      shuffle_seed: Optional int, seed forwarded to the shuffle queues.

    Returns:
      A list of (examples, targets) pairs, one per episode.
    """
    dataset_spec = sampler.dataset_spec
    split = sampler.split
    if shuffle:
      shuffle_buffer_size = self.shuffle_buffer_size
    else:
      shuffle_buffer_size = 0
    episode_reader = FakeEpisodeReader(dataset_spec, split, shuffle_buffer_size,
                                       self.read_buffer_size_bytes,
                                       self.num_prefetch)
    input_pipeline = episode_reader.create_dataset_input_pipeline(
        sampler, shuffle_seed=shuffle_seed)
    iterator = input_pipeline.make_one_shot_iterator()
    next_element = iterator.get_next()
    with tf.Session() as sess:
      episodes = [sess.run(next_element) for _ in range(num_episodes)]
    return episodes
  def check_episode_consistency(self, examples, targets, chunk_sizes):
    """Tests that a given episode is correctly built and consistent.
    In particular:
    - test that examples come from the right class
    - test that the overall "flush, support, query" structure is respected
    - test that within each chunk, the padding is at the end
    Args:
      examples: A 1D array of strings.
      targets: A 1D array of ints.
      chunk_sizes: A tuple of 3 ints, describing the structure of the episode.
    """
    self.check_consistent_class(examples, targets)
    batch_size = sum(chunk_sizes)
    # NOTE(review): the third positional argument of assertEqual is `msg`, so
    # len(targets) is used as a failure message here, not compared — confirm
    # whether a separate assertEqual(batch_size, len(targets)) was intended.
    self.assertEqual(batch_size, len(examples), len(targets))
    flush_examples, support_examples, query_examples = split_into_chunks(
        examples, chunk_sizes)
    flush_targets, support_targets, query_targets = split_into_chunks(
        targets, chunk_sizes)
    self.check_end_padding(flush_examples, flush_targets)
    self.check_end_padding(support_examples, support_targets)
    self.check_end_padding(query_examples, query_targets)
  def check_consistent_class(self, examples, targets):
    """Checks that the content of examples corresponds to the target.
    This assumes the datasets were generated from `construct_fake_datasets`,
    with a placeholder class of PLACEHOLDER_CLASS_ID with empty string examples.
    Args:
      examples: A 1D array of strings.
      targets: A 1D array of ints.
    """
    self.assertEqual(len(examples), len(targets))
    for (example, target) in zip(examples, targets):
      if example:
        # Example strings are formatted '<class_id>.<example_id>'.
        expected_target, _ = example.decode().split('.')
        self.assertEqual(int(expected_target), target)
      else:
        # Empty strings are padding and must carry the placeholder class.
        self.assertEqual(target, reader.PLACEHOLDER_CLASS_ID)
  def check_end_padding(self, examples_chunk, targets_chunk):
    """Checks the padding is at the end of each chunk.
    Args:
      examples_chunk: A 1D array of strings.
      targets_chunk: A 1D array of ints.
    """
    num_actual = sum(
        class_id != reader.PLACEHOLDER_CLASS_ID for class_id in targets_chunk)
    self.assertNotIn(reader.PLACEHOLDER_CLASS_ID, targets_chunk[:num_actual])
    self.assertNotIn(b'', examples_chunk[:num_actual])
    self.assertTrue(
        all(reader.PLACEHOLDER_CLASS_ID == target
            for target in targets_chunk[num_actual:]))
    self.assertAllInSet(examples_chunk[num_actual:], [b''])
  def generate_and_check(self, sampler, num_episodes):
    """Generates `num_episodes` episodes and checks each for consistency."""
    chunk_sizes = sampler.compute_chunk_sizes()
    episodes = self.generate_episodes(sampler, num_episodes)
    for episode in episodes:
      examples, targets = episode
      self.check_episode_consistency(examples, targets, chunk_sizes)
  def test_train(self):
    """Tests that a few episodes are consistent."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        Split.TRAIN,
        episode_descr_config=config.EpisodeDescriptionConfig())
    self.generate_and_check(sampler, 10)
  def test_valid(self):
    """Same consistency checks on the VALID split."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        Split.VALID,
        episode_descr_config=config.EpisodeDescriptionConfig())
    self.generate_and_check(sampler, 10)
  def test_test(self):
    """Same consistency checks on the TEST split."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        Split.TEST,
        episode_descr_config=config.EpisodeDescriptionConfig())
    self.generate_and_check(sampler, 10)
  def test_fixed_query(self):
    """Episodes with a fixed number of query examples per class."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_query=5))
    self.generate_and_check(sampler, 10)
  def test_no_query(self):
    """Episodes with no query examples at all."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_query=0))
    self.generate_and_check(sampler, 10)
  def test_fixed_shots(self):
    """Episodes with fixed support and query sizes per class."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(
            num_support=3, num_query=7))
    self.generate_and_check(sampler, 10)
  def test_fixed_ways(self):
    """Episodes with a fixed number of classes (ways)."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(num_ways=12))
    self.generate_and_check(sampler, 10)
  def test_fixed_episodes(self):
    """Fully-specified episodes: fixed ways, support and query sizes."""
    sampler = sampling.EpisodeDescriptionSampler(
        self.dataset_spec,
        self.split,
        episode_descr_config=config.EpisodeDescriptionConfig(
            num_ways=12, num_support=3, num_query=7))
    self.generate_and_check(sampler, 10)
  def test_non_deterministic_shuffle(self):
    """Different Readers generate different episode compositions.
    Even with the same episode descriptions, the content should be different.
    """
    num_episodes = 10
    init_rng = sampling.RNG
    seed = 20181120
    episode_streams = []
    chunk_sizes = []
    try:
      for _ in range(2):
        # Re-seed the description sampler so both runs request the same
        # episode descriptions; only the shuffle queues differ.
        sampling.RNG = np.random.RandomState(seed)
        sampler = sampling.EpisodeDescriptionSampler(
            self.dataset_spec,
            self.split,
            episode_descr_config=config.EpisodeDescriptionConfig())
        episodes = self.generate_episodes(sampler, num_episodes)
        episode_streams.append(episodes)
        chunk_size = sampler.compute_chunk_sizes()
        chunk_sizes.append(chunk_size)
        for examples, targets in episodes:
          self.check_episode_consistency(examples, targets, chunk_size)
    finally:
      # Restore the original RNG
      sampling.RNG = init_rng
    self.assertEqual(chunk_sizes[0], chunk_sizes[1])
    # It is unlikely that all episodes will be the same
    num_identical_episodes = 0
    for ((examples1, targets1), (examples2, targets2)) in zip(*episode_streams):
      self.check_episode_consistency(examples1, targets1, chunk_sizes[0])
      self.check_episode_consistency(examples2, targets2, chunk_sizes[1])
      self.assertAllEqual(targets1, targets2)
      if all(examples1 == examples2):
        num_identical_episodes += 1
    self.assertNotEqual(num_identical_episodes, num_episodes)
  def test_deterministic_noshuffle(self):
    """Tests episode generation determinism when there is noshuffle queue."""
    num_episodes = 10
    init_rng = sampling.RNG
    seed = 20181120
    episode_streams = []
    chunk_sizes = []
    try:
      for _ in range(2):
        sampling.RNG = np.random.RandomState(seed)
        sampler = sampling.EpisodeDescriptionSampler(
            self.dataset_spec,
            self.split,
            episode_descr_config=config.EpisodeDescriptionConfig())
        episodes = self.generate_episodes(sampler, num_episodes, shuffle=False)
        episode_streams.append(episodes)
        chunk_size = sampler.compute_chunk_sizes()
        chunk_sizes.append(chunk_size)
        for examples, targets in episodes:
          self.check_episode_consistency(examples, targets, chunk_size)
    finally:
      # Restore the original RNG
      sampling.RNG = init_rng
    self.assertEqual(chunk_sizes[0], chunk_sizes[1])
    # Without shuffling, both runs must produce byte-identical episodes.
    for ((examples1, targets1), (examples2, targets2)) in zip(*episode_streams):
      self.assertAllEqual(examples1, examples2)
      self.assertAllEqual(targets1, targets2)
  def test_deterministic_tfseed(self):
    """Tests episode generation determinism when shuffle queues are seeded."""
    num_episodes = 10
    seed = 20181120
    episode_streams = []
    chunk_sizes = []
    init_rng = sampling.RNG
    try:
      for _ in range(2):
        sampling.RNG = np.random.RandomState(seed)
        sampler = sampling.EpisodeDescriptionSampler(
            self.dataset_spec,
            self.split,
            episode_descr_config=config.EpisodeDescriptionConfig())
        episodes = self.generate_episodes(
            sampler, num_episodes, shuffle_seed=seed)
        episode_streams.append(episodes)
        chunk_size = sampler.compute_chunk_sizes()
        chunk_sizes.append(chunk_size)
        for examples, targets in episodes:
          self.check_episode_consistency(examples, targets, chunk_size)
    finally:
      # Restore the original RNG
      sampling.RNG = init_rng
    self.assertEqual(chunk_sizes[0], chunk_sizes[1])
    # Seeded shuffling should also be fully reproducible across runs.
    for ((examples1, targets1), (examples2, targets2)) in zip(*episode_streams):
      self.check_episode_consistency(examples1, targets1, chunk_sizes[0])
      self.check_episode_consistency(examples2, targets2, chunk_sizes[1])
      self.assertAllEqual(examples1, examples2)
      self.assertAllEqual(targets1, targets2)
  def check_description_vs_target_chunks(self, description,
                                         target_support_chunk,
                                         target_query_chunk, offset):
    """Checks that target chunks are consistent with the description.
    The number of support and query examples should correspond to the
    description, and no other class ID (except PLACEHOLDER_CLASS_ID) should be
    present.
    Args:
      description: A sequence of (class_id, num_support, num_query) tuples of
        ints, describing the content of an episode.
      target_support_chunk: A sequence of ints, padded.
      target_query_chunk: A sequence of ints, padded.
      offset: An int, the difference between the absolute class IDs in the
        target, and the relative class IDs in the episode description.
    """
    support_cursor = 0
    query_cursor = 0
    for class_id, num_support, num_query in description:
      self.assertAllEqual(
          target_support_chunk[support_cursor:support_cursor + num_support],
          [class_id + offset] * num_support)
      support_cursor += num_support
      self.assertAllEqual(
          target_query_chunk[query_cursor:query_cursor + num_query],
          [class_id + offset] * num_query)
      query_cursor += num_query
    # Everything past the cursors must be padding.
    self.assertTrue(
        all(target_support_chunk[support_cursor:] ==
            reader.PLACEHOLDER_CLASS_ID))
    self.assertTrue(
        all(target_query_chunk[query_cursor:] == reader.PLACEHOLDER_CLASS_ID))
  def check_same_as_generator(self, split, offset):
    """Tests that the targets are the ones requested by the generator.
    Args:
      split: A value of the Split enum, which split to generate from.
      offset: An int, the difference between the absolute class IDs in the
        source, and the relative class IDs in the episodes.
    """
    num_episodes = 10
    seed = 20181121
    init_rng = sampling.RNG
    try:
      # First pass: record the episode descriptions a freshly-seeded sampler
      # produces.
      sampling.RNG = np.random.RandomState(seed)
      sampler = sampling.EpisodeDescriptionSampler(
          self.dataset_spec,
          split,
          episode_descr_config=config.EpisodeDescriptionConfig())
      # Each description is a (class_id, num_support, num_query) tuple.
      descriptions = [
          sampler.sample_episode_description() for _ in range(num_episodes)
      ]
      # Second pass: re-seed identically and run the full pipeline, which
      # must follow the same descriptions.
      sampling.RNG = np.random.RandomState(seed)
      sampler = sampling.EpisodeDescriptionSampler(
          self.dataset_spec,
          split,
          episode_descr_config=config.EpisodeDescriptionConfig())
      episodes = self.generate_episodes(sampler, num_episodes)
      chunk_sizes = sampler.compute_chunk_sizes()
      self.assertEqual(len(descriptions), len(episodes))
      for (description, episode) in zip(descriptions, episodes):
        examples, targets = episode
        self.check_episode_consistency(examples, targets, chunk_sizes)
        _, targets_support_chunk, targets_query_chunk = split_into_chunks(
            targets, chunk_sizes)
        self.check_description_vs_target_chunks(description,
                                                targets_support_chunk,
                                                targets_query_chunk, offset)
    finally:
      sampling.RNG = init_rng
  def test_same_as_generator(self):
    """Runs check_same_as_generator on every split with the right offset."""
    # The offset corresponds to the difference between the absolute class ID as
    # used in the episode pipeline, and class ID relative to the split (provided
    # by the episode generator).
    offset = 0
    for split in Split:
      self.check_same_as_generator(split, offset)
      offset += len(self.dataset_spec.get_classes(split))
  def test_flush_logic(self):
    """Tests the "flush" logic avoiding example duplication in an episode."""
    # Generate two episodes from un-shuffled data sources. For classes where
    # there are enough examples for both, new examples should be used for the
    # second episodes. Otherwise, the first examples should be re-used.
    # A data_spec with classes between 10 and 29 examples.
    num_classes = 30
    dataset_spec = DatasetSpecification(
        name=None,
        classes_per_split={
            Split.TRAIN: num_classes,
            Split.VALID: 0,
            Split.TEST: 0
        },
        images_per_class={i: 10 + i for i in range(num_classes)},
        class_names=None,
        path=None,
        file_pattern='{}.tfrecords')
    # Sample from all train classes, 5 + 5 examples from each episode
    sampler = sampling.EpisodeDescriptionSampler(
        dataset_spec,
        Split.TRAIN,
        episode_descr_config=config.EpisodeDescriptionConfig(
            num_ways=num_classes, num_support=5, num_query=5))
    episodes = self.generate_episodes(sampler, num_episodes=2, shuffle=False)
    # The "flush" part of the second episode should contain 0 from class_id 0, 1
    # for 1, ..., 9 for 9, and then 0 for 10 and the following.
    chunk_sizes = sampler.compute_chunk_sizes()
    _, episode2 = episodes
    examples2, targets2 = episode2
    flush_target2, _, _ = split_into_chunks(targets2, chunk_sizes)
    for class_id in range(10):
      self.assertEqual(
          sum(target == class_id for target in flush_target2), class_id)
    for class_id in range(10, num_classes):
      self.assertEqual(sum(target == class_id for target in flush_target2), 0)
    # The "support" part of the second episode should start at example 0 for
    # class_ids from 0 to 9 (included), and at example 10 for class_id 10 and
    # higher.
    _, support_examples2, query_examples2 = split_into_chunks(
        examples2, chunk_sizes)
    def _build_class_id_to_example_ids(examples):
      # Build a mapping: class_id -> list of example ids
      mapping = collections.defaultdict(list)
      for example in examples:
        if not example:
          # Padding is at the end
          break
        class_id, example_id = example.decode().split('.')
        mapping[int(class_id)].append(int(example_id))
      return mapping
    support2_example_ids = _build_class_id_to_example_ids(support_examples2)
    query2_example_ids = _build_class_id_to_example_ids(query_examples2)
    for class_id in range(10):
      self.assertCountEqual(support2_example_ids[class_id], list(range(5)))
      self.assertCountEqual(query2_example_ids[class_id], list(range(5, 10)))
    for class_id in range(10, num_classes):
      self.assertCountEqual(support2_example_ids[class_id], list(range(10, 15)))
      self.assertCountEqual(query2_example_ids[class_id], list(range(15, 20)))
if __name__ == '__main__':
  # Run every test case in this module under the TensorFlow test runner.
  tf.test.main()
| [
"itertools.islice",
"six.moves.range",
"meta_dataset.data.reader.dataset_id_generator",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.data.Dataset.from_tensors",
"numpy.log",
"tensorflow.compat.v1.test.main",
"collections.defaultdict",
"meta_dataset.data.config.EpisodeDescriptionConfig",
"... | [((1715, 1726), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (1721, 1726), True, 'import numpy as np\n'), ((1744, 1753), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1750, 1753), True, 'import numpy as np\n'), ((25634, 25648), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (25646, 25648), True, 'import tensorflow.compat.v1 as tf\n'), ((3921, 3994), 'meta_dataset.data.reader.dataset_id_generator', 'reader.dataset_id_generator', (['self.dataset_spec', 'self.split', 'None', 'sampler'], {}), '(self.dataset_spec, self.split, None, sampler)\n', (3948, 3994), False, 'from meta_dataset.data import reader\n'), ((4052, 4060), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (4057, 4060), False, 'from six.moves import range\n'), ((8167, 8230), 'tensorflow.compat.v1.data.Dataset.zip', 'tf.data.Dataset.zip', (['(example_string_dataset, class_id_dataset)'], {}), '((example_string_dataset, class_id_dataset))\n', (8186, 8230), True, 'import tensorflow.compat.v1 as tf\n'), ((11954, 11976), 'six.moves.zip', 'zip', (['examples', 'targets'], {}), '(examples, targets)\n', (11957, 11976), False, 'from six.moves import zip\n'), ((16308, 16329), 'six.moves.zip', 'zip', (['*episode_streams'], {}), '(*episode_streams)\n', (16311, 16329), False, 'from six.moves import zip\n'), ((17677, 17698), 'six.moves.zip', 'zip', (['*episode_streams'], {}), '(*episode_streams)\n', (17680, 17698), False, 'from six.moves import zip\n'), ((18824, 18845), 'six.moves.zip', 'zip', (['*episode_streams'], {}), '(*episode_streams)\n', (18827, 18845), False, 'from six.moves import zip\n'), ((24171, 24180), 'six.moves.range', 'range', (['(10)'], {}), '(10)\n', (24176, 24180), False, 'from six.moves import range\n'), ((24299, 24321), 'six.moves.range', 'range', (['(10)', 'num_classes'], {}), '(10, num_classes)\n', (24304, 24321), False, 'from six.moves import range\n'), ((25233, 25242), 'six.moves.range', 'range', (['(10)'], {}), '(10)\n', (25238, 25242), False, 'from 
six.moves import range\n'), ((25419, 25441), 'six.moves.range', 'range', (['(10)', 'num_classes'], {}), '(10, num_classes)\n', (25424, 25441), False, 'from six.moves import range\n'), ((10330, 10342), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (10340, 10342), True, 'import tensorflow.compat.v1 as tf\n'), ((15482, 15490), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (15487, 15490), False, 'from six.moves import range\n'), ((16923, 16931), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (16928, 16931), False, 'from six.moves import range\n'), ((18053, 18061), 'six.moves.range', 'range', (['(2)'], {}), '(2)\n', (18058, 18061), False, 'from six.moves import range\n'), ((21041, 21068), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (21062, 21068), True, 'import numpy as np\n'), ((21434, 21461), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (21455, 21461), True, 'import numpy as np\n'), ((21832, 21859), 'six.moves.zip', 'zip', (['descriptions', 'episodes'], {}), '(descriptions, episodes)\n', (21835, 21859), False, 'from six.moves import zip\n'), ((24791, 24820), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (24814, 24820), False, 'import collections\n'), ((4215, 4254), 'itertools.islice', 'itertools.islice', (['generator', 'batch_size'], {}), '(generator, batch_size)\n', (4231, 4254), False, 'import itertools\n'), ((5318, 5351), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (5349, 5351), False, 'from meta_dataset.data import config\n'), ((5553, 5597), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_query': '(5)'}), '(num_query=5)\n', (5584, 5597), False, 'from meta_dataset.data import config\n'), ((5796, 5840), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_query': '(5)'}), 
'(num_query=5)\n', (5827, 5840), False, 'from meta_dataset.data import config\n'), ((6042, 6101), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_support': '(3)', 'num_query': '(7)'}), '(num_support=3, num_query=7)\n', (6073, 6101), False, 'from meta_dataset.data import config\n'), ((6315, 6359), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_ways': '(12)'}), '(num_ways=12)\n', (6346, 6359), False, 'from meta_dataset.data import config\n'), ((6564, 6636), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_ways': '(12)', 'num_support': '(3)', 'num_query': '(7)'}), '(num_ways=12, num_support=3, num_query=7)\n', (6595, 6636), False, 'from meta_dataset.data import config\n'), ((8105, 8143), 'tensorflow.compat.v1.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['class_id'], {}), '(class_id)\n', (8133, 8143), True, 'import tensorflow.compat.v1 as tf\n'), ((8691, 8714), 'six.moves.range', 'range', (['self.num_classes'], {}), '(self.num_classes)\n', (8696, 8714), False, 'from six.moves import range\n'), ((13326, 13359), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (13357, 13359), False, 'from meta_dataset.data import config\n'), ((13554, 13587), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (13585, 13587), False, 'from meta_dataset.data import config\n'), ((13780, 13813), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (13811, 13813), False, 'from meta_dataset.data import config\n'), ((14013, 14057), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_query': '(5)'}), '(num_query=5)\n', (14044, 14057), False, 'from meta_dataset.data import config\n'), ((14254, 14298), 
'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_query': '(0)'}), '(num_query=0)\n', (14285, 14298), False, 'from meta_dataset.data import config\n'), ((14498, 14557), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_support': '(3)', 'num_query': '(7)'}), '(num_support=3, num_query=7)\n', (14529, 14557), False, 'from meta_dataset.data import config\n'), ((14769, 14813), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_ways': '(12)'}), '(num_ways=12)\n', (14800, 14813), False, 'from meta_dataset.data import config\n'), ((15016, 15088), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_ways': '(12)', 'num_support': '(3)', 'num_query': '(7)'}), '(num_ways=12, num_support=3, num_query=7)\n', (15047, 15088), False, 'from meta_dataset.data import config\n'), ((15515, 15542), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (15536, 15542), True, 'import numpy as np\n'), ((16956, 16983), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (16977, 16983), True, 'import numpy as np\n'), ((18086, 18113), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (18107, 18113), True, 'import numpy as np\n'), ((23654, 23739), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {'num_ways': 'num_classes', 'num_support': '(5)', 'num_query': '(5)'}), '(num_ways=num_classes, num_support=5,\n num_query=5)\n', (23685, 23739), False, 'from meta_dataset.data import config\n'), ((7796, 7815), 'six.moves.range', 'range', (['num_examples'], {}), '(num_examples)\n', (7801, 7815), False, 'from six.moves import range\n'), ((10402, 10421), 'six.moves.range', 'range', (['num_episodes'], {}), '(num_episodes)\n', (10407, 10421), False, 'from six.moves import range\n'), ((21198, 
21231), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (21229, 21231), False, 'from meta_dataset.data import config\n'), ((21384, 21403), 'six.moves.range', 'range', (['num_episodes'], {}), '(num_episodes)\n', (21389, 21403), False, 'from six.moves import range\n'), ((21591, 21624), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (21622, 21624), False, 'from meta_dataset.data import config\n'), ((25309, 25317), 'six.moves.range', 'range', (['(5)'], {}), '(5)\n', (25314, 25317), False, 'from six.moves import range\n'), ((25383, 25395), 'six.moves.range', 'range', (['(5)', '(10)'], {}), '(5, 10)\n', (25388, 25395), False, 'from six.moves import range\n'), ((25508, 25521), 'six.moves.range', 'range', (['(10)', '(15)'], {}), '(10, 15)\n', (25513, 25521), False, 'from six.moves import range\n'), ((25587, 25600), 'six.moves.range', 'range', (['(15)', '(20)'], {}), '(15, 20)\n', (25592, 25600), False, 'from six.moves import range\n'), ((15685, 15718), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (15716, 15718), False, 'from meta_dataset.data import config\n'), ((17126, 17159), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (17157, 17159), False, 'from meta_dataset.data import config\n'), ((18256, 18289), 'meta_dataset.data.config.EpisodeDescriptionConfig', 'config.EpisodeDescriptionConfig', ([], {}), '()\n', (18287, 18289), False, 'from meta_dataset.data import config\n'), ((23359, 23377), 'six.moves.range', 'range', (['num_classes'], {}), '(num_classes)\n', (23364, 23377), False, 'from six.moves import range\n')] |
#
# Copyright (c) 2020 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
import logging
from collections.abc import Iterable
import numpy as np
from os.path import splitext, isfile, join, dirname
__all__ = ['invert_dictionary',
'invert_dictionary_with_iterables',
'amfeti_dir']
def invert_dictionary(dict_map):
    """
    Invert a dictionary-mapping such that values point to keys.

    If several keys share the same value, the inverted entry collects those keys
    in a list (a single occurrence stays a scalar).

    Parameters
    ----------
    dict_map : dict
        dictionary, that shall be inverted

    Returns
    -------
    dict_map_inv : dict
        inverted dictionary
    """
    inverted = dict()
    for original_key, original_value in dict_map.items():
        if original_value not in inverted:
            # first occurrence: store the key directly
            inverted[original_value] = original_key
        else:
            # subsequent occurrence: promote to a list and accumulate
            if not isinstance(inverted[original_value], list):
                inverted[original_value] = [inverted[original_value]]
            inverted[original_value].append(original_key)
    return inverted
def invert_dictionary_with_iterables(dict_map):
"""
Invert a dictionary-mapping such that values point to keys. Values may only be iterables and the new keys are the
iterables' entries.
Parameters
----------
dict_map : dict
dictionary, that shall be inverted
Returns
-------
dict_map_inv : dict
inverted dictionary
"""
def add_new_value_to_key(dictionary, key, value, value_type=None):
if key in dictionary:
if isinstance(dictionary[key], np.ndarray):
dictionary[key] = np.append(dictionary[key], np.array([value], dtype=object))
elif isinstance(dictionary[key], tuple) or isinstance(dictionary[key], str):
dictionary[key] += (value,)
elif isinstance(dictionary[key], list):
dictionary[key].append(value)
else:
dictionary[key] = value
else:
if value_type is np.ndarray:
dictionary[key] = np.array([value], dtype=object)
elif value_type is tuple or isinstance(value, str):
dictionary[key] = (value,)
elif value_type is list:
dictionary[key] = [value]
else:
dictionary[key] = value
return dictionary
dict_map_inv = dict()
for k, v in dict_map.items():
if not isinstance(v, Iterable) or type(v) is None:
raise ValueError('Unknown type of value in dictionary, when inverting dictionary.')
else:
if not isinstance(v, (list, str, np.ndarray, tuple)):
logger = logging.getLogger(__name__)
logger.warning(
["The datatype " + str(type(v)) + " is not explicitly supported and might lead to unexpected "
"behaviour."])
for vi in v:
dict_map_inv = add_new_value_to_key(dict_map_inv, vi, k, type(v))
return dict_map_inv
def amfeti_dir(filename=''):
    '''
    Return the absolute path of the filename given relative to the amfeti directory.

    Parameters
    ----------
    filename : string, optional
        relative path to something inside the amfeti directory; a leading slash
        is stripped so the argument is always treated as relative.

    Returns
    -------
    dir : string
        string of the filename inside the amfeti-directory. Default value is '', so the amfeti-directory is returned.
    '''
    relative_part = filename.lstrip('/')
    return join(dirname(__file__), relative_part)
| [
"logging.getLogger",
"os.path.dirname",
"numpy.array"
] | [((3693, 3710), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3700, 3710), False, 'from os.path import splitext, isfile, join, dirname\n'), ((2252, 2283), 'numpy.array', 'np.array', (['[value]'], {'dtype': 'object'}), '([value], dtype=object)\n', (2260, 2283), True, 'import numpy as np\n'), ((2875, 2902), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2892, 2902), False, 'import logging\n'), ((1841, 1872), 'numpy.array', 'np.array', (['[value]'], {'dtype': 'object'}), '([value], dtype=object)\n', (1849, 1872), True, 'import numpy as np\n')] |
import json
import os.path
import struct
from os import path
from os.path import join
import pathlib
import re
import numpy
# Directory layout anchors relative to this module's location on disk.
parent_dir = pathlib.Path(__file__).parent.resolve()
code_dir = parent_dir.parent
data_dir = code_dir.parent
# collect ecat header maps, this program will not work without these as ECAT data varies in the byte location of its
# data depending on the version of ECAT it was formatted with.
try:
    with open(join(parent_dir, 'ecat_headers.json'), 'r') as infile:
        # maps ECAT version -> header/subheader byte layouts, loaded once at import time
        ecat_header_maps = json.load(infile)
except FileNotFoundError:
    raise Exception("Unable to load header definitions and map from ecat_headers.json. Aborting.")
# noinspection PyShadowingNames
def get_ecat_bytes(path_to_ecat: str):
    """
    Opens an ecat file and reads the entire file into memory, returning a bytes object.

    Note: not terribly memory efficient for large or parallel reading of ecat files,
    since the whole file is loaded at once.

    :param path_to_ecat: path to an ecat file; any readable file will be returned
        as raw bytes.
    :return: a bytes object with the complete file content
    """
    # fail fast with a clear message when the file does not exist
    if not path.isfile(path_to_ecat):
        raise Exception(f"No such file found at {path_to_ecat}")
    with open(path_to_ecat, 'rb') as infile:
        return infile.read()
def read_bytes(path_to_bytes: str, byte_start: int, byte_stop: int = -1):
    """
    Open a file at path to bytes and read a range of bytes from it.

    :param path_to_bytes: Path to file to read
    :param byte_start: Position to place the seek head at before reading bytes
    :param byte_stop: Number of bytes to read from ``byte_start`` (despite the
        name this is a width, not an absolute stop position); -1 reads to EOF
    :return: the bytes located at the position sought when invoking this function.
    :raises Exception: if ``path_to_bytes`` is not an existing file
    """
    if not os.path.isfile(path_to_bytes):
        raise Exception(f"{path_to_bytes} is not a valid file.")

    # `with` guarantees the handle is closed even if seek/read raises; the
    # previous implementation leaked the handle on error.
    with open(path_to_bytes, 'rb') as bytes_to_read:
        # move to start byte, relative to the beginning of the file
        bytes_to_read.seek(byte_start, 0)
        return bytes_to_read.read(byte_stop)
def collect_specific_bytes(bytes_object: bytes, start_position: int = 0, width: int = 0):
    """
    Collects specific bytes within a bytes object.

    :param bytes_object: an opened bytes object
    :param start_position: index of the first byte to collect
    :param width: how many bytes to read from the start position
    :return: dict with the collected bytes under ``"content"`` and the index just
        past the collected range under ``"new_position"``
    """
    end_position = start_position + width
    return {"content": bytes_object[start_position:end_position],
            "new_position": end_position}
def get_buffer_size(data_type: str, variable_name: str):
    """
    Determine the byte width of a variable as defined in the ecat_headers.json,
    e.g. ``Fill(6)`` with data type ``2i`` yields ``12``.

    :param data_type: type string containing the per-element byte width, e.g. '2i'
    :param variable_name: header field name, optionally suffixed with a fill
        count in parentheses, e.g. 'Fill(6)'
    :return: the number of bytes to expand a buffer to
    """
    parts = variable_name.split('(')
    # 'NAME(6)' splits into exactly two parts -> multiplier 6; plain names -> 1
    multiplier = int(parts[1][:-1]) if len(parts) == 2 else 1
    base_width = int(re.findall(r'\d+', data_type)[0])
    return base_width * multiplier
def get_header_data(header_data_map: dict, ecat_file: str = '', byte_offset: int = 0, clean=True):
    """
    ECAT header data is contained in json files translated from ECAT documentation provided via the Turku PET Inst.
    For machine and human readability the original Siemens PDF/Scanned ECAT file documentation has been rewritten into
    json to provide easy access to byte position, byte width, variable name, and byte data type. Json's including this
    header information are sub-divided into ECAT versions with sub-dictionaries corresponding to each imaging type as
    mentioned in the original ECAT spec.

    :param header_data_map: schema for reading in header data, this is stored in dictionary that is read in from a
           json file.
    :param ecat_file: path to an ecat file, file is opened and byte blocks containing header data are then extracted,
           read, and cleaned into python float, int, or list data types
    :param byte_offset: offset from the head of the file to read from
    :param clean: Whether to remove byte padding or not, if not provided strings will include padding w/ \0x bytes and
           lists/arrays of data will be returned as tuples instead of lists. Uncleaned data will be of format b''.
    :return: a dictionary with variable names of header fields as keys and cleaned/uncleaned header fields as values,
             plus the read-head position just past the last field read
    """
    header = {}
    # Initialize the read head to the supplied offset so an empty map returns a
    # sensible position; previously this variable was only assigned inside the
    # loop and an empty map raised UnboundLocalError on return.
    read_head_position = byte_offset
    for value in header_data_map:
        byte_position, variable_name, struct_fmt = value['byte'], value['variable_name'], '>' + value['struct']
        byte_width = struct.calcsize(struct_fmt)
        relative_byte_position = byte_position + byte_offset
        raw_bytes = read_bytes(ecat_file, relative_byte_position, byte_width)
        header[variable_name] = struct.unpack(struct_fmt, raw_bytes)
        # 'fill' fields are padding; leave them raw even when cleaning
        if clean and 'fill' not in variable_name.lower():
            header[variable_name] = filter_bytes(header[variable_name], struct_fmt)
        read_head_position = relative_byte_position + byte_width
    return header, read_head_position
def filter_bytes(unfiltered: bytes, struct_fmt: str):
    """
    Clean up the tuple returned by ``struct.unpack`` into plain python types.

    ``struct.unpack`` always returns a tuple even for a single value; this helper
    unwraps single-element tuples, converts longer ones to lists, and decodes
    string-typed fields ('s' formats) to UTF-8 text with null padding stripped.

    :param unfiltered: a raw tuple as produced by struct.unpack
    :param struct_fmt: the c struct format string of the object
    :return: a cleaned python int, float, string, or list
    """
    element_count = len(unfiltered)
    if element_count == 1:
        value = unfiltered[0]
    else:
        value = list(unfiltered) if element_count > 1 else unfiltered
    if 's' in struct_fmt:
        # drop embedded null bytes, then decode to text
        return str(bytes(filter(None, value)), 'UTF-8')
    return value
def get_directory_data(byte_block: bytes, ecat_file: str, return_raw: bool = False):
    """
    Collects the directory data within an ECAT file. The directory data refers to the 512 byte table that describes the
    byte location of each frame, subheader, number of frames, and additional directory tables within the file.

    :param byte_block: A block of file bytes to convert into a 2 dimensional numpy array.
    :param ecat_file: the path to the ecat file
    :param return_raw: return the directory tables as extracted, if left False this will return the directory tables
           combined into a single table. The single table is all that is needed in order to read information in from an
           ECAT.
    :return: Individual tables corresponding to up to 31 frames each or a combined directory table consisting of no
             more columns than than are number of frames in the image/PET scan.
    """
    directory = None  # used to keep track of state in the event of a directory spanning more than one 512 byte block
    raw = []
    while True:
        # The exit conditions for this loop are below
        # if [4,1] of the directory is 0 break as there are 31 or less frames in this 512 byte buffer
        # if [2,1] of the directory is 2 break ????, up for interpretation as to the exact meaning but,
        # observed to signal the end of an additional 512 byte block/buffer when the number of frames
        # exceeds 31
        # big-endian 4-byte integers, per the ECAT spec
        read_byte_array = numpy.frombuffer(byte_block, dtype=numpy.dtype('>i4'), count=-1)
        # reshape 1d array into 2d, a 4 row by 32 column table is expected
        reshaped = numpy.transpose(numpy.reshape(read_byte_array, (-1, 4)))
        raw.append(reshaped)
        # chop off columns after 32, rows after 32 appear to be noise
        reshaped = reshaped[:, 0:read_byte_array[3] + 1]
        # get directory size/number of frames in dir from 1st column 4th row of the array in the buffer
        directory_size = reshaped[3, 0]
        if directory_size == 0:
            break
        # on the first pass do this
        if directory is None:
            directory = reshaped[:, 1:directory_size + 1]
        else:
            # subsequent passes append the new frame columns to the running table
            directory = numpy.append(directory, reshaped[:, 1:directory_size + 1], axis=1)
        # determine if this is the last directory by examining the 2nd row of the first column of the buffer
        next_directory_position = reshaped[1, 0]
        if next_directory_position == 2:
            break
        # looks like there is more directory to read, collect some more bytes
        # NOTE(review): positions appear to be 1-based 512-byte block indices — confirm against spec
        byte_block = read_bytes(
            path_to_bytes=ecat_file,
            byte_start=(next_directory_position - 1) * 512,
            byte_stop=512
        )
    # sort the directory contents as they're sometimes out of order
    # (sort key is row 0 of each frame column; presumably frame start time)
    sorted_directory = directory[:, directory[0].argsort()]
    if return_raw:
        return raw
    else:
        return sorted_directory
def read_ecat(ecat_file: str, calibrated: bool = False, collect_pixel_data: bool = True):
    """
    Reads in an ecat file and collects the main header data, subheader data, and imaging data.

    :param ecat_file: path to an ecat file, does not handle compression currently
    :param calibrated: if True, will scale the raw imaging data by the SCALE_FACTOR in the subheader and
        the ECAT_CALIBRATION_FACTOR in the main header
    :param collect_pixel_data: By default collects the entire ecat, can be passed False to only return headers
    :return: main_header, a list of subheaders for each frame, the imaging data from the subheaders
        (the pixel matrix is None when collect_pixel_data is False)
    """
    # try to determine what type of ecat this is
    possible_ecat_headers = {}
    for entry, dictionary in ecat_header_maps['ecat_headers'].items():
        possible_ecat_headers[entry] = dictionary['mainheader']
    # parse the main header with each candidate layout until SW_VERSION matches
    confirmed_version = None
    for version, dictionary in possible_ecat_headers.items():
        try:
            possible_header, _ = get_header_data(dictionary, ecat_file)
            if version == str(possible_header['SW_VERSION']):
                confirmed_version = version
                break
        except UnicodeDecodeError:
            # a wrong layout can land string decoding on arbitrary bytes; try the next one
            continue
    if not confirmed_version:
        raise Exception(f"Unable to determine ECAT File Type from these types {possible_ecat_headers.keys()}")
    ecat_main_header = ecat_header_maps['ecat_headers'][confirmed_version]['mainheader']
    main_header, read_to = get_header_data(ecat_main_header, ecat_file)
    """
    Some notes about the file directory/sorted directory:
    Comments referencing matrix/table indexing may vary by +-1 in relation to code written.
    Python is 0 indexed by default so it can be taken as truth w/ relation to element location.
    Deviation from this convention are intended to clarify what is happening to a human reader
    although we are aware that this most likely has the opposite effect.
    The first or 0th column of the file directory correspond to the nature of the directory itself:
    row 0: ??? No idea, some integer
    row 1: Byte position of this table/directory
    row 2: not sure in testing it seems to be 0 most times..
    row 3: The number of frames/additional columns in the file. If the number of columns of this array
    is n, it would contain n-1 frames.
    The values in sorted_directory correspond to the following for all columns except the first column
    row 0: Not sure, but we sort on this, perhaps it's the frame start time
    row 1: the start byte block position of the frame data
    row 2: end byte block position of the frame data
    row 3: ??? Number of frames contained in w/ in the byte blocks between row 1 and 2?
    """
    # Collecting First Part of File Directory/Index this Directory lies directly after the main header byte block
    next_block = read_bytes(
        path_to_bytes=ecat_file,
        byte_start=read_to,
        byte_stop=read_to + 512)
    directory = get_directory_data(next_block, ecat_file)
    # determine subheader type by checking main header
    subheader_type_number = main_header['FILE_TYPE']
    """
    ECAT 7.2 Only
    Subheader types correspond to these enumerated types as defined below:
    00 = unknown,
    01 = Sinogram,
    02 = Image - 16,
    03 = Attenuation Correction,
    04 = Normalization,
    05 = PolarMap,
    06 = Volume 8,
    07 = Volume 16,
    08 = Projection 8,
    09 = Projection 16,
    10 = Image 8,
    11 = 3D Sinogram 16,
    12 = 3D Sinogram 8,
    13 = 3D Normalization,
    14 = 3D Sinogram Fit)
    Presently, only types 03, 05, 07, 11, and 13 correspond to known subheader types for 72. If the
    value in FILE_TYPE is outside of this range the subheaders will not be read and this will
    raise an exception.
    ECAT 7.3 Only
    00 = unknown
    01 = unknown
    02 = unknown
    03 = Attenuation Correction
    04 = unknown
    05 = unknown
    06 = unknown
    07 = Volume 16
    08 = unknown
    09 = unknown
    10 = unknown
    11 = 3D Sinogram 16
    """
    # collect the bytes map file for the designated subheader, note some are not supported.
    subheader_map = ecat_header_maps['ecat_headers'][confirmed_version][str(subheader_type_number)]
    if not subheader_map:
        raise Exception(f"Unsupported data type: {subheader_type_number}")
    # collect subheaders and pixel data
    subheaders, data = [], []
    for i in range(len(directory.T)):
        frame_number = i + 1
        if collect_pixel_data:
            print(f"Reading subheader from frame {frame_number}")
        # collect frame info/column
        frame_info = directory[:, i]
        frame_start = frame_info[1]
        frame_stop = frame_info[2]
        # frame positions are 1-based 512-byte block indices
        frame_start_byte_position = 512 * (frame_start - 1)  # sure why not
        # read subheader
        subheader, byte_position = get_header_data(subheader_map,
                                                   ecat_file,
                                                   byte_offset=frame_start_byte_position)
        if collect_pixel_data:
            # collect pixel data from file
            pixel_data = read_bytes(path_to_bytes=ecat_file,
                                    byte_start=512 * frame_start,
                                    byte_stop=512 * frame_stop)
            # calculate size of matrix for pixel data, may vary depending on image type (polar, 3d, etc.)
            if subheader_type_number == 7:
                # Volume 16: 3-D volume whose dimensions come from the subheader
                image_size = [subheader['X_DIMENSION'], subheader['Y_DIMENSION'], subheader['Z_DIMENSION']]
                # check subheader for pixel datatype
                dt_val = subheader['DATA_TYPE']
                if dt_val == 5:
                    # IEEE float, big-endian
                    formatting = '>f4'
                    pixel_data_type = numpy.dtype(formatting)
                elif dt_val == 6:
                    # unsigned 16-bit integer, big-endian
                    pixel_data_type = '>H'
                else:
                    raise ValueError(
                        f"Unable to determine pixel data type from value: {dt_val} extracted from {subheader}")
                # read it into a one dimensional matrix, then reshape Fortran-order
                # (column-major) into the 3-D volume
                pixel_data_matrix_3d = numpy.frombuffer(pixel_data,
                                                        dtype=pixel_data_type,
                                                        count=image_size[0] * image_size[1] * image_size[2]).reshape(
                    *image_size, order='F')
            else:
                raise Exception(f"Unable to determine frame image size, unsupported image type {subheader_type_number}")
            # we assume the user doesn't want to do multiplication to adjust for calibration here
            if calibrated:
                calibration_factor = subheader['SCALE_FACTOR'] * main_header['ECAT_CALIBRATION_FACTOR']
                calibrated_pixel_data_matrix_3d = calibration_factor * pixel_data_matrix_3d
                data.append(calibrated_pixel_data_matrix_3d)
            else:
                data.append(pixel_data_matrix_3d)
        else:
            data = None
        subheaders.append(subheader)
    if collect_pixel_data:
        # return 4d array instead of list of 3d arrays
        # (image_size/pixel_data_type carry over from the last frame processed)
        pixel_data_matrix_4d = numpy.zeros(tuple(image_size + [len(data)]), dtype=numpy.dtype(pixel_data_type))
        for index, frame in enumerate(data):
            pixel_data_matrix_4d[:, :, :, index] = frame
    else:
        pixel_data_matrix_4d = None
    return main_header, subheaders, pixel_data_matrix_4d
| [
"numpy.dtype",
"struct.calcsize",
"numpy.reshape",
"pathlib.Path",
"os.path.join",
"os.path.isfile",
"numpy.append",
"struct.unpack",
"json.load",
"numpy.frombuffer",
"re.findall"
] | [((1091, 1116), 'os.path.isfile', 'path.isfile', (['path_to_ecat'], {}), '(path_to_ecat)\n', (1102, 1116), False, 'from os import path\n'), ((516, 533), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (525, 533), False, 'import json\n'), ((4941, 4968), 'struct.calcsize', 'struct.calcsize', (['struct_fmt'], {}), '(struct_fmt)\n', (4956, 4968), False, 'import struct\n'), ((5140, 5176), 'struct.unpack', 'struct.unpack', (['struct_fmt', 'raw_bytes'], {}), '(struct_fmt, raw_bytes)\n', (5153, 5176), False, 'import struct\n'), ((138, 160), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'import pathlib\n'), ((434, 471), 'os.path.join', 'join', (['parent_dir', '"""ecat_headers.json"""'], {}), "(parent_dir, 'ecat_headers.json')\n", (438, 471), False, 'from os.path import join\n'), ((7913, 7952), 'numpy.reshape', 'numpy.reshape', (['read_byte_array', '(-1, 4)'], {}), '(read_byte_array, (-1, 4))\n', (7926, 7952), False, 'import numpy\n'), ((8468, 8534), 'numpy.append', 'numpy.append', (['directory', 'reshaped[:, 1:directory_size + 1]'], {'axis': '(1)'}), '(directory, reshaped[:, 1:directory_size + 1], axis=1)\n', (8480, 8534), False, 'import numpy\n'), ((3310, 3339), 're.findall', 're.findall', (['"""\\\\d+"""', 'data_type'], {}), "('\\\\d+', data_type)\n", (3320, 3339), False, 'import re\n'), ((7773, 7791), 'numpy.dtype', 'numpy.dtype', (['""">i4"""'], {}), "('>i4')\n", (7784, 7791), False, 'import numpy\n'), ((16489, 16517), 'numpy.dtype', 'numpy.dtype', (['pixel_data_type'], {}), '(pixel_data_type)\n', (16500, 16517), False, 'import numpy\n'), ((15020, 15043), 'numpy.dtype', 'numpy.dtype', (['formatting'], {}), '(formatting)\n', (15031, 15043), False, 'import numpy\n'), ((15388, 15496), 'numpy.frombuffer', 'numpy.frombuffer', (['pixel_data'], {'dtype': 'pixel_data_type', 'count': '(image_size[0] * image_size[1] * image_size[2])'}), '(pixel_data, dtype=pixel_data_type, count=image_size[0] *\n image_size[1] * 
image_size[2])\n', (15404, 15496), False, 'import numpy\n')] |
"""
PyramidNet for CIFAR, implemented in Chainer.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a84_cifar10',
'pyramidnet110_a84_cifar100', 'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet200_a240_bn_cifar10',
'pyramidnet200_a240_bn_cifar100', 'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv3x3_block, SimpleSequential
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(Chain):
    """
    PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10):
        super(CIFARPyramidNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            # Feature extractor: initial conv block, then one stage per entry of
            # `channels`, then the pre-activation + global average pooling.
            self.features = SimpleSequential()
            with self.features.init_scope():
                # initial 3x3 conv without its own activation (PyramidNet style)
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    activation=None,
                    activate=False))
                # `in_channels` is rebound as construction proceeds to track the
                # running channel count between consecutive units
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # downsample at the first unit of every stage except the first
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), PyrUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                bottleneck=bottleneck))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, 'post_activ', PreResActivation(in_channels=in_channels))
                # 8x8 average pooling collapses the final 8x8 feature map
                setattr(self.features, 'final_pool', partial(
                    F.average_pooling_2d,
                    ksize=8,
                    stride=1))
            # Classifier head: flatten then a fully connected layer
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, 'flatten', partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, 'fc', L.Linear(
                    in_size=in_channels,
                    out_size=classes))

    def __call__(self, x):
        """Run the forward pass: feature extractor followed by the classifier head."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_pyramidnet_cifar(classes,
                         blocks,
                         alpha,
                         bottleneck,
                         model_name=None,
                         pretrained=False,
                         root=os.path.join('~', '.chainer', 'models'),
                         **kwargs):
    """
    Create PyramidNet for CIFAR model with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes (10 or 100).
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value (total channel growth across the network).
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16

    # Pyramidal widening rule: every unit adds alpha / total_units channels on
    # top of the previous unit's width.
    growth_add = float(alpha) / float(sum(layers))
    channels = []
    last_width = init_block_channels
    for stage_units in layers:
        stage_widths = [(unit + 1) * growth_add + last_width for unit in range(stage_units)]
        channels.append(stage_widths)
        last_width = stage_widths[-1]
    channels = [[int(round(width)) for width in stage] for stage in channels]
    if bottleneck:
        # bottleneck units expand their output by a factor of 4
        channels = [[width * 4 for width in stage] for stage in channels]

    net = CIFARPyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)

    return net
def pyramidnet110_a48_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar10",
**kwargs)
def pyramidnet110_a48_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar100",
**kwargs)
def pyramidnet110_a84_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar10",
**kwargs)
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs)
def pyramidnet110_a270_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar10",
**kwargs)
def pyramidnet110_a270_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar100",
**kwargs)
def pyramidnet164_a270_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar10",
**kwargs)
def pyramidnet164_a270_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar100",
**kwargs)
def pyramidnet200_a240_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar10",
**kwargs)
def pyramidnet200_a240_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar100",
**kwargs)
def pyramidnet236_a220_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar10",
**kwargs)
def pyramidnet236_a220_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar100",
**kwargs)
def pyramidnet272_a200_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_cifar10",
**kwargs)
def pyramidnet272_a200_bn_cifar100(classes=100, **kwargs):
    """Build the PyramidNet-272 (alpha=200, bottleneck) model for CIFAR-100.

    Described in 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(
        classes=classes, blocks=272, alpha=200, bottleneck=True,
        model_name="pyramidnet272_a200_bn_cifar100", **kwargs)
def _test():
    """Instantiate every CIFAR PyramidNet variant, verify its parameter
    count against the expected value, and run one dummy forward pass.
    """
    import numpy as np
    import chainer
    # Inference mode; no pretrained weights are loaded.
    chainer.global_config.train = False
    pretrained = False
    # (model builder, number of classes) pairs to exercise.
    models = [
        (pyramidnet110_a48_cifar10, 10),
        (pyramidnet110_a48_cifar100, 100),
        (pyramidnet110_a84_cifar10, 10),
        (pyramidnet110_a84_cifar100, 100),
        (pyramidnet110_a270_cifar10, 10),
        (pyramidnet110_a270_cifar100, 100),
        (pyramidnet164_a270_bn_cifar10, 10),
        (pyramidnet164_a270_bn_cifar100, 100),
        (pyramidnet200_a240_bn_cifar10, 10),
        (pyramidnet200_a240_bn_cifar100, 100),
        (pyramidnet236_a220_bn_cifar10, 10),
        (pyramidnet236_a220_bn_cifar100, 100),
        (pyramidnet272_a200_bn_cifar10, 10),
        (pyramidnet272_a200_bn_cifar100, 100),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, classes=classes)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Each assert pins the expected parameter count of one variant.
        assert (model != pyramidnet110_a48_cifar10 or weight_count == 1772706)
        assert (model != pyramidnet110_a48_cifar100 or weight_count == 1778556)
        assert (model != pyramidnet110_a84_cifar10 or weight_count == 3904446)
        assert (model != pyramidnet110_a84_cifar100 or weight_count == 3913536)
        assert (model != pyramidnet110_a270_cifar10 or weight_count == 28485477)
        assert (model != pyramidnet110_a270_cifar100 or weight_count == 28511307)
        assert (model != pyramidnet164_a270_bn_cifar10 or weight_count == 27216021)
        assert (model != pyramidnet164_a270_bn_cifar100 or weight_count == 27319071)
        assert (model != pyramidnet200_a240_bn_cifar10 or weight_count == 26752702)
        assert (model != pyramidnet200_a240_bn_cifar100 or weight_count == 26844952)
        assert (model != pyramidnet236_a220_bn_cifar10 or weight_count == 26969046)
        assert (model != pyramidnet236_a220_bn_cifar100 or weight_count == 27054096)
        assert (model != pyramidnet272_a200_bn_cifar10 or weight_count == 26210842)
        assert (model != pyramidnet272_a200_bn_cifar100 or weight_count == 26288692)
        # Forward a single zero image of CIFAR shape (NCHW).
        x = np.zeros((1, 3, 32, 32), np.float32)
        y = net(x)
        assert (y.shape == (1, classes))
if __name__ == "__main__":
    _test()
| [
"functools.partial",
"chainer.links.Linear",
"numpy.zeros",
"os.path.join"
] | [((4079, 4118), 'os.path.join', 'os.path.join', (['"""~"""', '""".chainer"""', '"""models"""'], {}), "('~', '.chainer', 'models')\n", (4091, 4118), False, 'import os\n'), ((17779, 17815), 'numpy.zeros', 'np.zeros', (['(1, 3, 32, 32)', 'np.float32'], {}), '((1, 3, 32, 32), np.float32)\n', (17787, 17815), True, 'import numpy as np\n'), ((3258, 3306), 'functools.partial', 'partial', (['F.average_pooling_2d'], {'ksize': '(8)', 'stride': '(1)'}), '(F.average_pooling_2d, ksize=8, stride=1)\n', (3265, 3306), False, 'from functools import partial\n'), ((3506, 3549), 'functools.partial', 'partial', (['F.reshape'], {'shape': '(-1, in_channels)'}), '(F.reshape, shape=(-1, in_channels))\n', (3513, 3549), False, 'from functools import partial\n'), ((3635, 3682), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'in_channels', 'out_size': 'classes'}), '(in_size=in_channels, out_size=classes)\n', (3643, 3682), True, 'import chainer.links as L\n')] |
#================================================================
# Functions to show the evolutions of omega in function of t
#================================================================
# Load modules
import numpy as np
import matplotlib.pyplot as plt
# Load personal modules
from stellapy.utils.decorators import verbose_wrapper
from stellapy.plot.utils import load_plotbox2d
from stellapy.simulations.utils.get_simulation import get_simulation
from stellapy.simulations.utils.get_experiment import get_experiment
from stellapy.plot.utils import get_axisOfScan
from stellapy.plot.utils import get_lineColors
@verbose_wrapper
def plot_potentialVsZ(\
    # Specify which simulations to plot
    research=None,\
    experiment_id="All experiments",\
    simulation_id="All simulations",\
    # Specify data range
    units="normalized",\
    x_quantity="z",\
    y_quantity="phi_real",\
    x_range=None,\
    y_range=None,\
    plotted_modes="unstable",\
    kx_range=-0.0,\
    # NOTE(review): mutable default ky_range=[0,100] is shared across calls —
    # safe only if it is never mutated downstream; confirm.
    ky_range=[0,100],\
    # Labels
    x_label=None,\
    y_label=None,\
    title=None,\
    # For the GUI the figure object already exists
    ax=None,\
    Progress=None,\
    root=None,\
    # Appearance options
    font_size=20,\
    handle_length=1):
    ''' Plot phi(z) to see if <nfield_periods> was big enough.
    The z-axis can be displaced in zeta <zeta>, in z <z>, in poloidal turns <pol> and toroidal turns <tor>.
    Data files needed
    -----------------
    *.in
    *.out.nc or *.out.h5
    *.final_fields
    Parameters
    ----------
    units : {normalized; SI units}
    y_quantity : {phi2; phi_real; phi_imag}
    x_quantity : {zeta; z; pol}
    Notes
    -----
    Structure of the *.final_fields file
    # z   z-thet0   aky   akx   real(phi)   imag(phi)   real(apar)   imag(apar)   z_eqarc-thet0
    '''
    # Update the progress bar of the GUI
    if Progress: Progress.move(0,"Plot the potential versus z.")
    # Decide whether to scan modes along the x-axis or y-axis
    scan, k_fixed, k_range = get_axisOfScan(kx_range, ky_range, root)
    if scan == False: return
    # Set the labels, and change the standard labels depending on the units
    label = determine_labels(x_label, y_label, title, units)
    load_plotbox2d(x_label=label[x_quantity], y_label=label[y_quantity], title=label["title"], ax=ax);
    # Get a reference to the experiment and its simulations
    experiment = get_experiment(research, experiment_id)[0]
    simulations = get_simulation(experiment, simulation_id)
    # The number of lines defines the colors so the colors of the lines change gradually with k
    color = get_lineColors(simulations, scan, k_fixed, k_range, plotted_modes); plot_i = 0
    # Save the axis limits
    xlims=[0,0]; ylims=[0,0]
    # Iterate over the simulations
    for simulation in simulations:
        # Get the modes of this simulation that need to be plotted
        vec_k = simulation.get_modesForAOneDimensionalScan(scan, k_fixed, k_range, plotted_modes)
        # Get the z and potential data from the simulation
        z_data = simulation.zeta if x_quantity=="zeta" else (simulation.z_kxky if x_quantity=="z" else simulation.z_poloidal)
        y_data = simulation.phi_real if y_quantity=="phi_real" else (simulation.phi_imag if y_quantity=="phi_imag" else simulation.phi2)
        # Iterate over the modes
        for k in vec_k:
            # Update the progress bar of the GUI
            if Progress: Progress.move(vec_k.index(k)/len(vec_k)*100,"Plotting modes ("+str(vec_k.index(k))+"/"+str(len(vec_k))+")")
            # Labels for the legend: the fixed k decides which k is scanned
            simulation_id = simulation.marker_label.replace('_', ' ')
            if isinstance(kx_range, float): label = "$k_y\\rho_i = " + "{0:.2f}".format(k) + "$"
            if isinstance(ky_range, float): label = "$k_x\\rho_i = " + "{0:.2f}".format(k) + "$"
            if len(simulations) > 1: label = label + ": " + simulation_id
            if len(vec_k)==1: label = simulation_id
            # Get the z and potential data for this specific mode and remove the NaN and Inf values
            i_kx, i_ky = simulation.get_indicesOfMode(scan, k_fixed, k)
            vec_z = z_data[:,i_kx,i_ky][np.isfinite(y_data[:,i_kx,i_ky])]
            vec_y = y_data[:,i_kx,i_ky][np.isfinite(y_data[:,i_kx,i_ky])]
            # Plot the normalized potential since we are interested in the shape;
            # normalization uses the largest magnitude of the finite samples.
            try: vec_y = vec_y/np.nanmax([np.nanmax(vec_y), abs(np.nanmin(vec_y))])
            except: print("Warning: The potential vector was empty.")
            # Plot omega(t) and gamma(t)
            ax.plot(vec_z, vec_y, lw=2, linestyle='-', color=color[plot_i], label=label, clip_on=False); plot_i += 1
            # Keep track of the axis limits
            xlims = [min(z_data.min(), xlims[0]), max(z_data.max(), xlims[1])]
            ylims = [min(y_data.min(), ylims[0]), max(y_data.max(), ylims[1])]
    # If there were modes to be plotted, rescale and show the figure.
    if xlims != [0,0] and ylims != [0,0]:
        # Rescale the axis
        if x_range==None: x_range = xlims
        if y_range==None: y_range = [0,1] if y_quantity=="phi2" else [-1,1]
        rescale_axes(ax, x_range, y_range, units, font_size, handle_length)
        # Show the figure if we're not in a GUI
        if not root: plt.show()
    # If there were no modes, just clear the axis
    else:
        ax.clear()
        print("WARNING: There were no modes to be plotted.")
    # End
    if True: return
#################################################################
# METHODS
#################################################################
def rescale_axes(ax, x_range, y_range, units, font_size, handle_length):
    """Apply axis limits, per-unit tick formatting and the legend to `ax`."""
    ax.set_xlim(x_range)
    ax.set_ylim(y_range)
    # Plain tick labels for normalized data, scientific notation for SI units.
    if units=="normalized":
        for which_axis in ('x', 'y'):
            ax.ticklabel_format(style='plain', axis=which_axis)
    if units=="SI units":
        for which_axis in ('x', 'y'):
            ax.ticklabel_format(style='sci', scilimits=(0,0), axis=which_axis)
    # Legend sits just outside the axes, to the upper right.
    legend_options = {
        'loc': 'upper center',
        'bbox_to_anchor': (1.11, 1.03),
        'shadow': True,
        'ncol': 1,
        'labelspacing': 0.0,
        'prop': {'size': font_size},
        'handlelength': handle_length,
    }
    ax.legend(**legend_options)
    return
#----------------------------
def determine_labels(x_label, y_label, title, units):
    """Return the axis/title label dict, filling unit-appropriate defaults.

    A label is replaced by the default for the requested unit system only
    when it is still None or equal to one of the known default strings, so
    caller-supplied custom labels are preserved.

    Parameters
    ----------
    x_label, y_label, title : str or None
        Caller-supplied labels; None means "use the default".
    units : {'normalized', 'SI units'}
        Unit system that selects which default text is used.

    Returns
    -------
    dict
        Keys: 'z', 'zeta', 'pol', 'phi_real', 'phi_imag', 'phi2', 'title'.
    """
    label = {'z' : x_label, 'zeta' : x_label, 'pol' : x_label, 'title' : title,\
             'phi_real' : y_label, 'phi_imag' : y_label, 'phi2' : y_label}
    if label["z"] in [None, '$z/a$', '$z$ [m]']:
        if units=="normalized": label["z"] = '$z/a$'
        if units=="SI units":   label["z"] = '$z$ [m]'
    if label["zeta"] in [None, '$\\zeta$']:
        if units=="normalized": label["zeta"] = '$\\zeta$'
        if units=="SI units":   label["zeta"] = '$\\zeta$'
    if label["pol"] in [None, '$Poloidal turns$' ]:
        if units=="normalized": label["pol"] = '$Poloidal turns$'
        if units=="SI units":   label["pol"] = '$Poloidal turns$'
    if label["phi_real"] in [None, 'Re($\\phi)/Re($\\phi)_{max}$']:
        if units=="normalized": label["phi_real"] = 'Re($\\phi)/Re($\\phi)_{max}$'
        if units=="SI units":   label["phi_real"] = 'Re($\\phi)/Re($\\phi)_{max}$'
    # BUG FIX: the guard previously compared against 'Im($\phi)/Re($\phi)_{max}$',
    # which never matches the value assigned below, so a phi_imag default from an
    # earlier call was mistaken for a custom label and not refreshed.
    if label["phi_imag"] in [None, 'Im($\\phi$)/Im($\\phi)_{max}$']:
        if units=="normalized": label["phi_imag"] = 'Im($\\phi$)/Im($\\phi)_{max}$'
        if units=="SI units":   label["phi_imag"] = 'Im($\\phi$)/Im($\\phi)_{max}$'
    if label["phi2"] in [None, '$|\\phi|^2/|\\phi_{max}|^2$']:
        if units=="normalized": label["phi2"] = '$|\\phi|^2/|\\phi_{max}|^2$'
        if units=="SI units":   label["phi2"] = '$|\\phi|^2/|\\phi_{max}|^2$'
    return label
| [
"stellapy.simulations.utils.get_simulation.get_simulation",
"stellapy.plot.utils.load_plotbox2d",
"stellapy.plot.utils.get_lineColors",
"numpy.isfinite",
"stellapy.simulations.utils.get_experiment.get_experiment",
"numpy.nanmax",
"stellapy.plot.utils.get_axisOfScan",
"numpy.nanmin",
"matplotlib.pypl... | [((2334, 2374), 'stellapy.plot.utils.get_axisOfScan', 'get_axisOfScan', (['kx_range', 'ky_range', 'root'], {}), '(kx_range, ky_range, root)\n', (2348, 2374), False, 'from stellapy.plot.utils import get_axisOfScan\n'), ((2552, 2654), 'stellapy.plot.utils.load_plotbox2d', 'load_plotbox2d', ([], {'x_label': 'label[x_quantity]', 'y_label': 'label[y_quantity]', 'title': "label['title']", 'ax': 'ax'}), "(x_label=label[x_quantity], y_label=label[y_quantity], title=\n label['title'], ax=ax)\n", (2566, 2654), False, 'from stellapy.plot.utils import load_plotbox2d\n'), ((2796, 2837), 'stellapy.simulations.utils.get_simulation.get_simulation', 'get_simulation', (['experiment', 'simulation_id'], {}), '(experiment, simulation_id)\n', (2810, 2837), False, 'from stellapy.simulations.utils.get_simulation import get_simulation\n'), ((2951, 3017), 'stellapy.plot.utils.get_lineColors', 'get_lineColors', (['simulations', 'scan', 'k_fixed', 'k_range', 'plotted_modes'], {}), '(simulations, scan, k_fixed, k_range, plotted_modes)\n', (2965, 3017), False, 'from stellapy.plot.utils import get_lineColors\n'), ((2735, 2774), 'stellapy.simulations.utils.get_experiment.get_experiment', 'get_experiment', (['research', 'experiment_id'], {}), '(research, experiment_id)\n', (2749, 2774), False, 'from stellapy.simulations.utils.get_experiment import get_experiment\n'), ((5762, 5772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5770, 5772), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4626), 'numpy.isfinite', 'np.isfinite', (['y_data[:, i_kx, i_ky]'], {}), '(y_data[:, i_kx, i_ky])\n', (4603, 4626), True, 'import numpy as np\n'), ((4666, 4700), 'numpy.isfinite', 'np.isfinite', (['y_data[:, i_kx, i_ky]'], {}), '(y_data[:, i_kx, i_ky])\n', (4677, 4700), True, 'import numpy as np\n'), ((4837, 4853), 'numpy.nanmax', 'np.nanmax', (['vec_y'], {}), '(vec_y)\n', (4846, 4853), True, 'import numpy as np\n'), ((4859, 4875), 'numpy.nanmin', 'np.nanmin', (['vec_y'], {}), 
'(vec_y)\n', (4868, 4875), True, 'import numpy as np\n')] |
from typing import Tuple, List
from pathlib import Path
from itertools import chain
from functools import reduce
import cv2
import numpy as np
import tensorflow.keras as keras
from utils.transform import Transform
class Eval:
    """Holds one image loaded from disk and saves a processed result.

    Typical use: Eval(path).set_result(model_output).to_png(path).
    """

    def __init__(self, filename: str):
        # Read the image with OpenCV, scale to [0, 1] and add a leading
        # batch dimension so it can be fed to a model directly.
        self.image = np.expand_dims(cv2.imread(filename) / 255., axis=0)

    def set_result(self, image: np.ndarray):
        """Replace the stored image with a result; returns self for chaining."""
        self.image = image
        return self

    def to_png(self, filename: str):
        """Write the stored image to '<stem>-result.<ext>' next to `filename`."""
        *path, ext = filename.split('.')
        # BUG FIX: the original concatenation dropped the '.' before the
        # extension, producing file names like 'image-resultpng'.
        filename = '.'.join(path) + '-result.' + ext
        cv2.imwrite(filename, self.image)
class Dataset(keras.utils.Sequence):
    """Keras sequence producing (source, target) image pairs from CIFAR-10.

    The same base image (scaled to [0, 1]) is pushed through two Transform
    chains to create the model input (source) and the training target.
    """
    def __init__(self, train: bool = True,
                 source_transforms: List[Transform] = None,
                 target_transforms: List[Transform] = None,
                 batch: int = 32, shuffle: bool = True):
        self.batch = batch
        self.shuffle = shuffle
        self.channels = 3
        self.is_training = True
        # Labels are discarded: this is an image-to-image dataset.
        (self.x_train, _), (self.x_test, _) = keras.datasets.cifar10.load_data()
        # NOTE(review): the `train` argument is ignored; x_train is always
        # served even in evaluation mode — confirm intended.
        self.images = self.x_train
        self.size = self.x_train[0].shape[:2]
        self.source_transforms = source_transforms or []
        self.target_transforms = target_transforms or []
        self.indices = np.arange(len(self.x_train))
    def train(self, flag: bool = True):
        # Toggle training mode (does not change which images are served).
        self.is_training = flag
    def eval(self):
        # Switch to evaluation mode.
        self.train(False)
    def on_epoch_end(self) \
            -> None:
        # Reshuffle sample order between epochs when requested.
        if self.shuffle:
            np.random.shuffle(self.indices)
    def __len__(self) \
            -> int:
        # NOTE(review): returns the number of images, not the number of
        # batches; keras.utils.Sequence expects batches per epoch — confirm.
        return len(self.images)
    def __getitem__(self, item: int) \
            -> Tuple[np.ndarray, np.ndarray]:
        # Rotates the index array by `item` and takes the first `batch`
        # entries, so consecutive items overlap heavily — presumably
        # intentional; verify against the training loop.
        sources = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
        targets = np.empty((self.batch, *self.size, self.channels), dtype=np.float32)
        indices = np.roll(self.indices, item)
        for b in range(self.batch):
            image = self.images[indices[b]]
            # reduce() threads the scaled image through each transform in order.
            sources[b] = reduce(lambda i, t: t(i), [image / 255.] + self.source_transforms)
            targets[b] = reduce(lambda i, t: t(i), [image / 255.] + self.target_transforms)
        return sources, targets
| [
"cv2.imwrite",
"numpy.roll",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.empty",
"cv2.imread",
"numpy.random.shuffle"
] | [((582, 615), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'self.image'], {}), '(filename, self.image)\n', (593, 615), False, 'import cv2\n'), ((1038, 1072), 'tensorflow.keras.datasets.cifar10.load_data', 'keras.datasets.cifar10.load_data', ([], {}), '()\n', (1070, 1072), True, 'import tensorflow.keras as keras\n'), ((1744, 1811), 'numpy.empty', 'np.empty', (['(self.batch, *self.size, self.channels)'], {'dtype': 'np.float32'}), '((self.batch, *self.size, self.channels), dtype=np.float32)\n', (1752, 1811), True, 'import numpy as np\n'), ((1830, 1897), 'numpy.empty', 'np.empty', (['(self.batch, *self.size, self.channels)'], {'dtype': 'np.float32'}), '((self.batch, *self.size, self.channels), dtype=np.float32)\n', (1838, 1897), True, 'import numpy as np\n'), ((1917, 1944), 'numpy.roll', 'np.roll', (['self.indices', 'item'], {}), '(self.indices, item)\n', (1924, 1944), True, 'import numpy as np\n'), ((1530, 1561), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (1547, 1561), True, 'import numpy as np\n'), ((305, 325), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (315, 325), False, 'import cv2\n')] |
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
class Preprocessor:
    """Tabular preprocessing helpers for a loan-status dataset.

    Wraps pandas/sklearn utilities for scaling, class re-sampling,
    train/test splitting, categorical encoding and a few dataset-specific
    feature transforms.  Most methods take a DataFrame and return a
    (possibly modified) DataFrame.
    """
    def __init__(self):
        pass
    def normalize(self, df, columns):
        # Min-max scale the given columns; mutates and returns df.
        min_max_scaler = preprocessing.MinMaxScaler()
        df[columns] = min_max_scaler.fit_transform(df[columns])
        return df
    def select(self, input_df, columns):
        # Return only the requested columns.
        return input_df[columns]
    def filter(self, input_df, col, valid_status):
        # Keep rows whose `col` value is one of `valid_status`.
        return input_df[input_df[col].isin(valid_status)]
    def transform(self):
        # Placeholder; no generic transform implemented.
        pass
    def under_sample(self, input_df, ratio=1.0, random_state=3):
        """Undersamples the majority class to reach a ratio by default
        equal to 1 between the majority and minority classes"""
        # NOTE(review): assumes "paid" is the majority class; value_counts()
        # orders by frequency, so the unpacking relies on that — confirm.
        count_class_0, count_class_1 = input_df["Status"].value_counts()
        df_class_0 = input_df[input_df["Status"] == "paid"]
        df_class_1 = input_df[input_df["Status"] == "defaulted"]
        df_class_0_under = df_class_0.sample(
            int(ratio * count_class_1), random_state=random_state
        )
        df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)
        return df_train_under
    def over_sample(self, input_df, ratio=1.0, random_state=3):
        """Oversamples the minority class (with replacement) to reach a ratio
        by default equal to 1 between the majority and minority classes"""
        count_class_0, count_class_1 = input_df["Status"].value_counts()
        df_class_0 = input_df[input_df["Status"] == "paid"]
        df_class_1 = input_df[input_df["Status"] == "defaulted"]
        df_class_1_over = df_class_1.sample(
            int(ratio * count_class_0), replace=True, random_state=random_state
        )
        df_train_over = pd.concat([df_class_0, df_class_1_over], axis=0)
        return df_train_over
    def split(self, input_df, test_size=0.3, random_state=3):
        # Random train/test split; returns (train_df, test_df).
        train, test = train_test_split(input_df, test_size=test_size, random_state=random_state)
        return train, test
    def ohe_encode(self, input_df, categorical_columns, ordinal_columns):
        # One-hot encode the categorical columns and append the ordinal
        # columns unchanged; returns a new DataFrame of the encoded matrix.
        ohe = preprocessing.OneHotEncoder(handle_unknown="ignore", sparse=False)
        X = np.transpose(ohe.fit_transform(input_df[categorical_columns]))
        for c in ordinal_columns:
            X = np.vstack([X, input_df[c]])
        X = np.transpose(X)
        features = ohe.get_feature_names(categorical_columns).tolist()
        for c in ordinal_columns:
            features.append(c)
        X_df = pd.DataFrame(X, columns=features)
        return X_df
    def label_encode(self, df, categorical_columns):
        # Replace each categorical column by its integer category codes.
        for cal in categorical_columns:
            df[cal] = df[cal].astype('category')
        cat_cols = df.select_dtypes(['category']).columns
        df[cat_cols] = df[cat_cols].apply(lambda x: x.cat.codes)
        return df
    def write_to_csv(self, input_df, path):
        # Persist the DataFrame (index included) to a CSV file.
        input_df.to_csv(path)
    def transformFundedTime(self, df):
        # A new feature "Funded Time" gives the exact time when the loan was funded.
        # 0.833 ~ 10/12 converts the month number into a fraction of a year.
        df["Funded Time"] = df.apply(lambda row: row['Funded Date.year'] + 0.833 * row['Funded Date.month'], axis=1)
        return df
    def transformCountryCurrency(self, df):
        # Combine country and currency into one categorical feature.
        df['Country Currency'] = df.apply(lambda row: row.Country + '_' + row.Currency, axis=1)
        return df
    def transformStatus(self, df):
        # Binarize the target: 1 for "defaulted", 0 otherwise.
        df['Status'] = pd.get_dummies(df["Status"], columns=["Status"])["defaulted"]
        return df
| [
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"pandas.get_dummies",
"pandas.concat",
"numpy.vstack",
"pandas.DataFrame",
"numpy.transpose",
"sklearn.preprocessing.MinMaxScaler"
] | [((249, 277), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (275, 277), False, 'from sklearn import preprocessing\n'), ((1134, 1183), 'pandas.concat', 'pd.concat', (['[df_class_0_under, df_class_1]'], {'axis': '(0)'}), '([df_class_0_under, df_class_1], axis=0)\n', (1143, 1183), True, 'import pandas as pd\n'), ((1775, 1823), 'pandas.concat', 'pd.concat', (['[df_class_0, df_class_1_over]'], {'axis': '(0)'}), '([df_class_0, df_class_1_over], axis=0)\n', (1784, 1823), True, 'import pandas as pd\n'), ((1938, 2012), 'sklearn.model_selection.train_test_split', 'train_test_split', (['input_df'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(input_df, test_size=test_size, random_state=random_state)\n', (1954, 2012), False, 'from sklearn.model_selection import train_test_split\n'), ((2130, 2196), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'sparse': '(False)'}), "(handle_unknown='ignore', sparse=False)\n", (2157, 2196), False, 'from sklearn import preprocessing\n'), ((2363, 2378), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (2375, 2378), True, 'import numpy as np\n'), ((2531, 2564), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'features'}), '(X, columns=features)\n', (2543, 2564), True, 'import pandas as pd\n'), ((2323, 2350), 'numpy.vstack', 'np.vstack', (['[X, input_df[c]]'], {}), '([X, input_df[c]])\n', (2332, 2350), True, 'import numpy as np\n'), ((3423, 3471), 'pandas.get_dummies', 'pd.get_dummies', (["df['Status']"], {'columns': "['Status']"}), "(df['Status'], columns=['Status'])\n", (3437, 3471), True, 'import pandas as pd\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
def brier_score(y_true, y_pred):
    """Mean of the summed squared differences between rounded labels and
    rounded predictions (Keras backend tensors).

    NOTE(review): rounding y_pred collapses probabilities to {0, 1}, so this
    is not the standard Brier score (which uses raw probabilities) — confirm.
    """
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    # print(y_true_f)
    # print(y_pred_f)
    # print(K.mean(K.sum(K.pow(y_pred_f - y_true_f, 2))))
    # np.mean(np.sum((probs - targets) ** 2, axis=1))
    return K.mean(K.sum(K.pow(y_pred_f - y_true_f, 2)))
def actual_accuracy_and_confidence(y_true, y_pred, uncertainty):
    """Per-element accuracy (label vs rounded prediction, as float32) and
    confidence (1 - uncertainty), returned alongside the raw predictions
    and labels for downstream calibration analysis."""
    acc = K.cast(y_true == K.round(y_pred), dtype='float32')
    # print(y_true, y_pred)
    # print(K.cast(y_true == K.round(y_pred), dtype='float32'))
    # print(K.cast(y_true == K.round(y_pred), dtype='float32'))
    #return acc, conf
    return acc, 1 - uncertainty, y_pred, y_true
def entropy(y_pred):
    """Element-wise entropy term -p * log(p); 1e-10 guards against log(0)."""
    return (-y_pred * tf.math.log(y_pred + 1e-10))
def crossentropy(y_pred):
    """Negative summed p * log(p) over y_pred; 1e-10 guards against log(0)."""
    return - K.sum(y_pred * tf.math.log(y_pred + 1e-10))
def compute_TP_and_TN(y_true, y_pred):
    """Return boolean masks of true positives and true negatives.

    Predictions are binarized by rounding to the nearest integer before
    being compared against the ground-truth labels.
    """
    hard_predictions = np.round(y_pred)
    true_positive_mask = (y_true == 1) & (hard_predictions == 1)
    true_negative_mask = (y_true == 0) & (hard_predictions == 0)
    return true_positive_mask, true_negative_mask
def compute_FTP_and_FTN(y_true, y_pred, uncertainty, thrds=(1, 0.8, 0.75, 0.6, 0.5, 0.4, 0.25, 0.2, 0.0)):
    """Count filtered true positives/negatives per uncertainty threshold.

    A TP/TN is counted at threshold `thrd` only if its uncertainty is below
    `thrd`; threshold 1 keeps everything (unfiltered counts) and is always
    included in the result.

    Parameters
    ----------
    y_true, y_pred : array-like
        Binary labels and predicted probabilities (rounded to {0, 1}).
    uncertainty : tensor with a .numpy() method (e.g. a tf/torch tensor)
        Per-sample uncertainty, compared against each threshold.
    thrds : iterable of float
        Thresholds to evaluate; 1 is appended if missing.

    Returns
    -------
    (FTPs, FTNs) : tuple of dicts
        Each maps threshold -> count.
    """
    y_pred_rounded = np.round(y_pred)
    TPs = (y_true == 1) & (y_pred_rounded == 1)
    TNs = (y_true == 0) & (y_pred_rounded == 0)
    # BUG FIX: `thrds` defaults to a tuple, which has no .append(); copy to
    # a list before (possibly) adding the unfiltered threshold 1, so callers
    # may pass any iterable without crashing.
    thrds = list(thrds)
    if 1 not in thrds:
        thrds.append(1)
    FTPs = {}
    FTNs = {}
    for thrd in sorted(thrds):
        if thrd == 1:
            FTPs[thrd] = np.sum(TPs)
            FTNs[thrd] = np.sum(TNs)
        else:
            # Keep only predictions whose uncertainty is below the threshold.
            FTPs[thrd] = np.sum(TPs & (uncertainty < thrd).numpy())
            FTNs[thrd] = np.sum(TNs & (uncertainty < thrd).numpy())
    return FTPs, FTNs
def compute_filtered_hard_dice(y_true, y_pred, uncertainty, thrds=(1, 0.8, 0.75, 0.6, 0.5, 0.4, 0.25, 0.2, 0.0), smooth=1e-3):
    """Hard Dice score (in percent) after masking out uncertain pixels.

    For each threshold, pixels whose uncertainty is >= the threshold are
    zeroed in both the rounded prediction and the rounded ground truth
    before the Dice overlap is computed; threshold 1 keeps every pixel.
    `smooth` prevents division by zero on empty masks.

    Returns a dict mapping threshold -> filtered Dice score.
    """
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    uncertainty_f = K.flatten(uncertainty)
    filtered_hard_dices = {}
    for thrd in sorted(thrds):
        if thrd == 1:
            filtered_y_true_f = y_true_f
            filtered_y_pred_f = y_pred_f
        else:
            # Zero-out (rather than drop) pixels with uncertainty >= thrd.
            filtered_y_true_f = tf.where(uncertainty_f < thrd, y_true_f, 0)
            filtered_y_pred_f = tf.where(uncertainty_f < thrd, y_pred_f, 0)
        intersection = K.sum(filtered_y_true_f * filtered_y_pred_f)
        filtered_hard_dices[thrd] = 100. * (2. * intersection + smooth) / (K.sum(filtered_y_true_f) + K.sum(filtered_y_pred_f) + smooth)
    return filtered_hard_dices
#def compute_correct_ece(accs, confds, n_bins, pred_probs):
def compute_mce_and_correct_ece(accs, confds, n_bins, pred_probs, y_true):
    """Maximum and expected calibration error over equal-width confidence bins.

    Bins span [min(confds), max(confds)] in `n_bins` equal widths.  For each
    non-empty bin the gap |mean accuracy - mean confidence| is computed; MCE
    is the largest gap, ECE the sample-count-weighted sum of the gaps.

    NOTE(review): `pred_probs` and `y_true` are only referenced by
    commented-out debug code — confirm they can be dropped from the signature.

    Returns
    -------
    (mce, ece) : tuple of floats.
    """
    # plot_x_pred_prob = []
    # plot_x_conf = []
    # plot_y = []
    bin_eces = []
    bin_mces = []
    accs = np.array(accs)
    confds = np.array(confds)
    # pred_probs = pred_probs.flatten()
    # y_true = y_true.flatten()
    probs_min = np.min(confds)
    h_w_wise_bins_len = (np.max(confds) - probs_min) / n_bins
    for j in range(n_bins):
        # tf.print(tf.convert_to_tensor(accs).shape, tf.convert_to_tensor(probs).shape)
        #print(f'\n---BORDERS of {j} bin:', probs_min + (h_w_wise_bins_len * j), probs_min + (h_w_wise_bins_len * (j + 1)))
        # First bin is closed on both ends; later bins are half-open so each
        # confidence value lands in exactly one bin.
        if j == 0:
            include_flags = np.logical_and(confds >= probs_min + (h_w_wise_bins_len * j), confds <= probs_min + (h_w_wise_bins_len * (j + 1)))
        else:
            include_flags = np.logical_and(confds > probs_min + (h_w_wise_bins_len * j), confds <= probs_min + (h_w_wise_bins_len * (j + 1)))
        if np.sum(include_flags) == 0:
            continue
        included_accs = accs[include_flags]
        included_probs = confds[include_flags]
        #print(np.unique(included_accs, return_counts=True))
        #print(np.unique(np.round(np.asarray(pred_probs[include_flags])) == np.asarray(y_true[include_flags]), return_counts=True))
        #print(np.unique(np.abs(np.asarray(pred_probs[include_flags]) - np.asarray(y_true[include_flags]))<=0.25, return_counts=True))
        # a = (np.abs(np.asarray(pred_probs[include_flags]) - np.asarray(y_true[include_flags])))
        #print(np.min(a), np.max(a))
        mean_accuracy = included_accs.mean()
        #print(tf.reduce_mean(included_accs))
        mean_confidence = included_probs.mean()
        # Gap weighted by the fraction of samples that fell into this bin.
        bin_scaled_ece = np.abs(mean_accuracy-mean_confidence)*np.sum(include_flags, axis=-1)/accs.shape[-1]
        # bin_scaled_ece = np.abs(mean_accuracy-mean_confidence)*len(include_flags)/
        bin_eces.append(bin_scaled_ece)
        bin_mces.append(np.abs(mean_accuracy-mean_confidence))
        # plot_x_pred_prob.append(pred_probs[include_flags].mean())
        # plot_x_conf.append(mean_confidence)
        # plot_y.append(mean_accuracy)
    # pixel_wise_ece = np.sum(np.asarray(bin_eces), axis=0) / accs.shape[-1]
    #print('\nPixel-wise eces:\n', np.asarray(bin_eces)/accs.shape[-1])
    #print('\nX pred_prob:\n', np.asarray(plot_x_pred_prob))
    #print('\nX conf:\n', np.asarray(plot_x_conf))
    #print('\nY:\n', np.asarray(plot_y))
    return max(bin_mces), np.sum(bin_eces)
def computeMI(x, y):
    """Mutual information I(X;Y) in bits between two discrete samples.

    Probabilities are estimated from the empirical frequencies of the
    paired observations in `x` and `y`.
    """
    mi = 0.0
    x_values = np.unique(x)
    y_values = np.unique(y)
    # Marginal distributions P(x) and P(y) from empirical counts.
    px = np.array([np.count_nonzero(x == xv) / float(len(x)) for xv in x_values])
    py = np.array([np.count_nonzero(y == yv) / float(len(y)) for yv in y_values])
    for i, xv in enumerate(x_values):
        if px[i] == 0.:
            continue
        matched = y[x == xv]
        if len(matched) == 0:
            continue
        # Joint distribution P(x, y) restricted to this x value.
        pxy = np.array([np.count_nonzero(matched == yv) / float(len(y)) for yv in y_values])
        # Ratio P(x,y) / (P(x) * P(y)); zero joint entries are skipped below.
        ratio = pxy[py > 0.] / py[py > 0.] / px[i]
        mi += sum(pxy[ratio > 0] * np.log2(ratio[ratio > 0]))
    return mi
| [
"numpy.abs",
"numpy.unique",
"tensorflow.keras.backend.flatten",
"numpy.logical_and",
"tensorflow.math.log",
"tensorflow.keras.backend.round",
"numpy.max",
"tensorflow.keras.backend.pow",
"numpy.array",
"numpy.sum",
"tensorflow.where",
"numpy.min",
"numpy.log2",
"numpy.round",
"tensorflo... | [((984, 1000), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (992, 1000), True, 'import numpy as np\n'), ((1247, 1263), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (1255, 1263), True, 'import numpy as np\n'), ((1967, 1989), 'tensorflow.keras.backend.flatten', 'K.flatten', (['uncertainty'], {}), '(uncertainty)\n', (1976, 1989), True, 'import tensorflow.keras.backend as K\n'), ((2809, 2823), 'numpy.array', 'np.array', (['accs'], {}), '(accs)\n', (2817, 2823), True, 'import numpy as np\n'), ((2837, 2853), 'numpy.array', 'np.array', (['confds'], {}), '(confds)\n', (2845, 2853), True, 'import numpy as np\n'), ((2942, 2956), 'numpy.min', 'np.min', (['confds'], {}), '(confds)\n', (2948, 2956), True, 'import numpy as np\n'), ((5186, 5198), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (5195, 5198), True, 'import numpy as np\n'), ((5218, 5230), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5227, 5230), True, 'import numpy as np\n'), ((140, 155), 'tensorflow.keras.backend.round', 'K.round', (['y_true'], {}), '(y_true)\n', (147, 155), True, 'import tensorflow.keras.backend as K\n'), ((182, 197), 'tensorflow.keras.backend.round', 'K.round', (['y_pred'], {}), '(y_pred)\n', (189, 197), True, 'import tensorflow.keras.backend as K\n'), ((810, 837), 'tensorflow.math.log', 'tf.math.log', (['(y_pred + 1e-10)'], {}), '(y_pred + 1e-10)\n', (821, 837), True, 'import tensorflow as tf\n'), ((1888, 1903), 'tensorflow.keras.backend.round', 'K.round', (['y_true'], {}), '(y_true)\n', (1895, 1903), True, 'import tensorflow.keras.backend as K\n'), ((1930, 1945), 'tensorflow.keras.backend.round', 'K.round', (['y_pred'], {}), '(y_pred)\n', (1937, 1945), True, 'import tensorflow.keras.backend as K\n'), ((2343, 2387), 'tensorflow.keras.backend.sum', 'K.sum', (['(filtered_y_true_f * filtered_y_pred_f)'], {}), '(filtered_y_true_f * filtered_y_pred_f)\n', (2348, 2387), True, 'import tensorflow.keras.backend as K\n'), ((5109, 5125), 'numpy.sum', 
'np.sum', (['bin_eces'], {}), '(bin_eces)\n', (5115, 5125), True, 'import numpy as np\n'), ((379, 408), 'tensorflow.keras.backend.pow', 'K.pow', (['(y_pred_f - y_true_f)', '(2)'], {}), '(y_pred_f - y_true_f, 2)\n', (384, 408), True, 'import tensorflow.keras.backend as K\n'), ((505, 520), 'tensorflow.keras.backend.round', 'K.round', (['y_pred'], {}), '(y_pred)\n', (512, 520), True, 'import tensorflow.keras.backend as K\n'), ((1513, 1524), 'numpy.sum', 'np.sum', (['TPs'], {}), '(TPs)\n', (1519, 1524), True, 'import numpy as np\n'), ((1550, 1561), 'numpy.sum', 'np.sum', (['TNs'], {}), '(TNs)\n', (1556, 1561), True, 'import numpy as np\n'), ((2200, 2243), 'tensorflow.where', 'tf.where', (['(uncertainty_f < thrd)', 'y_true_f', '(0)'], {}), '(uncertainty_f < thrd, y_true_f, 0)\n', (2208, 2243), True, 'import tensorflow as tf\n'), ((2276, 2319), 'tensorflow.where', 'tf.where', (['(uncertainty_f < thrd)', 'y_pred_f', '(0)'], {}), '(uncertainty_f < thrd, y_pred_f, 0)\n', (2284, 2319), True, 'import tensorflow as tf\n'), ((2982, 2996), 'numpy.max', 'np.max', (['confds'], {}), '(confds)\n', (2988, 2996), True, 'import numpy as np\n'), ((3306, 3421), 'numpy.logical_and', 'np.logical_and', (['(confds >= probs_min + h_w_wise_bins_len * j)', '(confds <= probs_min + h_w_wise_bins_len * (j + 1))'], {}), '(confds >= probs_min + h_w_wise_bins_len * j, confds <= \n probs_min + h_w_wise_bins_len * (j + 1))\n', (3320, 3421), True, 'import numpy as np\n'), ((3463, 3577), 'numpy.logical_and', 'np.logical_and', (['(confds > probs_min + h_w_wise_bins_len * j)', '(confds <= probs_min + h_w_wise_bins_len * (j + 1))'], {}), '(confds > probs_min + h_w_wise_bins_len * j, confds <= \n probs_min + h_w_wise_bins_len * (j + 1))\n', (3477, 3577), True, 'import numpy as np\n'), ((3588, 3609), 'numpy.sum', 'np.sum', (['include_flags'], {}), '(include_flags)\n', (3594, 3609), True, 'import numpy as np\n'), ((4588, 4627), 'numpy.abs', 'np.abs', (['(mean_accuracy - mean_confidence)'], {}), '(mean_accuracy 
- mean_confidence)\n', (4594, 4627), True, 'import numpy as np\n'), ((894, 921), 'tensorflow.math.log', 'tf.math.log', (['(y_pred + 1e-10)'], {}), '(y_pred + 1e-10)\n', (905, 921), True, 'import tensorflow as tf\n'), ((4355, 4394), 'numpy.abs', 'np.abs', (['(mean_accuracy - mean_confidence)'], {}), '(mean_accuracy - mean_confidence)\n', (4361, 4394), True, 'import numpy as np\n'), ((4393, 4423), 'numpy.sum', 'np.sum', (['include_flags'], {'axis': '(-1)'}), '(include_flags, axis=-1)\n', (4399, 4423), True, 'import numpy as np\n'), ((5753, 5770), 'numpy.log2', 'np.log2', (['t[t > 0]'], {}), '(t[t > 0])\n', (5760, 5770), True, 'import numpy as np\n'), ((2463, 2487), 'tensorflow.keras.backend.sum', 'K.sum', (['filtered_y_true_f'], {}), '(filtered_y_true_f)\n', (2468, 2487), True, 'import tensorflow.keras.backend as K\n'), ((2490, 2514), 'tensorflow.keras.backend.sum', 'K.sum', (['filtered_y_pred_f'], {}), '(filtered_y_pred_f)\n', (2495, 2514), True, 'import tensorflow.keras.backend as K\n')] |
import os
import glob
import time
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
import subprocess
from datetime import datetime
from tensorflow.python.ops import control_flow_ops
from modules.videosr_ops import *
class EASYFLOW(object):
def __init__(self):
self.num_frames = 7
self.crop_size = 100
self.max_steps = int(1e6)
self.batch_size = 20
self.learning_rate = 1e-4
self.train_dir = './easyflow_log/model1'
self.pathlist = open('./data/train/filelist_train.txt', 'rt').read().splitlines()
    def input_producer(self, batch_size=10):
        """Build a TF1 input pipeline: a queue of frame sequences is read,
        decoded, randomly cropped and assembled into batches."""
        def read_data():
            # Pick a random window of `num_frames` consecutive file names.
            data_seq = tf.random_crop(self.data_queue, [1, self.num_frames])
            input = tf.stack([tf.image.decode_png(tf.read_file(data_seq[0][i]), channels=3) for i in range(self.num_frames)])
            input = preprocessing(input)
            print('Input producer shape: ', input.get_shape())
            return input
        def preprocessing(input):
            # Scale uint8 pixels to [0, 1].
            input = tf.cast(input, tf.float32) / 255.0
            shape = tf.shape(input)[1:]
            size = tf.convert_to_tensor([self.crop_size, self.crop_size, 3], dtype=tf.int32, name="size")
            # Runtime guard: images must be at least crop_size x crop_size.
            check = tf.Assert(tf.reduce_all(shape >= size), ["Need value.shape >= size, got ", shape, size])
            shape = control_flow_ops.with_dependencies([check], shape)
            limit = shape - size + 1
            # Random spatial offset; the same crop is applied to every frame.
            offset = tf.random_uniform(tf.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=None) % limit
            offset_in = tf.concat([[0], offset], axis=-1)
            size_in = tf.concat([[self.num_frames], size], axis=-1)
            input = tf.slice(input, offset_in, size_in)
            input.set_shape([self.num_frames, self.crop_size, self.crop_size, 3])
            return input
        with tf.variable_scope('input'):
            # One sorted list of frame paths per training sequence directory.
            inList_all = []
            for dataPath in self.pathlist:
                inList = sorted(glob.glob(os.path.join(dataPath, 'input/*.png')))
                inList_all.append(inList)
            inList_all = tf.convert_to_tensor(inList_all, dtype=tf.string)
            self.data_queue = tf.train.slice_input_producer([inList_all], capacity=40)
            input = read_data()
            batch_in = tf.train.batch([input], batch_size=batch_size, num_threads=3, capacity=40)
        return batch_in
#
def forward(self, imga, imgb, scope='easyflow', reuse=False):
dims = len(imga.get_shape())
if dims == 5:
n, num_frame, height, width, num_channels = imga.get_shape().as_list()
imga = tf.reshape(imga, [n * num_frame, height, width, num_channels])
imgb = tf.reshape(imgb, [n * num_frame, height, width, num_channels])
n, h, w, c = imga.get_shape().as_list()
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)), \
slim.arg_scope([slim.conv2d_transpose], activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inputs = tf.concat([imga, imgb], 3, name='flow_inp')
c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
c5_hr = tf.reshape(c5, [n, h//4, w//4, 2, 4, 4])
c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
img_warp = imwarp_backward(c5_hr, imgb, [h, w])
c5_pack = tf.concat([inputs, c5_hr, img_warp], 3, name='cat')
s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
s5_hr = tf.reshape(s5, [n, h // 2, w //2, 2, 2, 2])
s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
uv = c5_hr + s5_hr
if dims == 5:
uv = tf.reshape(uv, [self.batch_size, num_frame, height, width, 2])
return uv
def build_model(self):
frames_lr = self.input_producer(batch_size=self.batch_size)
n, t, h, w, c = frames_lr.get_shape().as_list()
idx0 = self.num_frames // 2
frames_y = rgb2y(frames_lr)
frames_ref_y = frames_y[:, idx0:idx0 + 1, :, :, :]
frames_ref_y = tf.tile(frames_ref_y, [1, self.num_frames, 1, 1, 1])
uv = self.forward(frames_y, frames_ref_y)
frames_ref_warp = imwarp_backward(uv, frames_ref_y, [h, w])
loss_data = tf.reduce_mean(tf.abs(frames_y - frames_ref_warp))
loss_tv = tf.reduce_sum(tf.image.total_variation(uv)) / uv.shape.num_elements()
self.loss = loss_data + 0.01 * loss_tv
def train(self):
def train_op_func(loss, var_list, is_gradient_clip=False):
if is_gradient_clip:
train_op = tf.train.AdamOptimizer(lr)
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = tf.train.AdamOptimizer(lr).minimize(loss, var_list=var_list, global_step=global_step)
return train_op
"""Train easyflow network"""
global_step = tf.Variable(initial_value=0, trainable=False)
# Create folder for logs
if not tf.gfile.Exists(self.train_dir):
tf.gfile.MakeDirs(self.train_dir)
self.build_model()
decay_steps = 3e5
lr = tf.train.polynomial_decay(self.learning_rate, global_step, decay_steps, end_learning_rate=1e-6, power=0.9)
vars_all = tf.trainable_variables()
vars_sr = [v for v in vars_all if 'srmodel' in v.name]
vars_srcnn = [v for v in vars_all if 'srcnn' in v.name]
vars_flownet = [v for v in vars_all if 'flownet' in v.name]
train_all = train_op_func(self.loss, vars_all, is_gradient_clip=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
# self.load(sess, os.path.join(self.train_dir, 'checkpoints'))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in range(sess.run(global_step), self.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_all, self.loss])
duration = time.time() - start_time
# print loss_value
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 5 == 0:
num_examples_per_step = self.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.4f (%.1f data/s; %.3f s/batch)')
print((format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_value * 100,
examples_per_sec, sec_per_batch)))
# Save the model checkpoint periodically.
if step % 500 == 499 or (step + 1) == self.max_steps:
checkpoint_path = os.path.join(self.train_dir, 'checkpoints')
self.save(sess, checkpoint_path, step)
def save(self, sess, checkpoint_dir, step):
if not hasattr(self,'saver'):
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
model_name = "easyflow.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, sess, checkpoint_dir='./easyflow_log/model1/checkpoints', step=None):
print(" [*] Reading checkpoints...")
model_name = "easyflow.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading checkpoints... Success{}".format(ckpt_name))
return True
else:
print(" [*] Reading checkpoints... ERROR")
return False
def load_easyflow(self, sess, checkpoint_dir='./easyflow_log/model1/checkpoints'):
print(" [*] Reading EasyFlow checkpoints...")
model_name = "easyflow.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
flownets_var = [var for var in tf.trainable_variables() if 'easyflow' in var.name]
saver = tf.train.Saver(var_list=flownets_var)
saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading checkpoints...{} Success".format(ckpt_name))
return True
else:
print(" [*] Reading checkpoints... ERROR")
return False
def main(_):
    """Entry point handed to tf.app.run: build the model object and train it."""
    EASYFLOW().train()
if __name__ == '__main__':
    # tf.app.run parses command-line flags and then calls main(argv).
    tf.app.run()
| [
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.image.total_variation",
"tensorflow.gfile.MakeDirs",
"tensorflow.cast",
"tensorflow.clip_by_global_norm",
"tensorflow.app.run",
"tensorflow.slice",
"os.path.exis... | [((10647, 10659), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (10657, 10659), True, 'import tensorflow as tf\n'), ((5169, 5221), 'tensorflow.tile', 'tf.tile', (['frames_ref_y', '[1, self.num_frames, 1, 1, 1]'], {}), '(frames_ref_y, [1, self.num_frames, 1, 1, 1])\n', (5176, 5221), True, 'import tensorflow as tf\n'), ((6568, 6613), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'trainable': '(False)'}), '(initial_value=0, trainable=False)\n', (6579, 6613), True, 'import tensorflow as tf\n'), ((6809, 6920), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['self.learning_rate', 'global_step', 'decay_steps'], {'end_learning_rate': '(1e-06)', 'power': '(0.9)'}), '(self.learning_rate, global_step, decay_steps,\n end_learning_rate=1e-06, power=0.9)\n', (6834, 6920), True, 'import tensorflow as tf\n'), ((6935, 6959), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6957, 6959), True, 'import tensorflow as tf\n'), ((7249, 7261), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7259, 7261), True, 'import tensorflow as tf\n'), ((7336, 7399), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(50)', 'keep_checkpoint_every_n_hours': '(1)'}), '(max_to_keep=50, keep_checkpoint_every_n_hours=1)\n', (7350, 7399), True, 'import tensorflow as tf\n'), ((7488, 7510), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (7508, 7510), True, 'import tensorflow as tf\n'), ((7529, 7581), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess', 'coord': 'coord'}), '(sess=sess, coord=coord)\n', (7557, 7581), True, 'import tensorflow as tf\n'), ((7604, 7626), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7624, 7626), True, 'import tensorflow as tf\n'), ((7652, 7716), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.train_dir', 'sess.graph'], {'flush_secs': '(30)'}), 
'(self.train_dir, sess.graph, flush_secs=30)\n', (7673, 7716), True, 'import tensorflow as tf\n'), ((9343, 9388), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9372, 9388), True, 'import tensorflow as tf\n'), ((9975, 10020), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (10004, 10020), True, 'import tensorflow as tf\n'), ((688, 741), 'tensorflow.random_crop', 'tf.random_crop', (['self.data_queue', '[1, self.num_frames]'], {}), '(self.data_queue, [1, self.num_frames])\n', (702, 741), True, 'import tensorflow as tf\n'), ((1147, 1237), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[self.crop_size, self.crop_size, 3]'], {'dtype': 'tf.int32', 'name': '"""size"""'}), "([self.crop_size, self.crop_size, 3], dtype=tf.int32,\n name='size')\n", (1167, 1237), True, 'import tensorflow as tf\n'), ((1363, 1413), 'tensorflow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', (['[check]', 'shape'], {}), '([check], shape)\n', (1397, 1413), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((1593, 1626), 'tensorflow.concat', 'tf.concat', (['[[0], offset]'], {'axis': '(-1)'}), '([[0], offset], axis=-1)\n', (1602, 1626), True, 'import tensorflow as tf\n'), ((1649, 1694), 'tensorflow.concat', 'tf.concat', (['[[self.num_frames], size]'], {'axis': '(-1)'}), '([[self.num_frames], size], axis=-1)\n', (1658, 1694), True, 'import tensorflow as tf\n'), ((1715, 1750), 'tensorflow.slice', 'tf.slice', (['input', 'offset_in', 'size_in'], {}), '(input, offset_in, size_in)\n', (1723, 1750), True, 'import tensorflow as tf\n'), ((1873, 1899), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {}), "('input')\n", (1890, 1899), True, 'import tensorflow as tf\n'), ((2121, 2170), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['inList_all'], {'dtype': 'tf.string'}), 
'(inList_all, dtype=tf.string)\n', (2141, 2170), True, 'import tensorflow as tf\n'), ((2202, 2258), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[inList_all]'], {'capacity': '(40)'}), '([inList_all], capacity=40)\n', (2231, 2258), True, 'import tensorflow as tf\n'), ((2314, 2388), 'tensorflow.train.batch', 'tf.train.batch', (['[input]'], {'batch_size': 'batch_size', 'num_threads': '(3)', 'capacity': '(40)'}), '([input], batch_size=batch_size, num_threads=3, capacity=40)\n', (2328, 2388), True, 'import tensorflow as tf\n'), ((2642, 2704), 'tensorflow.reshape', 'tf.reshape', (['imga', '[n * num_frame, height, width, num_channels]'], {}), '(imga, [n * num_frame, height, width, num_channels])\n', (2652, 2704), True, 'import tensorflow as tf\n'), ((2724, 2786), 'tensorflow.reshape', 'tf.reshape', (['imgb', '[n * num_frame, height, width, num_channels]'], {}), '(imgb, [n * num_frame, height, width, num_channels])\n', (2734, 2786), True, 'import tensorflow as tf\n'), ((2849, 2886), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (2866, 2886), True, 'import tensorflow as tf\n'), ((4781, 4843), 'tensorflow.reshape', 'tf.reshape', (['uv', '[self.batch_size, num_frame, height, width, 2]'], {}), '(uv, [self.batch_size, num_frame, height, width, 2])\n', (4791, 4843), True, 'import tensorflow as tf\n'), ((5378, 5412), 'tensorflow.abs', 'tf.abs', (['(frames_y - frames_ref_warp)'], {}), '(frames_y - frames_ref_warp)\n', (5384, 5412), True, 'import tensorflow as tf\n'), ((6663, 6694), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['self.train_dir'], {}), '(self.train_dir)\n', (6678, 6694), True, 'import tensorflow as tf\n'), ((6708, 6741), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['self.train_dir'], {}), '(self.train_dir)\n', (6725, 6741), True, 'import tensorflow as tf\n'), ((7279, 7312), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), 
'()\n', (7310, 7312), True, 'import tensorflow as tf\n'), ((7810, 7821), 'time.time', 'time.time', ([], {}), '()\n', (7819, 7821), False, 'import time\n'), ((8875, 8938), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(50)', 'keep_checkpoint_every_n_hours': '(1)'}), '(max_to_keep=50, keep_checkpoint_every_n_hours=1)\n', (8889, 8938), True, 'import tensorflow as tf\n'), ((8992, 9022), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9006, 9022), False, 'import os\n'), ((9036, 9063), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9047, 9063), False, 'import os\n'), ((9094, 9134), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'model_name'], {}), '(checkpoint_dir, model_name)\n', (9106, 9134), False, 'import os\n'), ((9461, 9505), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (9477, 9505), False, 'import os\n'), ((10093, 10137), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (10109, 10137), False, 'import os\n'), ((10253, 10290), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'flownets_var'}), '(var_list=flownets_var)\n', (10267, 10290), True, 'import tensorflow as tf\n'), ((1052, 1078), 'tensorflow.cast', 'tf.cast', (['input', 'tf.float32'], {}), '(input, tf.float32)\n', (1059, 1078), True, 'import tensorflow as tf\n'), ((1108, 1123), 'tensorflow.shape', 'tf.shape', (['input'], {}), '(input)\n', (1116, 1123), True, 'import tensorflow as tf\n'), ((1264, 1292), 'tensorflow.reduce_all', 'tf.reduce_all', (['(shape >= size)'], {}), '(shape >= size)\n', (1277, 1292), True, 'import tensorflow as tf\n'), ((3444, 3487), 'tensorflow.concat', 'tf.concat', (['[imga, imgb]', '(3)'], {'name': '"""flow_inp"""'}), "([imga, imgb], 3, name='flow_inp')\n", (3453, 3487), True, 'import tensorflow as tf\n'), ((3509, 3562), 
'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['inputs', '(24)', '[5, 5]'], {'stride': '(2)', 'scope': '"""c1"""'}), "(inputs, 24, [5, 5], stride=2, scope='c1')\n", (3520, 3562), True, 'import tensorflow.contrib.slim as slim\n'), ((3584, 3623), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['c1', '(24)', '[3, 3]'], {'scope': '"""c2"""'}), "(c1, 24, [3, 3], scope='c2')\n", (3595, 3623), True, 'import tensorflow.contrib.slim as slim\n'), ((3645, 3694), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['c2', '(24)', '[5, 5]'], {'stride': '(2)', 'scope': '"""c3"""'}), "(c2, 24, [5, 5], stride=2, scope='c3')\n", (3656, 3694), True, 'import tensorflow.contrib.slim as slim\n'), ((3716, 3755), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['c3', '(24)', '[3, 3]'], {'scope': '"""c4"""'}), "(c3, 24, [3, 3], scope='c4')\n", (3727, 3755), True, 'import tensorflow.contrib.slim as slim\n'), ((3777, 3842), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['c4', '(32)', '[3, 3]'], {'activation_fn': 'tf.nn.tanh', 'scope': '"""c5"""'}), "(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')\n", (3788, 3842), True, 'import tensorflow.contrib.slim as slim\n'), ((3868, 3912), 'tensorflow.reshape', 'tf.reshape', (['c5', '[n, h // 4, w // 4, 2, 4, 4]'], {}), '(c5, [n, h // 4, w // 4, 2, 4, 4])\n', (3878, 3912), True, 'import tensorflow as tf\n'), ((3933, 3972), 'tensorflow.transpose', 'tf.transpose', (['c5_hr', '[0, 1, 4, 2, 5, 3]'], {}), '(c5_hr, [0, 1, 4, 2, 5, 3])\n', (3945, 3972), True, 'import tensorflow as tf\n'), ((3997, 4028), 'tensorflow.reshape', 'tf.reshape', (['c5_hr', '[n, h, w, 2]'], {}), '(c5_hr, [n, h, w, 2])\n', (4007, 4028), True, 'import tensorflow as tf\n'), ((4120, 4171), 'tensorflow.concat', 'tf.concat', (['[inputs, c5_hr, img_warp]', '(3)'], {'name': '"""cat"""'}), "([inputs, c5_hr, img_warp], 3, name='cat')\n", (4129, 4171), True, 'import tensorflow as tf\n'), ((4194, 4248), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['c5_pack', '(24)', '[5, 
5]'], {'stride': '(2)', 'scope': '"""s1"""'}), "(c5_pack, 24, [5, 5], stride=2, scope='s1')\n", (4205, 4248), True, 'import tensorflow.contrib.slim as slim\n'), ((4270, 4309), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['s1', '(24)', '[3, 3]'], {'scope': '"""s2"""'}), "(s1, 24, [3, 3], scope='s2')\n", (4281, 4309), True, 'import tensorflow.contrib.slim as slim\n'), ((4331, 4370), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['s2', '(24)', '[3, 3]'], {'scope': '"""s3"""'}), "(s2, 24, [3, 3], scope='s3')\n", (4342, 4370), True, 'import tensorflow.contrib.slim as slim\n'), ((4392, 4431), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['s3', '(24)', '[3, 3]'], {'scope': '"""s4"""'}), "(s3, 24, [3, 3], scope='s4')\n", (4403, 4431), True, 'import tensorflow.contrib.slim as slim\n'), ((4453, 4517), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['s4', '(8)', '[3, 3]'], {'activation_fn': 'tf.nn.tanh', 'scope': '"""s5"""'}), "(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')\n", (4464, 4517), True, 'import tensorflow.contrib.slim as slim\n'), ((4543, 4587), 'tensorflow.reshape', 'tf.reshape', (['s5', '[n, h // 2, w // 2, 2, 2, 2]'], {}), '(s5, [n, h // 2, w // 2, 2, 2, 2])\n', (4553, 4587), True, 'import tensorflow as tf\n'), ((4611, 4650), 'tensorflow.transpose', 'tf.transpose', (['s5_hr', '[0, 1, 4, 2, 5, 3]'], {}), '(s5_hr, [0, 1, 4, 2, 5, 3])\n', (4623, 4650), True, 'import tensorflow as tf\n'), ((4675, 4706), 'tensorflow.reshape', 'tf.reshape', (['s5_hr', '[n, h, w, 2]'], {}), '(s5_hr, [n, h, w, 2])\n', (4685, 4706), True, 'import tensorflow as tf\n'), ((5446, 5474), 'tensorflow.image.total_variation', 'tf.image.total_variation', (['uv'], {}), '(uv)\n', (5470, 5474), True, 'import tensorflow as tf\n'), ((5699, 5725), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (5721, 5725), True, 'import tensorflow as tf\n'), ((6122, 6167), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['rnn_grad'], 
{'clip_norm': '(3)'}), '(rnn_grad, clip_norm=3)\n', (6144, 6167), True, 'import tensorflow as tf\n'), ((7906, 7917), 'time.time', 'time.time', ([], {}), '()\n', (7915, 7917), False, 'import time\n'), ((7985, 8005), 'numpy.isnan', 'np.isnan', (['loss_value'], {}), '(loss_value)\n', (7993, 8005), True, 'import numpy as np\n'), ((8664, 8707), 'os.path.join', 'os.path.join', (['self.train_dir', '"""checkpoints"""'], {}), "(self.train_dir, 'checkpoints')\n", (8676, 8707), False, 'import os\n'), ((9543, 9582), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (9555, 9582), False, 'import os\n'), ((10323, 10362), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (10335, 10362), False, 'import os\n'), ((1491, 1506), 'tensorflow.shape', 'tf.shape', (['shape'], {}), '(shape)\n', (1499, 1506), True, 'import tensorflow as tf\n'), ((10181, 10205), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10203, 10205), True, 'import tensorflow as tf\n'), ((792, 820), 'tensorflow.read_file', 'tf.read_file', (['data_seq[0][i]'], {}), '(data_seq[0][i])\n', (804, 820), True, 'import tensorflow as tf\n'), ((2014, 2051), 'os.path.join', 'os.path.join', (['dataPath', '"""input/*.png"""'], {}), "(dataPath, 'input/*.png')\n", (2026, 2051), False, 'import os\n'), ((3013, 3063), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(True)'}), '(uniform=True)\n', (3049, 3063), True, 'import tensorflow as tf\n'), ((3116, 3144), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3139, 3144), True, 'import tensorflow as tf\n'), ((3284, 3334), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(True)'}), '(uniform=True)\n', (3320, 3334), True, 'import tensorflow as tf\n'), ((3387, 3415), 'tensorflow.constant_initializer', 
'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3410, 3415), True, 'import tensorflow as tf\n'), ((6394, 6420), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (6416, 6420), True, 'import tensorflow as tf\n'), ((8367, 8381), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8379, 8381), False, 'from datetime import datetime\n')] |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# Series Problem
s1 = pd.Series(-3, index=range(2, 11, 2))
s2 = pd.Series({'Bill': 31, 'Sarah': 28, 'Jane': 34, 'Joe': 26})

# Random Walk Problem
# five random walks of length 100 plotted together
N = 100
# fixed: xrange is Python-2-only and raises NameError on Python 3
for i in range(5):
    s1 = np.zeros(N)
    # fair coin flips mapped from {0, 1} to {-1, +1} steps
    s1[1:] = np.random.binomial(1, .5, size=N - 1) * 2 - 1
    s1 = pd.Series(s1)
    s1 = s1.cumsum()  # cumulative sum of steps = random walk
    s1.plot()
plt.show()

# biased random walks (p = 0.51) at three different lengths
N = 100  # length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N - 1,)) * 2 - 1  # biased coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum()  # random walk
plt.subplot(311)
s1.plot()
N = 10000  # length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N - 1,)) * 2 - 1  # biased coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum()  # random walk
plt.subplot(312)
s1.plot()
N = 100000  # length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N - 1,)) * 2 - 1  # biased coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum()  # random walk
plt.subplot(313)
s1.plot()
plt.show()

# SQL SELECT problem
# NOTE(review): studentInfo / otherInfo are assumed to be DataFrames loaded by
# an accompanying exercise setup — they are not defined in this file; confirm.
studentInfo[(studentInfo['Age'] > 19) & (studentInfo['Sex'] == 'M')][['ID', 'Name']]

# SQL JOIN problem
pd.merge(studentInfo[studentInfo['Sex'] == 'M'], otherInfo, on='ID')[['ID', 'Age', 'GPA']]

# final Crime Data problem
# load in the data
crimeDF = pd.read_csv("crime_data.txt", header=1, skiprows=0, index_col=0)
# create crime rate column
crimeDF['Crime Rate'] = crimeDF['Total'] / crimeDF['Population']
# plot the crime rate as function of year
crimeDF.plot(y='Crime Rate')
# list 5 years with highest crime rate in descending order
# fixed: DataFrame.sort(columns=...) was removed from pandas; sort_values is the replacement
crimeDF.sort_values(by="Crime Rate", ascending=False).index[:5]
# calculate average total number of crimes, and average number of burglaries
avg = crimeDF.mean(axis=0)[['Total', 'Burglary']]
# find the years for total crime is below average, but burglary is above average
crimeDF[(crimeDF['Total'] < avg['Total']) & (crimeDF['Burglary'] > avg['Burglary'])].index
# plot murders as function of population
crimeDF.plot(x='Population', y='Murder')
# make histogram of Robbery and Burglary, plot side-by-side
crimeDF.hist(column=['Robbery', 'Burglary'])
# select Population, Violent, and Robbery columns for years in the 80s, save to csv file.
crimeDF.loc[1980:1989, ['Population', 'Violent', 'Robbery']].to_csv("crime_subset.txt")
| [
"pandas.Series",
"pandas.read_csv",
"pandas.merge",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"numpy.random.binomial",
"matplotlib.pyplot.show"
] | [((141, 200), 'pandas.Series', 'pd.Series', (["{'Bill': 31, 'Sarah': 28, 'Jane': 34, 'Joe': 26}"], {}), "({'Bill': 31, 'Sarah': 28, 'Jane': 34, 'Joe': 26})\n", (150, 200), True, 'import pandas as pd\n'), ((431, 441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (439, 441), True, 'from matplotlib import pyplot as plt\n'), ((508, 519), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (516, 519), True, 'import numpy as np\n'), ((590, 603), 'pandas.Series', 'pd.Series', (['s1'], {}), '(s1)\n', (599, 603), True, 'import pandas as pd\n'), ((635, 651), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (646, 651), True, 'from matplotlib import pyplot as plt\n'), ((708, 719), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (716, 719), True, 'import numpy as np\n'), ((790, 803), 'pandas.Series', 'pd.Series', (['s1'], {}), '(s1)\n', (799, 803), True, 'import pandas as pd\n'), ((835, 851), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (846, 851), True, 'from matplotlib import pyplot as plt\n'), ((909, 920), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (917, 920), True, 'import numpy as np\n'), ((991, 1004), 'pandas.Series', 'pd.Series', (['s1'], {}), '(s1)\n', (1000, 1004), True, 'import pandas as pd\n'), ((1036, 1052), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (1047, 1052), True, 'from matplotlib import pyplot as plt\n'), ((1066, 1076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1074, 1076), True, 'from matplotlib import pyplot as plt\n'), ((1344, 1408), 'pandas.read_csv', 'pd.read_csv', (['"""crime_data.txt"""'], {'header': '(1)', 'skiprows': '(0)', 'index_col': '(0)'}), "('crime_data.txt', header=1, skiprows=0, index_col=0)\n", (1355, 1408), True, 'import pandas as pd\n'), ((308, 319), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (316, 319), True, 'import numpy as np\n'), ((382, 395), 'pandas.Series', 'pd.Series', (['s1'], {}), '(s1)\n', (391, 395), True, 
'import pandas as pd\n'), ((1198, 1266), 'pandas.merge', 'pd.merge', (["studentInfo[studentInfo['Sex'] == 'M']", 'otherInfo'], {'on': '"""ID"""'}), "(studentInfo[studentInfo['Sex'] == 'M'], otherInfo, on='ID')\n", (1206, 1266), True, 'import pandas as pd\n'), ((529, 571), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.51)'], {'size': '(N - 1,)'}), '(1, 0.51, size=(N - 1,))\n', (547, 571), True, 'import numpy as np\n'), ((729, 771), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.51)'], {'size': '(N - 1,)'}), '(1, 0.51, size=(N - 1,))\n', (747, 771), True, 'import numpy as np\n'), ((930, 972), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.51)'], {'size': '(N - 1,)'}), '(1, 0.51, size=(N - 1,))\n', (948, 972), True, 'import numpy as np\n'), ((333, 371), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {'size': '(N - 1)'}), '(1, 0.5, size=N - 1)\n', (351, 371), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
eye.py - Display Eye Diagrams of captured Data
==============================================
Uses demodulated data captured from the ec_978.py '--saveraw' command to display
eye diagrams.
Note: In the code, you will often see that we drop the first sample.
This is because ec_978.py supplies an extra sample at the beginning. The first
data point is actually packet[1].
Data is displayed in smoothed or non-smoothed mode. Non-smoothed is just
the raw data samples. Smoothed is the same data interpolated at eight times
the original data. Some diagrams will also use an alpha (transparency) value
for display.
"""
import sys
import os
import numpy as np
import argparse
from argparse import RawTextHelpFormatter
import matplotlib.pyplot as plt
import matplotlib
from scipy import signal
from colour import Color
from ec_978 import PACKET_LENGTH_FISB, PACKET_LENGTH_ADSB
# number of points to show in x-axis (3 = 0,1,2,3)
# The number of samples for each interval is (points * 2) + 1
nPoints = 3
def plotSingleEye(packet, n, nPoints, ax, xAxis, clr, scale):
    """
    Plot one eye-diagram trace on the supplied axes.

    Trace ``n`` covers ``(nPoints * 2) * scale + scale`` consecutive samples;
    successive traces overlap by one (scaled) sample so the eyes line up.

    Args:
        packet (np array): Complete set of demodulated data.
        n (int): Zero-based index of the trace to draw.
        nPoints (int): Number of complete data points per trace (usually the
            global 'nPoints').
        ax (Axes): matplotlib Axes object to draw on.
        xAxis (array): x-axis sample positions.
        clr (str): Plot color; None lets matplotlib cycle colors.
        scale (int): 1 for raw samples, 8 for 8x-interpolated data.
    """
    traceLen = (2 * nPoints) * scale + scale
    begin = n * (traceLen - scale)
    ax.plot(xAxis, packet[begin:begin + traceLen], c=clr)
def totalAvailableSamples(packet, nPoints, scale):
    """
    Compute the trace length and how many complete eye traces fit in a packet.

    Args:
        packet (np array): Complete set of demodulated data.
        nPoints (int): Number of complete data points per trace, usually set
            by the global 'nPoints'.
        scale (int): 1 if plotting data points with no interpolation, or 8 if
            using 8 point interpolation.

    Returns:
        tuple: Tuple containing:

            * Number of samples in a single trace (int).
            * Total number of traces that can be plotted for this packet (int).
    """
    # Each trace spans nPoints intervals of (2 * scale) samples plus the
    # closing sample.
    nPointsSampleNum = ((nPoints * 2) * scale) + scale
    # Fixes: removed the unused 'packetSize' local, and replaced
    # int(a / b) with floor division to avoid the float round-trip.
    return nPointsSampleNum, packet.size // nPointsSampleNum
def eyeDiagramEarlyAndLate(packet):
    """
    Overlay the first 10% (gray) and last 10% (tan) of the eye traces.

    Raw samples only — no interpolation.  Coloring the early and late traces
    differently makes any horizontal drift between them visible, which would
    suggest a sampling-rate mismatch between the SDR and the demodulator.
    Note that for ADS-B short packets, the last 10% is probably just
    baseline noise.

    Args:
        packet (np array): Complete set of demodulated data.
    """
    samples = packet[1:]  # ec_978.py supplies an extra leading sample; skip it
    traceLen, total = totalAvailableSamples(samples, nPoints, 1)
    tenPercent = int(total * 0.10)
    xs = np.linspace(0, nPoints, traceLen)
    fig, ax = plt.subplots(figsize=(6, 7.5))
    plt.axhline(y=0, c='red', linestyle='--')
    plt.xlabel('Data points (2 samples / data point)')
    plt.ylabel('Values')
    plt.title('First 10% gray, last 10% tan')
    gray = (.57, .58, .57, .5)  # semi-transparent gray
    tan = (.81, .69, .43, .5)   # semi-transparent tan
    for idx in range(tenPercent):
        plotSingleEye(samples, idx, nPoints, ax, xs, gray, 1)
    for idx in range(total - tenPercent, total):
        plotSingleEye(samples, idx, nPoints, ax, xs, tan, 1)
    plt.show()
def eyeDiagramEarlyAndLateSmooth(packet):
    """
    Plot an eye diagram of the first 10% and last 10% of the samples,
    up-sampled by a factor of eight.

    Note that for ADS-B short packets, the last 10% is probably just
    baseline noise. Each plot gets a different (default cycle) color.

    Args:
        packet (np array): Complete set of demodulated data.
    """
    # Skip the very first sample, then interpolate 8x.
    packet = packet[1:]
    smoothed = signal.resample(packet, packet.size * 8)
    samplesPerEye, totalEyes = totalAvailableSamples(smoothed, nPoints, 8)
    tenPercent = int(totalEyes * 0.10)
    horizontalAxis = np.linspace(0, nPoints, samplesPerEye)
    fig, ax = plt.subplots(figsize=(6, 7.5))
    plt.axhline(y=0, c='black', linestyle='--')
    plt.title('First 10%, last 10% data points (w/smoothing)')
    plt.xlabel('Data points (2 samples / data point)')
    plt.ylabel('Values')
    # Early eyes first, then the late eyes, same as two separate loops.
    eyesToPlot = list(range(tenPercent)) + \
        list(range(totalEyes - tenPercent, totalEyes))
    for eyeNum in eyesToPlot:
        plotSingleEye(smoothed, eyeNum, nPoints, ax, horizontalAxis, None, 8)
    plt.show()
def eyeDiagramSmooth(packet):
    """
    Plot an eye diagram using all samples, up-sampled by a factor of
    eight.

    Args:
        packet (np array): Complete set of demodulated data.
    """
    # Skip the very first sample, then interpolate 8x.
    packet = packet[1:]
    smoothed = signal.resample(packet, packet.size * 8)
    samplesPerEye, totalEyes = totalAvailableSamples(smoothed, nPoints, 8)
    horizontalAxis = np.linspace(0, nPoints, samplesPerEye)
    fig, ax = plt.subplots(figsize=(6, 7.5))
    plt.axhline(y=0, c='black', linestyle='--')
    plt.title('All data points eye diagram (with smoothing)')
    plt.xlabel('Data points (2 samples / data point)')
    plt.ylabel('Values')
    for eyeNum in range(totalEyes):
        plotSingleEye(smoothed, eyeNum, nPoints, ax, horizontalAxis, None, 8)
    plt.show()
def eyeDiagramSmoothGradient(packet, isForward):
    """
    Plot eye diagram using all samples. The colors are a gradient starting
    with red, then going to yellow, green and blue. The idea is that you
    can look at outliers and see where in the packet they are located.

    8x smoothing is applied.

    Args:
        packet (np array): Complete set of demodulated data.
        isForward (bool): True if displaying the packet from front to
            back. False if displaying packet from back to front. Red is
            always used for the front packets, and blue for the rear
            packets.
    """
    # Drop the first sample, then interpolate 8x.
    packet = packet[1:]
    packet1 = signal.resample(packet, packet.size * 8)
    nPointsSampleNum, maxSamples = totalAvailableSamples(packet1, nPoints, 8)
    # Plot order: front to back when isForward, otherwise back to front.
    # (This selection used to appear twice; the dead duplicate is removed.)
    if isForward:
        start = 0
        stop = maxSamples
        interval = 1
        pltTitle = 'All points, smooth, gradient front to back'
    else:
        start = maxSamples - 1
        stop = -1
        interval = -1
        pltTitle = 'All points, smooth, gradient back to front'
    # Gradient runs red -> blue across the whole packet.
    red = Color('red')
    colors = list(red.range_to(Color('blue'), maxSamples))
    xAxis = np.linspace(0, nPoints, nPointsSampleNum)
    fig, ax = plt.subplots(figsize=(6, 7.5))
    plt.axhline(y=0, c='black', linestyle='--')
    plt.xlabel('Data points (2 samples / data point)')
    plt.ylabel('Values')
    plt.title(pltTitle)
    for x in range(start, stop, interval):
        plotSingleEye(packet1, x, nPoints, ax, xAxis,
                      matplotlib.colors.to_rgba(colors[x].rgb, .1), 8)
    plt.show()
def eyeDiagram(packet):
    """
    Plot an eye diagram using every available sample, with no smoothing.

    Args:
        packet (np array): Complete set of demodulated data.
    """
    # Skip the very first sample.
    packet = packet[1:]
    samplesPerEye, totalEyes = totalAvailableSamples(packet, nPoints, 1)
    horizontalAxis = np.linspace(0, nPoints, samplesPerEye)
    fig, ax = plt.subplots(figsize=(6, 7.5))
    plt.axhline(y=0, c='black', linestyle='--')
    plt.title('All data points eye diagram (no smoothing)')
    plt.xlabel('Data points (2 samples / data point)')
    plt.ylabel('Values')
    for eyeNum in range(totalEyes):
        plotSingleEye(packet, eyeNum, nPoints, ax, horizontalAxis, None, 1)
    plt.show()
def main(fname):
    """
    Read 'fname' from disk and display requested images.

    Args:
        fname (str): Filename containing data to process.
    """
    # The filename encodes the packet type: '.F.' means FIS-B, '.A'
    # means ADS-B. Only the packet length differs; the unused 'isFisb'
    # locals were removed.
    if '.F.' in fname:
        packetLength = PACKET_LENGTH_FISB
    elif '.A' in fname:
        packetLength = PACKET_LENGTH_ADSB
    else:
        print('Cannot tell if file is FIS-B or ADS-B.')
        sys.exit(1)
    # 'with' guarantees the file is closed even if the read fails.
    with open(fname, 'rb') as bfile:
        packetBuf = bfile.read(packetLength)
    # Numpy will convert the bytes to int32's
    packet = np.frombuffer(packetBuf, np.int32)
    # Show each requested diagram (global flags set in __main__); each
    # plt.show() blocks until its window is closed.
    if showAdpns:
        eyeDiagram(packet)
    if showAdps:
        eyeDiagramSmooth(packet)
    if showAdpgfb:
        eyeDiagramSmoothGradient(packet, True)
    if showAdpgbf:
        eyeDiagramSmoothGradient(packet, False)
    if showFlns:
        eyeDiagramEarlyAndLate(packet)
    if showFls:
        eyeDiagramEarlyAndLateSmooth(packet)
# Call main function
# Call main function
if __name__ == "__main__":
    hlpText = \
"""eye.py: Show signal as eye diagram.
Uses the data from 'ec_978.py' with the '--saveraw' flag set to show an
eye diagram.
Not specifying any optional arguments implies all eye diagrams will be
displayed in sequence.
Each diagram shows approximately 3 data points (6 + 1 samples).
Data is sampled at 2.083334 Mhz, or twice the data rate of 1.041667 Mhz.
Non-smoothed eyes shows only the actual samples. Smoothed samples are
up-sampled and interpolated by a factor of eight.
Either FIS-B or ADSB data can be displayed. ADS-B is tuned for long
ADS-B packets (the most common). If, when looking at an ADS-B eye
diagram, you see a thin horizontal line, this denotes a short packet.
The horizontal line represents no data transmission which is usually
at a very low magnitude compared with the signal. First and last 10%
diagrams will only show the first 10%, because the last 10% is not
modulated.
Some eye diagrams only show the first 10% of data, and the last 10%
of data. For the non-smoothed version, the first 10% is shown in gray,
and the last 10% in tan. This way you can detect an abnormal sampling
frequency. The two colors should mostly overlap.
The smoothed first and last 10% uses a normal color scheme. A full
FIS-B packet can clutter up the display, and only showing 20% of
samples is easier to interpret. Also, FIS-B packets have ones and
zeroes at both ends, while placeholder packets have only zeros in
the middle.
Data points can be shown with 'gradient colors'. Early data will
start off in red, then gradually turn to yellow, green, and finally
blue. You can use gradient colors to observe outlier values
and tell where within the packet they occurred. '--adpgfb' displays
the data front to back (red to blue), and '--adpgbf' will display
the data back to front (blue to red). Red will always be the early
data and blue the last data. Gradient colors also help visualize the
eye better in noisy signals.
If you are displaying more than one diagram, the next diagram will
appear after closing the window of the current diagram.
"""
    parser = argparse.ArgumentParser(
        description=hlpText, formatter_class=RawTextHelpFormatter)
    parser.add_argument("fname", help='Filename to use.')
    parser.add_argument("--all",
        help='Display all graphs in sequence.', action='store_true')
    parser.add_argument("--adpns",
        help='Show all data points no smoothing.', action='store_true')
    parser.add_argument("--adps",
        help='Show all data points with smoothing.', action='store_true')
    parser.add_argument("--adpgfb",
        help='Show all data points gradient colors front to back.', action='store_true')
    parser.add_argument("--adpgbf",
        help='Show all data points gradient colors back to front.', action='store_true')
    parser.add_argument("--flns",
        help='Show first and last 10 pct no smoothing.', action='store_true')
    parser.add_argument("--fls",
        help='Show first and last 10 pct with smoothing.', action='store_true')
    args = parser.parse_args()
    # 'store_true' options are already booleans, so assign them directly
    # (replaces the original set-False-then-conditionally-set-True code).
    showAdpns = args.adpns
    showAdps = args.adps
    showAdpgfb = args.adpgfb
    showAdpgbf = args.adpgbf
    showFlns = args.flns
    showFls = args.fls
    showAll = args.all
    # Assume --all if no options set, or --all set
    if showAll or not (showAdpns or showAdps or showAdpgfb or showAdpgbf
                       or showFlns or showFls):
        showAdpns = True
        showAdps = True
        showAdpgfb = True
        showAdpgbf = True
        showFlns = True
        showFls = True
    main(args.fname)
| [
"numpy.frombuffer",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.colors.to_rgba",
"matplotlib.pyplot.axhline",
"scipy.signal.resample",
"numpy.linspace",
"sys.exit",
"matplotlib.pyplot.title",
"colour.Color",
"matplotlib.pyplot.subplots",
"ma... | [((3358, 3399), 'numpy.linspace', 'np.linspace', (['(0)', 'nPoints', 'nPointsSampleNum'], {}), '(0, nPoints, nPointsSampleNum)\n', (3369, 3399), True, 'import numpy as np\n'), ((3412, 3442), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (3424, 3442), True, 'import matplotlib.pyplot as plt\n'), ((3445, 3486), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'c': '"""red"""', 'linestyle': '"""--"""'}), "(y=0, c='red', linestyle='--')\n", (3456, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data points (2 samples / data point)"""'], {}), "('Data points (2 samples / data point)')\n", (3499, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values"""'], {}), "('Values')\n", (3552, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3606), 'matplotlib.pyplot.title', 'plt.title', (['"""First 10% gray, last 10% tan"""'], {}), "('First 10% gray, last 10% tan')\n", (3574, 3606), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3919), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3917, 3919), True, 'import matplotlib.pyplot as plt\n'), ((4315, 4355), 'scipy.signal.resample', 'signal.resample', (['packet', '(packet.size * 8)'], {}), '(packet, packet.size * 8)\n', (4330, 4355), False, 'from scipy import signal\n'), ((4494, 4535), 'numpy.linspace', 'np.linspace', (['(0)', 'nPoints', 'nPointsSampleNum'], {}), '(0, nPoints, nPointsSampleNum)\n', (4505, 4535), True, 'import numpy as np\n'), ((4548, 4578), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (4560, 4578), True, 'import matplotlib.pyplot as plt\n'), ((4581, 4624), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'c': '"""black"""', 'linestyle': '"""--"""'}), "(y=0, c='black', linestyle='--')\n", (4592, 4624), True, 'import 
matplotlib.pyplot as plt\n'), ((4627, 4677), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data points (2 samples / data point)"""'], {}), "('Data points (2 samples / data point)')\n", (4637, 4677), True, 'import matplotlib.pyplot as plt\n'), ((4680, 4700), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values"""'], {}), "('Values')\n", (4690, 4700), True, 'import matplotlib.pyplot as plt\n'), ((4703, 4761), 'matplotlib.pyplot.title', 'plt.title', (['"""First 10%, last 10% data points (w/smoothing)"""'], {}), "('First 10%, last 10% data points (w/smoothing)')\n", (4712, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4998, 5000), True, 'import matplotlib.pyplot as plt\n'), ((5237, 5277), 'scipy.signal.resample', 'signal.resample', (['packet', '(packet.size * 8)'], {}), '(packet, packet.size * 8)\n', (5252, 5277), False, 'from scipy import signal\n'), ((5366, 5407), 'numpy.linspace', 'np.linspace', (['(0)', 'nPoints', 'nPointsSampleNum'], {}), '(0, nPoints, nPointsSampleNum)\n', (5377, 5407), True, 'import numpy as np\n'), ((5420, 5450), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (5432, 5450), True, 'import matplotlib.pyplot as plt\n'), ((5453, 5496), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'c': '"""black"""', 'linestyle': '"""--"""'}), "(y=0, c='black', linestyle='--')\n", (5464, 5496), True, 'import matplotlib.pyplot as plt\n'), ((5499, 5549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data points (2 samples / data point)"""'], {}), "('Data points (2 samples / data point)')\n", (5509, 5549), True, 'import matplotlib.pyplot as plt\n'), ((5552, 5572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values"""'], {}), "('Values')\n", (5562, 5572), True, 'import matplotlib.pyplot as plt\n'), ((5575, 5632), 'matplotlib.pyplot.title', 'plt.title', (['"""All data points eye diagram (with smoothing)"""'], {}), 
"('All data points eye diagram (with smoothing)')\n", (5584, 5632), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5737, 5739), True, 'import matplotlib.pyplot as plt\n'), ((6383, 6423), 'scipy.signal.resample', 'signal.resample', (['packet', '(packet.size * 8)'], {}), '(packet, packet.size * 8)\n', (6398, 6423), False, 'from scipy import signal\n'), ((6767, 6779), 'colour.Color', 'Color', (['"""red"""'], {}), "('red')\n", (6772, 6779), False, 'from colour import Color\n'), ((6850, 6891), 'numpy.linspace', 'np.linspace', (['(0)', 'nPoints', 'nPointsSampleNum'], {}), '(0, nPoints, nPointsSampleNum)\n', (6861, 6891), True, 'import numpy as np\n'), ((6904, 6934), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (6916, 6934), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6980), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'c': '"""black"""', 'linestyle': '"""--"""'}), "(y=0, c='black', linestyle='--')\n", (6948, 6980), True, 'import matplotlib.pyplot as plt\n'), ((6983, 7033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data points (2 samples / data point)"""'], {}), "('Data points (2 samples / data point)')\n", (6993, 7033), True, 'import matplotlib.pyplot as plt\n'), ((7036, 7056), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values"""'], {}), "('Values')\n", (7046, 7056), True, 'import matplotlib.pyplot as plt\n'), ((7059, 7078), 'matplotlib.pyplot.title', 'plt.title', (['pltTitle'], {}), '(pltTitle)\n', (7068, 7078), True, 'import matplotlib.pyplot as plt\n'), ((7370, 7380), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7378, 7380), True, 'import matplotlib.pyplot as plt\n'), ((7674, 7715), 'numpy.linspace', 'np.linspace', (['(0)', 'nPoints', 'nPointsSampleNum'], {}), '(0, nPoints, nPointsSampleNum)\n', (7685, 7715), True, 'import numpy as np\n'), ((7728, 7758), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {'figsize': '(6, 7.5)'}), '(figsize=(6, 7.5))\n', (7740, 7758), True, 'import matplotlib.pyplot as plt\n'), ((7761, 7804), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'c': '"""black"""', 'linestyle': '"""--"""'}), "(y=0, c='black', linestyle='--')\n", (7772, 7804), True, 'import matplotlib.pyplot as plt\n'), ((7807, 7857), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data points (2 samples / data point)"""'], {}), "('Data points (2 samples / data point)')\n", (7817, 7857), True, 'import matplotlib.pyplot as plt\n'), ((7860, 7880), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values"""'], {}), "('Values')\n", (7870, 7880), True, 'import matplotlib.pyplot as plt\n'), ((7883, 7938), 'matplotlib.pyplot.title', 'plt.title', (['"""All data points eye diagram (no smoothing)"""'], {}), "('All data points eye diagram (no smoothing)')\n", (7892, 7938), True, 'import matplotlib.pyplot as plt\n'), ((8034, 8044), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8042, 8044), True, 'import matplotlib.pyplot as plt\n'), ((8592, 8626), 'numpy.frombuffer', 'np.frombuffer', (['packetBuf', 'np.int32'], {}), '(packetBuf, np.int32)\n', (8605, 8626), True, 'import numpy as np\n'), ((11077, 11164), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'hlpText', 'formatter_class': 'RawTextHelpFormatter'}), '(description=hlpText, formatter_class=\n RawTextHelpFormatter)\n', (11100, 11164), False, 'import argparse\n'), ((6809, 6822), 'colour.Color', 'Color', (['"""blue"""'], {}), "('blue')\n", (6814, 6822), False, 'from colour import Color\n'), ((7318, 7363), 'matplotlib.colors.to_rgba', 'matplotlib.colors.to_rgba', (['colors[x].rgb', '(0.1)'], {}), '(colors[x].rgb, 0.1)\n', (7343, 7363), False, 'import matplotlib\n'), ((8447, 8458), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8455, 8458), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import tensorflow as tf
from random import choice, shuffle
from numpy import array
############<NAME>的基于tensorflow写的一个kmeans模板###############
def KMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.

    'vectors' should be an n*k 2-D NumPy array, where n is the number of
    k-dimensional vectors.
    'noofclusters' is the number of clusters to partition into (int).
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find the dimensionality of each vector.
    dim = len(vectors[0])
    # Used to randomly pick centroids from the available vectors.
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # Computation graph.
    # A dedicated default graph is created for the whole algorithm, so
    # that repeated calls of this function do not fill the default graph
    # with unused ops/variables left over from previous invocations.
    graph = tf.Graph()
    with graph.as_default():
        # Computation session.
        sess = tf.Session()
        ## Build the basic computation elements.
        ## Each centroid lives in its own Variable matrix,
        ## initialized from a randomly chosen subset of the input vectors.
        centroids = [tf.Variable((vectors[vector_indices[i]]))
                     for i in range(noofclusters)]
        ## Placeholder used to feed new values into the centroids.
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ## Cluster assignment of each individual vector, defaulting to 0.
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ## These nodes receive their proper values in later operations.
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment,
                                             assignment_value))
        ## Node for computing the mean.
        # Input placeholder.
        mean_input = tf.placeholder("float", [None, dim])
        # Op that takes the input and averages over axis 0, e.g. a list
        # of vectors.
        mean_op = tf.reduce_mean(mean_input, 0)
        ## Nodes for computing the Euclidean distance.
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(
            v1, v2), 2)))
        ## This op decides which cluster a vector should belong to,
        ## based on its Euclidean distance to each centroid.
        # Placeholder for input.
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ## Initialize all state.
        ## This initializes every Variable defined in the graph. The
        ## variable-initializer must be defined after all Variables are
        ## constructed, so that every Variable is covered.
        init_op = tf.global_variables_initializer()
        # Initialize all the variables.
        sess.run(init_op)
        ## Cluster iterations.
        # Run the expectation-maximization steps of K-Means below. For
        # simplicity, a fixed number of iterations is used instead of a
        # convergence criterion.
        noofiterations = 20
        for iteration_n in range(noofiterations):
            ## Expectation step.
            ## Based on the centroids computed in the previous iteration,
            ## compute the _expected_ centroid assignments.
            # First iterate over every vector.
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute the Euclidean distance between this vector and
                # each assigned centroid.
                distances = [sess.run(euclid_dist, feed_dict={
                    v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids]
                # Feed those distances into the cluster-assignment op.
                assignment = sess.run(cluster_assignment, feed_dict = {
                    centroid_distances: distances})
                # Assign the appropriate cluster to each vector.
                sess.run(cluster_assigns[vector_n], feed_dict={
                    assignment_value: assignment})
            ## Maximization step.
            # Based on the expectation step above, recompute each centroid
            # so the within-cluster sum of squares is minimized.
            for cluster_n in range(noofclusters):
                # Collect all vectors assigned to this cluster.
                assigned_vects = [vectors[i] for i in range(len(vectors))
                                  if sess.run(assignments[i]) == cluster_n]
                # Compute the new cluster centroid.
                new_location = sess.run(mean_op, feed_dict={
                    mean_input: array(assigned_vects)})
                # Assign the appropriate centroid value.
                sess.run(cent_assigns[cluster_n], feed_dict={
                    centroid_value: new_location})
        # Return the centroids and the per-vector assignments.
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
############ Generate test data ###############
sampleNo = 100  # number of samples
# 2-D normal distribution: mean and covariance (applied via its
# Cholesky factor). The dead 'mu = 3' placeholder was removed — it was
# immediately overwritten by the array below.
mu = np.array([[1, 5]])
Sigma = np.array([[1, 0.5], [1.5, 3]])
R = cholesky(Sigma)
srcdata = np.dot(np.random.randn(sampleNo, 2), R) + mu
plt.plot(srcdata[:, 0], srcdata[:, 1], 'bo')
############ Run the k-means computation ###############
k = 4
center, result = KMeansCluster(srcdata, k)
print(center)
############ Plot the clusters with seaborn ###############
res = {"x": [], "y": [], "kmeans_res": []}
for i in range(len(result)):
    res["x"].append(srcdata[i][0])
    res["y"].append(srcdata[i][1])
    res["kmeans_res"].append(result[i])
pd_res = pd.DataFrame(res)
sns.lmplot("x", "y", data=pd_res, fit_reg=False, size=5, hue="kmeans_res")
plt.show() | [
"seaborn.lmplot",
"tensorflow.Graph",
"tensorflow.argmin",
"random.shuffle",
"pandas.DataFrame",
"tensorflow.Variable",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.assign",
"tensorflow.subtrac... | [((4265, 4283), 'numpy.array', 'np.array', (['[[1, 5]]'], {}), '([[1, 5]])\n', (4273, 4283), True, 'import numpy as np\n'), ((4292, 4322), 'numpy.array', 'np.array', (['[[1, 0.5], [1.5, 3]]'], {}), '([[1, 0.5], [1.5, 3]])\n', (4300, 4322), True, 'import numpy as np\n'), ((4327, 4342), 'numpy.linalg.cholesky', 'cholesky', (['Sigma'], {}), '(Sigma)\n', (4335, 4342), False, 'from numpy.linalg import cholesky\n'), ((4397, 4441), 'matplotlib.pyplot.plot', 'plt.plot', (['srcdata[:, 0]', 'srcdata[:, 1]', '"""bo"""'], {}), "(srcdata[:, 0], srcdata[:, 1], 'bo')\n", (4405, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4755, 4772), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (4767, 4772), True, 'import pandas as pd\n'), ((4773, 4847), 'seaborn.lmplot', 'sns.lmplot', (['"""x"""', '"""y"""'], {'data': 'pd_res', 'fit_reg': '(False)', 'size': '(5)', 'hue': '"""kmeans_res"""'}), "('x', 'y', data=pd_res, fit_reg=False, size=5, hue='kmeans_res')\n", (4783, 4847), True, 'import seaborn as sns\n'), ((4843, 4853), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4851, 4853), True, 'import matplotlib.pyplot as plt\n'), ((670, 693), 'random.shuffle', 'shuffle', (['vector_indices'], {}), '(vector_indices)\n', (677, 693), False, 'from random import choice, shuffle\n'), ((810, 820), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (818, 820), True, 'import tensorflow as tf\n'), ((880, 892), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (890, 892), True, 'import tensorflow as tf\n'), ((1169, 1201), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""', '[dim]'], {}), "('float64', [dim])\n", (1183, 1201), True, 'import tensorflow as tf\n'), ((1490, 1513), 'tensorflow.placeholder', 'tf.placeholder', (['"""int32"""'], {}), "('int32')\n", (1504, 1513), True, 'import tensorflow as tf\n'), ((1775, 1811), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, dim]'], {}), "('float', [None, dim])\n", (1789, 
1811), True, 'import tensorflow as tf\n'), ((1871, 1900), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['mean_input', '(0)'], {}), '(mean_input, 0)\n', (1885, 1900), True, 'import tensorflow as tf\n'), ((1938, 1968), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[dim]'], {}), "('float', [dim])\n", (1952, 1968), True, 'import tensorflow as tf\n'), ((1982, 2012), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[dim]'], {}), "('float', [dim])\n", (1996, 2012), True, 'import tensorflow as tf\n'), ((2219, 2258), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[noofclusters]'], {}), "('float', [noofclusters])\n", (2233, 2258), True, 'import tensorflow as tf\n'), ((2288, 2320), 'tensorflow.argmin', 'tf.argmin', (['centroid_distances', '(0)'], {}), '(centroid_distances, 0)\n', (2297, 2320), True, 'import tensorflow as tf\n'), ((2472, 2505), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2503, 2505), True, 'import tensorflow as tf\n'), ((4359, 4387), 'numpy.random.randn', 'np.random.randn', (['sampleNo', '(2)'], {}), '(sampleNo, 2)\n', (4374, 4387), True, 'import numpy as np\n'), ((1008, 1047), 'tensorflow.Variable', 'tf.Variable', (['vectors[vector_indices[i]]'], {}), '(vectors[vector_indices[i]])\n', (1019, 1047), True, 'import tensorflow as tf\n'), ((1387, 1401), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (1398, 1401), True, 'import tensorflow as tf\n'), ((1295, 1330), 'tensorflow.assign', 'tf.assign', (['centroid', 'centroid_value'], {}), '(centroid, centroid_value)\n', (1304, 1330), True, 'import tensorflow as tf\n'), ((1617, 1656), 'tensorflow.assign', 'tf.assign', (['assignment', 'assignment_value'], {}), '(assignment, assignment_value)\n', (1626, 1656), True, 'import tensorflow as tf\n'), ((2064, 2083), 'tensorflow.subtract', 'tf.subtract', (['v1', 'v2'], {}), '(v1, v2)\n', (2075, 2083), True, 'import tensorflow as tf\n'), ((3879, 3900), 'numpy.array', 
'array', (['assigned_vects'], {}), '(assigned_vects)\n', (3884, 3900), False, 'from numpy import array\n')] |
"""
error mitigation with extrapolation
"""
from qulacs import Observable
from qulacs import QuantumState, QuantumCircuit
from qulacs.gate import Probabilistic, X, Y, Z
import matplotlib.pyplot as plt
from numpy import polyfit
def error_mitigation_extrapolate_poly(quantum_circuit_list, error_list, initial_state,
                                      obs, n_circuit_sample = 1000, return_full = False):
    """error_mitigation_extrapolate_poly

    Estimate the zero-noise expectation value of an observable by fitting
    a polynomial through the averages measured at several error rates and
    extrapolating to error rate zero.

    Args:
        quantum_circuit_list (:class:`list`):
            list of quantum circuits with different error rates
        initial_state (:class:`qulacs.QuantumState`):
            initial state fed to every circuit
        error_list (:class:`list`):
            list of error rates, one per quantum circuit
        obs (:class:`qulacs.Observable`):
            measured observable
        n_circuit_sample (:class:`int`):
            number of noisy circuit runs averaged per error rate
        return_full (:class:`bool`):
            when True, also return the raw averages and fit coefficients

    Returns:
        :class:`float` when return_full is False (default)
        :class:`tuple` when return_full is True
    """
    exp_array = []
    state = initial_state.copy()
    for quantum_circuit in quantum_circuit_list:
        exp = 0
        # Average the observable over n_circuit_sample noisy runs.
        # (range() replaces the unidiomatic 'for _ in [0]*n' loop.)
        for _ in range(n_circuit_sample):
            state.load(initial_state)
            quantum_circuit.update_quantum_state(state)
            exp += obs.get_expectation_value(state)
        exp_array.append(exp/n_circuit_sample)
    # polyfit returns coefficients highest-degree first, so the last
    # coefficient is the constant term: the zero-error extrapolation.
    degree = len(error_list) - 1
    fit_coefs = polyfit(error_list, exp_array, degree)
    if return_full:
        return fit_coefs[degree], exp_array, fit_coefs
    else:
        return fit_coefs[degree]
def _build_base_circuit(n_qubit):
    """Build the noiseless benchmark circuit: H(0), RY(1, pi/6), CNOT(0,1)."""
    import numpy as np
    circuit = QuantumCircuit(n_qubit)
    circuit.add_H_gate(0)
    circuit.add_RY_gate(1, np.pi/6)
    circuit.add_CNOT_gate(0, 1)
    return circuit

def main():
    """Demonstrate polynomial error extrapolation on a noisy 2-qubit circuit.

    Builds the same base circuit at several depolarizing-noise levels,
    extrapolates the <Z0 Z1> expectation value to zero noise, and plots
    the mitigated, un-mitigated, and true values.
    """
    import numpy as np
    n_qubit = 2
    obs = Observable(n_qubit)
    initial_state = QuantumState(n_qubit)
    obs.add_operator(1, "Z 0 Z 1")
    circuit_list = []
    p_list = [0.01, 0.02, 0.03, 0.04, 0.06, 0.08]
    # Prepare the circuit list: one base circuit per error rate, with
    # depolarizing noise applied to both qubits. The previously duplicated
    # circuit-construction code now lives in _build_base_circuit().
    for p in p_list:
        circuit = _build_base_circuit(n_qubit)
        circuit.add_gate(Probabilistic([p/4, p/4, p/4], [X(0), Y(0), Z(0)]))  # depolarizing noise
        circuit.add_gate(Probabilistic([p/4, p/4, p/4], [X(1), Y(1), Z(1)]))  # depolarizing noise
        circuit_list.append(circuit)
    # Get the mitigated (zero-noise extrapolated) output.
    mitigated, non_mitigated_array, fit_coefs = error_mitigation_extrapolate_poly(
        circuit_list, p_list, initial_state, obs,
        n_circuit_sample = 100000, return_full = True)
    # Plot the result.
    p = np.linspace(0, max(p_list), 100)
    plt.plot(p, np.polyval(fit_coefs, p), linestyle = "--", label = "linear fit")
    plt.scatter(p_list, non_mitigated_array, label = "un-mitigated")
    plt.scatter(0, mitigated, label = "mitigated output")
    # Prepare the clean (noiseless) reference result.
    state = QuantumState(n_qubit)
    circuit = _build_base_circuit(n_qubit)
    circuit.update_quantum_state(state)
    plt.scatter(0, obs.get_expectation_value(state), label = "True output")
    plt.xlabel("error rate")
    plt.ylabel("expectation value")
    plt.legend()
    plt.show()
# Script entry point: run the demonstration when executed directly.
if __name__ == '__main__':
    main()
| [
"qulacs.QuantumState",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"qulacs.gate.X",
"qulacs.gate.Z",
"numpy.polyval",
"qulacs.Observable",
"matplotlib.pyplot.scatter",
"qulacs.gate.Y",
"qulacs.QuantumCircuit",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1650, 1669), 'qulacs.Observable', 'Observable', (['n_qubit'], {}), '(n_qubit)\n', (1660, 1669), False, 'from qulacs import Observable\n'), ((1691, 1712), 'qulacs.QuantumState', 'QuantumState', (['n_qubit'], {}), '(n_qubit)\n', (1703, 1712), False, 'from qulacs import QuantumState, QuantumCircuit\n'), ((2623, 2685), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p_list', 'non_mitigated_array'], {'label': '"""un-mitigated"""'}), "(p_list, non_mitigated_array, label='un-mitigated')\n", (2634, 2685), True, 'import matplotlib.pyplot as plt\n'), ((2693, 2744), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', 'mitigated'], {'label': '"""mitigated output"""'}), "(0, mitigated, label='mitigated output')\n", (2704, 2744), True, 'import matplotlib.pyplot as plt\n'), ((2793, 2814), 'qulacs.QuantumState', 'QuantumState', (['n_qubit'], {}), '(n_qubit)\n', (2805, 2814), False, 'from qulacs import QuantumState, QuantumCircuit\n'), ((2830, 2853), 'qulacs.QuantumCircuit', 'QuantumCircuit', (['n_qubit'], {}), '(n_qubit)\n', (2844, 2853), False, 'from qulacs import QuantumState, QuantumCircuit\n'), ((3074, 3098), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""error rate"""'], {}), "('error rate')\n", (3084, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3104, 3135), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""expectation value"""'], {}), "('expectation value')\n", (3114, 3135), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3153), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3151, 3153), True, 'import matplotlib.pyplot as plt\n'), ((3159, 3169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3167, 3169), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1920), 'qulacs.QuantumCircuit', 'QuantumCircuit', (['n_qubit'], {}), '(n_qubit)\n', (1911, 1920), False, 'from qulacs import QuantumState, QuantumCircuit\n'), ((2552, 2576), 'numpy.polyval', 'np.polyval', (['fit_coefs', 'p'], {}), '(fit_coefs, p)\n', (2562, 2576), True, 'import 
numpy as np\n'), ((2088, 2092), 'qulacs.gate.X', 'X', (['(0)'], {}), '(0)\n', (2089, 2092), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n'), ((2094, 2098), 'qulacs.gate.Y', 'Y', (['(0)'], {}), '(0)\n', (2095, 2098), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n'), ((2100, 2104), 'qulacs.gate.Z', 'Z', (['(0)'], {}), '(0)\n', (2101, 2104), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n'), ((2186, 2190), 'qulacs.gate.X', 'X', (['(1)'], {}), '(1)\n', (2187, 2190), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n'), ((2192, 2196), 'qulacs.gate.Y', 'Y', (['(1)'], {}), '(1)\n', (2193, 2196), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n'), ((2198, 2202), 'qulacs.gate.Z', 'Z', (['(1)'], {}), '(1)\n', (2199, 2202), False, 'from qulacs.gate import Probabilistic, X, Y, Z\n')] |
import numpy as np
from sherlockpipe.scoring.SignalSelector import SignalSelector, SignalSelection
class BasicSignalSelector(SignalSelector):
    """
    Selects the signal with best SNR
    """
    def __init__(self):
        super().__init__()

    def select(self, transit_results, snr_min, detrend_method, wl):
        """Pick the transit result with the highest SNR.

        NaN SNRs are coerced to zero before the comparison, and the
        selection scores 1 only when the best SNR exceeds snr_min.
        """
        # SNR of every detrend run, with NaNs mapped to 0.
        snr_values = np.nan_to_num(
            [result.snr for result in transit_results.values()])
        best_index = np.nanargmax(snr_values)
        best_snr = np.nanmax(snr_values)
        # Binary score: 1 when the SNR threshold is met, 0 otherwise.
        # (SDE / FAP criteria are not checked by this basic selector.)
        score = 1 if best_snr > snr_min else 0
        return SignalSelection(score, best_index, transit_results[best_index])
"sherlockpipe.scoring.SignalSelector.SignalSelection",
"numpy.nanargmax",
"numpy.nanmax"
] | [((492, 515), 'numpy.nanmax', 'np.nanmax', (['detrends_snr'], {}), '(detrends_snr)\n', (501, 515), True, 'import numpy as np\n'), ((548, 574), 'numpy.nanargmax', 'np.nanargmax', (['detrends_snr'], {}), '(detrends_snr)\n', (560, 574), True, 'import numpy as np\n'), ((818, 888), 'sherlockpipe.scoring.SignalSelector.SignalSelection', 'SignalSelection', (['best_signal_score', 'best_signal_snr_index', 'best_signal'], {}), '(best_signal_score, best_signal_snr_index, best_signal)\n', (833, 888), False, 'from sherlockpipe.scoring.SignalSelector import SignalSelector, SignalSelection\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.understanding.object_detection.core.visualization_utils.
Testing with visualization in the following colab:
https://drive.google.com/a/google.com/file/d/0B5HnKS_hMsNARERpU3MtU3I5RFE/view?usp=sharing
"""
import os
import numpy as np
import PIL.Image as Image
import tensorflow as tf
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
class VisualizationUtilsTest(tf.test.TestCase):
def create_colorful_test_image(self):
"""This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.
"""
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
def test_draw_bounding_box_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
xmax)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_box_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image_array(
test_image, ymin, xmin, ymax, xmax)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
[[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape, images_with_boxes_np.shape)
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
print('Writing output image %d to %s' % (i, output_file))
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
visualization_utils.draw_keypoints_on_image(test_image, keypoints)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
visualization_utils.draw_keypoints_on_image_array(test_image, keypoints)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
expected_result = np.asarray([[[0, 0, 0], [0, 0, 127]],
[[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
visualization_utils.draw_mask_on_image_array(test_image, mask,
color='Blue', alpha=.5)
self.assertAllEqual(test_image, expected_result)
def test_add_cdf_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
cdf_image_summary.eval()
if __name__ == '__main__':
tf.test.main()
| [
"object_detection.utils.visualization_utils.draw_bounding_box_on_image_array",
"numpy.array",
"object_detection.utils.visualization_utils.add_cdf_image_summary",
"tensorflow.Graph",
"numpy.asarray",
"object_detection.utils.visualization_utils.draw_bounding_boxes_on_image",
"numpy.stack",
"numpy.concat... | [((7580, 7594), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (7592, 7594), True, 'import tensorflow as tf\n'), ((1402, 1445), 'numpy.full', 'np.full', (['[100, 200, 1]', '(255)'], {'dtype': 'np.uint8'}), '([100, 200, 1], 255, dtype=np.uint8)\n', (1409, 1445), True, 'import numpy as np\n'), ((1458, 1501), 'numpy.full', 'np.full', (['[100, 200, 1]', '(128)'], {'dtype': 'np.uint8'}), '([100, 200, 1], 128, dtype=np.uint8)\n', (1465, 1501), True, 'import numpy as np\n'), ((1512, 1553), 'numpy.full', 'np.full', (['[100, 200, 1]', '(0)'], {'dtype': 'np.uint8'}), '([100, 200, 1], 0, dtype=np.uint8)\n', (1519, 1553), True, 'import numpy as np\n'), ((1564, 1609), 'numpy.concatenate', 'np.concatenate', (['(ch255, ch128, ch128)'], {'axis': '(2)'}), '((ch255, ch128, ch128), axis=2)\n', (1578, 1609), True, 'import numpy as np\n'), ((1620, 1663), 'numpy.concatenate', 'np.concatenate', (['(ch255, ch255, ch0)'], {'axis': '(2)'}), '((ch255, ch255, ch0), axis=2)\n', (1634, 1663), True, 'import numpy as np\n'), ((1674, 1717), 'numpy.concatenate', 'np.concatenate', (['(ch255, ch0, ch255)'], {'axis': '(2)'}), '((ch255, ch0, ch255), axis=2)\n', (1688, 1717), True, 'import numpy as np\n'), ((1728, 1773), 'numpy.concatenate', 'np.concatenate', (['(ch128, ch128, ch128)'], {'axis': '(2)'}), '((ch128, ch128, ch128), axis=2)\n', (1742, 1773), True, 'import numpy as np\n'), ((1784, 1818), 'numpy.concatenate', 'np.concatenate', (['(imr, img)'], {'axis': '(1)'}), '((imr, img), axis=1)\n', (1798, 1818), True, 'import numpy as np\n'), ((1829, 1863), 'numpy.concatenate', 'np.concatenate', (['(imb, imw)'], {'axis': '(1)'}), '((imb, imw), axis=1)\n', (1843, 1863), True, 'import numpy as np\n'), ((1876, 1910), 'numpy.concatenate', 'np.concatenate', (['(imu, imd)'], {'axis': '(0)'}), '((imu, imd), axis=0)\n', (1890, 1910), True, 'import numpy as np\n'), ((2042, 2069), 'PIL.Image.fromarray', 'Image.fromarray', (['test_image'], {}), '(test_image)\n', (2057, 2069), True, 
'import PIL.Image as Image\n'), ((2191, 2277), 'object_detection.utils.visualization_utils.draw_bounding_box_on_image', 'visualization_utils.draw_bounding_box_on_image', (['test_image', 'ymin', 'xmin', 'ymax', 'xmax'], {}), '(test_image, ymin, xmin, ymax,\n xmax)\n', (2237, 2277), False, 'from object_detection.utils import visualization_utils\n'), ((2729, 2821), 'object_detection.utils.visualization_utils.draw_bounding_box_on_image_array', 'visualization_utils.draw_bounding_box_on_image_array', (['test_image', 'ymin', 'xmin', 'ymax', 'xmax'], {}), '(test_image, ymin, xmin,\n ymax, xmax)\n', (2781, 2821), False, 'from object_detection.utils import visualization_utils\n'), ((3123, 3150), 'PIL.Image.fromarray', 'Image.fromarray', (['test_image'], {}), '(test_image)\n', (3138, 3150), True, 'import PIL.Image as Image\n'), ((3217, 3273), 'numpy.array', 'np.array', (['[[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]]'], {}), '([[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]])\n', (3225, 3273), True, 'import numpy as np\n'), ((3301, 3368), 'object_detection.utils.visualization_utils.draw_bounding_boxes_on_image', 'visualization_utils.draw_bounding_boxes_on_image', (['test_image', 'boxes'], {}), '(test_image, boxes)\n', (3349, 3368), False, 'from object_detection.utils import visualization_utils\n'), ((3720, 3776), 'numpy.array', 'np.array', (['[[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]]'], {}), '([[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]])\n', (3728, 3776), True, 'import numpy as np\n'), ((3804, 3877), 'object_detection.utils.visualization_utils.draw_bounding_boxes_on_image_array', 'visualization_utils.draw_bounding_boxes_on_image_array', (['test_image', 'boxes'], {}), '(test_image, boxes)\n', (3858, 3877), False, 'from object_detection.utils import visualization_utils\n'), ((4278, 4320), 'os.path.join', 'os.path.join', (['_TESTDATA_PATH', '"""image1.jpg"""'], {}), "(_TESTDATA_PATH, 'image1.jpg')\n", (4290, 4320), False, 'import os\n'), ((4380, 4418), 'numpy.stack', 
'np.stack', (['(image_np, image_np)'], {'axis': '(0)'}), '((image_np, image_np), axis=0)\n', (4388, 4418), True, 'import numpy as np\n'), ((5776, 5803), 'PIL.Image.fromarray', 'Image.fromarray', (['test_image'], {}), '(test_image)\n', (5791, 5803), True, 'import PIL.Image as Image\n'), ((5930, 5996), 'object_detection.utils.visualization_utils.draw_keypoints_on_image', 'visualization_utils.draw_keypoints_on_image', (['test_image', 'keypoints'], {}), '(test_image, keypoints)\n', (5973, 5996), False, 'from object_detection.utils import visualization_utils\n'), ((6403, 6475), 'object_detection.utils.visualization_utils.draw_keypoints_on_image_array', 'visualization_utils.draw_keypoints_on_image_array', (['test_image', 'keypoints'], {}), '(test_image, keypoints)\n', (6452, 6475), False, 'from object_detection.utils import visualization_utils\n'), ((6717, 6793), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]'], {'dtype': 'np.uint8'}), '([[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)\n', (6727, 6793), True, 'import numpy as np\n'), ((6834, 6878), 'numpy.asarray', 'np.asarray', (['[[0, 1], [1, 1]]'], {'dtype': 'np.uint8'}), '([[0, 1], [1, 1]], dtype=np.uint8)\n', (6844, 6878), True, 'import numpy as np\n'), ((6924, 7011), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 0], [0, 0, 127]], [[0, 0, 127], [0, 0, 127]]]'], {'dtype': 'np.uint8'}), '([[[0, 0, 0], [0, 0, 127]], [[0, 0, 127], [0, 0, 127]]], dtype=np\n .uint8)\n', (6934, 7011), True, 'import numpy as np\n'), ((7045, 7136), 'object_detection.utils.visualization_utils.draw_mask_on_image_array', 'visualization_utils.draw_mask_on_image_array', (['test_image', 'mask'], {'color': '"""Blue"""', 'alpha': '(0.5)'}), "(test_image, mask, color='Blue',\n alpha=0.5)\n", (7089, 7136), False, 'from object_detection.utils import visualization_utils\n'), ((7343, 7414), 'object_detection.utils.visualization_utils.add_cdf_image_summary', 'visualization_utils.add_cdf_image_summary', 
(['values', '"""PositiveAnchorLoss"""'], {}), "(values, 'PositiveAnchorLoss')\n", (7384, 7414), False, 'from object_detection.utils import visualization_utils\n'), ((4345, 4362), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (4355, 4362), True, 'import PIL.Image as Image\n'), ((4476, 4520), 'tensorflow.constant', 'tf.constant', ([], {'value': 'images_np', 'dtype': 'tf.uint8'}), '(value=images_np, dtype=tf.uint8)\n', (4487, 4520), True, 'import tensorflow as tf\n'), ((4535, 4651), 'tensorflow.constant', 'tf.constant', (['[[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]], [[0.25, 0.25, 0.75, 0.75],\n [0.1, 0.3, 0.6, 1.0]]]'], {}), '([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]], [[0.25, 0.25,\n 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])\n', (4546, 4651), True, 'import tensorflow as tf\n'), ((4691, 4736), 'tensorflow.constant', 'tf.constant', (['[[1, 1], [1, 2]]'], {'dtype': 'tf.int64'}), '([[1, 1], [1, 2]], dtype=tf.int64)\n', (4702, 4736), True, 'import tensorflow as tf\n'), ((4752, 4789), 'tensorflow.constant', 'tf.constant', (['[[0.8, 0.1], [0.6, 0.5]]'], {}), '([[0.8, 0.1], [0.6, 0.5]])\n', (4763, 4789), True, 'import tensorflow as tf\n'), ((4828, 4965), 'object_detection.utils.visualization_utils.draw_bounding_boxes_on_image_tensors', 'visualization_utils.draw_bounding_boxes_on_image_tensors', (['images_tensor', 'boxes', 'classes', 'scores', 'category_index'], {'min_score_thresh': '(0.2)'}), '(images_tensor,\n boxes, classes, scores, category_index, min_score_thresh=0.2)\n', (4884, 4965), False, 'from object_detection.utils import visualization_utils\n'), ((7439, 7484), 'tensorflow.get_collection', 'tf.get_collection', ([], {'key': 'tf.GraphKeys.SUMMARIES'}), '(key=tf.GraphKeys.SUMMARIES)\n', (7456, 7484), True, 'import tensorflow as tf\n'), ((4429, 4439), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4437, 4439), True, 'import tensorflow as tf\n'), ((5106, 5139), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], 
{}), '()\n', (5137, 5139), True, 'import tensorflow as tf\n'), ((5581, 5626), 'PIL.Image.fromarray', 'Image.fromarray', (['images_with_boxes_np[i, ...]'], {}), '(images_with_boxes_np[i, ...])\n', (5596, 5626), True, 'import PIL.Image as Image\n')] |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class BiLSTM(nn.Module):
def __init__(self, hidden_size, vocab_size, class_num=1, layer_num=3):
super(BiLSTM, self).__init__()
self.H = hidden_size
self.V = vocab_size
self.C = class_num
self.L = layer_num
self.embed = nn.Embedding(self.V, self.H, padding_idx=0)
# initialize embedding
scope = np.sqrt(3.0 / self.embed.weight.size(1))
nn.init.uniform_(self.embed.weight, -scope, scope)
self.bilstm_word = nn.LSTM(
input_size=self.H,
hidden_size=self.H,
num_layers=self.L,
bidirectional=True,
batch_first=True,
bias=True)
self.bilstm_sent = nn.LSTM(
input_size=self.H*2,
hidden_size=self.H*2,
num_layers=self.L,
bidirectional=True,
batch_first=True,
bias=True)
self.out_projection = nn.Linear(in_features=self.H * 4, out_features=self.C, bias=True)
# initialize linear
scope = np.sqrt(6.0 / (self.H * 2 + self.C))
nn.init.uniform_(self.out_projection.weight, -scope, scope)
self.out_projection.bias.data.zero_()
self.h0_word = torch.randn(self.L * 2, 1, self.H).float().cuda()
self.c0_word = torch.randn(self.L * 2, 1, self.H).float().cuda()
self.h0_sent = torch.randn(self.L * 2, 1, self.H*2).float().cuda()
self.c0_sent = torch.randn(self.L * 2, 1, self.H*2).float().cuda()
self.loss = nn.BCELoss()
self.sigmoid = nn.Sigmoid()
def forward(self, x_ids, sents_len, words_len, y_mask):
batch_size = len(x_ids)
x_ = []
x_length_max = max(sents_len)
for i in range(batch_size):
# 单词级别的编码
x = self.embed(x_ids[i]) # [N, S, H]
x_length = words_len[i]
packed_x = pack_padded_sequence(x, x_length, batch_first=True, enforce_sorted=False)
hidden = (self.h0_word.repeat(1, x.size(0), 1), self.c0_word.repeat(1, x.size(0), 1))
x, _ = self.bilstm_word(packed_x, hidden)
x, _ = pad_packed_sequence(x, batch_first=True) # [N, S, H*2]
x_.append(torch.cat(
[torch.mean(x, dim=1), torch.zeros([x_length_max-x.size(0), self.H*2], dtype=torch.float32).cuda()], dim=0)
)
# 句子级别的编码
x = torch.stack(x_, dim=0) # [N, S, H*2]
packed_x = pack_padded_sequence(x, sents_len, batch_first=True, enforce_sorted=False)
hidden = (self.h0_sent.repeat(1, x.size(0), 1), self.c0_sent.repeat(1, x.size(0), 1))
x, _ = self.bilstm_sent(packed_x, hidden)
x, _ = pad_packed_sequence(x, batch_first=True) # [N, S, H*4]
logits = self.sigmoid(self.out_projection(x)) # [N, S, 1]
logits = logits.squeeze(-1) * y_mask
return logits
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_loss = pd.read_csv('../output/infos/loss.csv', header=0)
df_metrics = pd.read_csv("../output/infos/metrics.csv", header=0)
loss_train = df_loss["train_loss"].values
loss_valid = df_loss["valid_loss"].values
train_p = df_metrics["train_p"].values
train_r = df_metrics["train_r"].values
train_f1 = df_metrics["train_f1"].values
train_acc = df_metrics["train_acc"].values
valid_p = df_metrics["valid_p"].values
valid_r = df_metrics["valid_r"].values
valid_f1 = df_metrics["valid_f1"].values
valid_acc = df_metrics["valid_acc"].values
val_zi = df_metrics["val_zi"].values
# val_r1_zi = df_metrics["val_r1_zi"].values
# val_r2_zi = df_metrics["val_r2_zi"].values
# val_rl_zi = df_metrics["val_rl_zi"].values
val_ci = df_metrics["val_ci"].values
# val_r1_ci = df_metrics["val_r1_ci"].values
# val_r2_ci = df_metrics["val_r2_ci"].values
# val_rl_ci = df_metrics["val_rl_ci"].values
num_epoch = len(loss_train)
steps = np.arange(1, num_epoch +1)
plt.subplot(2, 2, 1)
plt.plot(steps, loss_train, 'b--', label="train loss")
plt.plot(steps, loss_valid, 'r--', label="valid loss")
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.subplot(2, 2, 2)
plt.plot(steps, val_zi, 'kx-.', label="rouge based on single words")
plt.plot(steps, val_ci, 'ko:', label="rouge based on words")
plt.xlabel('epoch')
plt.ylabel('rouge score')
plt.legend()
plt.subplot(2, 2, 3)
plt.plot(steps, train_p, 'g--', label="train precision")
plt.plot(steps, train_r, 'y--', label="train recall")
plt.plot(steps, train_f1, 'bo--', label="train f1-score")
plt.plot(steps, train_acc, 'r--', label="train accuracy")
plt.xlabel('epoch')
plt.ylabel('train metrics')
plt.legend()
plt.subplot(2, 2, 4)
plt.plot(steps, valid_p, 'g--', label="valid precision")
plt.plot(steps, valid_r, 'y--', label="valid recall")
plt.plot(steps, valid_f1, 'bo--', label="valid f1-score")
plt.plot(steps, valid_acc, 'r--', label="valid accuracy")
plt.xlabel('epoch')
plt.ylabel('valid metrics')
plt.legend()
plt.show()
from _01_data import *
from _02_extractive_bi_lstm import *
from tensorboardX import SummaryWriter
from torch import optim
from rouge import Rouge
import csv
import sys
sys.setrecursionlimit(10000000)
def train_extractive(out_path, hidden_size, learning_rate, max_epoch, batch_size):
writer = SummaryWriter(log_dir='events/')
train_data, vocab_size = load_data(mode="train")
GLOBAL_STEP = 0
model = BiLSTM(hidden_size, vocab_size).cuda()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
with open("../output/infos/loss.csv", 'w') as f:
csv_write = csv.writer(f)
csv_head = ["train_loss", "valid_loss"]
csv_write.writerow(csv_head)
with open("../output/infos/metrics.csv", 'w') as f:
csv_write = csv.writer(f)
csv_head = [
"train_p", "train_r", "train_f1", "train_acc",
"valid_p", "valid_r", "valid_f1", "valid_acc",
"val_zi", "val_r1_zi", "val_r2_zi", "val_rl_zi",
"val_ci", "val_r1_ci", "val_r2_ci", "val_rl_ci",
]
csv_write.writerow(csv_head)
for ep in range(max_epoch):
record_metrics = []
record_loss = []
for batch_index in get_batch_index(len(train_data), batch_size):
# for batch_index in get_batch_index(64, batch_size):
batch_data = get_batch_data(train_data, batch_index)
xs, _, _, sents_len, words_len, y, y_mask = process_batch_data(batch_data)
y = torch.FloatTensor(y).cuda()
y_mask = torch.FloatTensor(y_mask).cuda()
optimizer.zero_grad()
logits = model(
[torch.LongTensor(x).cuda() for x in xs],
sents_len,
words_len,
y_mask
) # [N, S]
loss = model.loss(logits, y)
record_loss.append(loss.item())
p, r, f1, a = get_metrics(logits, y.int(), y_mask.int())
record_metrics.append([p, r, f1, a])
writer.add_scalar('training_loss', loss.item(), GLOBAL_STEP)
GLOBAL_STEP += 1
# backward
loss.backward()
optimizer.step()
avg_metrics = np.mean(np.array(record_metrics), axis=0).tolist()
avg_loss = np.mean(np.array(record_loss))
# do validation every epoch
valid_loss, valid_metric = valid_extractive(model, writer, batch_size, GLOBAL_STEP)
# write records
with open("../output/infos/loss.csv", 'a+', newline="") as f:
csv_write = csv.writer(f)
csv_write.writerow([avg_loss, valid_loss])
with open("../output/infos/metrics.csv", 'a+', newline="") as f:
csv_write = csv.writer(f)
csv_write.writerow(avg_metrics + valid_metric)
torch.save(model, out_path + "/models/hierBilstmExt-epoch_{}.ckpt".format(ep))
def get_metrics(logits, y, y_mask):
eps = 1e-9
y_pred = (logits > 0.5).int()
TP = torch.sum((y_pred & y) * y_mask).float() + eps
TN = torch.sum(((1-y_pred) & (1-y)) * y_mask).float() + eps
FP = torch.sum((y_pred & (1-y)) * y_mask).float() + eps
FN = torch.sum(((1-y_pred) & y) * y_mask).float() + eps
p = TP.div(TP + FP)
r = TP.div(TP + FN)
f1 = (2 * p * r).div(p + r)
a = (TP + TN).div(TP + TN + FP + FN)
return p.item(), r.item(), f1.item(), a.item()
def print_rouge_scores(pred_path, true_path):
get_rouge_scores = Rouge().get_scores
with open(pred_path, 'r') as f:
summaries = f.readlines()
with open(true_path, 'r') as f:
ground_truth = f.readlines()
assert len(summaries) == len(ground_truth)
all_scores = [] # 看不同的长度,那个rouge得分高
for i in range(len(summaries)):
# rouge_scores = get_rouge_scores(summaries[i][j], ground_truth[i])[0]
hyps = ' '.join(list(summaries[i]))
refs = ' '.join(list(ground_truth[i]))
rouge_scores = get_rouge_scores(hyps, refs)[0]
r1f = rouge_scores["rouge-1"]["f"]
r2f = rouge_scores["rouge-2"]["f"]
rlf = rouge_scores["rouge-l"]["f"]
temp = r1f * 0.2 + r2f * 0.4 + rlf * 0.4
all_scores.append([temp, r1f, r2f, rlf])
rouge_based_on_zi = np.mean(np.array(all_scores), axis=0).tolist()
# jieba 分词
all_scores = [] # 看不同的长度,那个rouge得分高
for i in range(len(summaries)):
# rouge_scores = get_rouge_scores(summaries[i][j], ground_truth[i])[0]
hyps = ' '.join([w for w in jieba.cut(summaries[i])])
refs = ' '.join([w for w in jieba.cut(ground_truth[i])])
rouge_scores = get_rouge_scores(hyps, refs)[0]
r1f = rouge_scores["rouge-1"]["f"]
r2f = rouge_scores["rouge-2"]["f"]
rlf = rouge_scores["rouge-l"]["f"]
temp = r1f * 0.2 + r2f * 0.4 + rlf * 0.4
all_scores.append([temp, r1f, r2f, rlf])
rouge_based_on_ci = np.mean(np.array(all_scores), axis=0).tolist()
return rouge_based_on_zi + rouge_based_on_ci
def valid_extractive(model, writer, batch_size, GLOBAL_STEP):
test_data, _ = load_data(mode="test")
summaries_record = []
ground_truth = []
metrics = []
with torch.no_grad():
losses = []
for batch_index in get_batch_index(len(test_data), batch_size):
# for batch_index in get_batch_index(64, batch_size):
batch_data = get_batch_data(test_data, batch_index)
xs, sources, summary, sents_len, words_len, y, y_mask = process_batch_data(batch_data)
y = torch.FloatTensor(y).cuda()
y_mask = torch.FloatTensor(y_mask).cuda()
logits = model(
[torch.LongTensor(x).cuda() for x in xs],
sents_len,
words_len,
y_mask
) # [N, S]
loss = model.loss(logits, y)
p, r, f1, a = get_metrics(logits, y.int(), y_mask.int())
losses.append(loss.item())
metrics.append([p, r, f1, a])
_, src_index = torch.topk(logits, 5, dim=-1)
src_index = src_index.data.cpu().numpy().tolist()
for i in range(batch_size):
summary_i = ""
# summaries_i = []
for j in src_index[i]:
summary_i += sources[i][j] + ' '
# summaries_i.append(summary_i.strip())
summaries_record.append(summary_i)
ground_truth.extend(summary)
pred_path = '../output/preds/pred_y.txt'
true_path = '../output/preds/true_y.txt'
with open(pred_path, 'w', encoding='gbk') as f:
f.writelines([s + '\n' for s in summaries_record])
with open(true_path, 'w', encoding='gbk') as f:
f.writelines([s + '\n' for s in ground_truth])
avg_loss = sum(losses) / len(losses)
writer.add_scalar('validation_loss', avg_loss, GLOBAL_STEP)
rouges = print_rouge_scores(pred_path, true_path)
metrics = np.mean(np.array(metrics), axis=0).tolist()
return avg_loss, metrics + rouges
if __name__ == "__main__":
train_extractive('../output/', 256, 1e-5, 50, 32)
import json
import re
import jieba
import pickle
import random
import numpy as np
import torch
input_path = "../sfzy_small/sfzy_small.json"
output_path = "/output/result.json"
def get_batch_index(num_sample, batch_size):
num_batchs = np.ceil(num_sample / batch_size)
i = 0
indices = list(range(num_sample))
random.shuffle(indices)
while (i+1) * batch_size <= num_sample:
yield indices[i*batch_size: min((i+1)*batch_size, num_sample)]
i += 1
def get_batch_data(data, batch_index):
return [data[i] for i in batch_index]
def process_batch_data(batch_data):
summary = []
labels = []
sources = []
sents = []
sents_len = []
words_len = []
for d in batch_data:
words_len.append([])
summary.append(d[1])
labels.append(d[4])
sources.append(d[2])
sents.append(d[3])
sents_len.append(len(d[3]))
for i, s in enumerate(d[3]):
if len(s):
words_len[-1].append(len(s))
else:
sents[-1].pop(i)
labels[-1].pop(i)
sources[-1].pop(i)
batch_size = len(batch_data)
with open("../input/dicts.pkl", "rb") as p:
vocab, dict_w2i, dict_i2w = pickle.load(p)
xs = []
for i in range(batch_size):
max_sent_len = max(words_len[i])
x = np.zeros([sents_len[i], max_sent_len], dtype=np.int32)
for j, sent in enumerate(sents[i]):
for k, word in enumerate(sent):
word_id = dict_w2i[word] if word in dict_w2i.keys() else dict_w2i["[UNK]"]
x[j, k] = word_id
xs.append(x)
max_sent_num = max(sents_len)
labels_ = np.zeros([batch_size, max_sent_num], dtype=np.float32)
labels_mask = np.zeros([batch_size, max_sent_num], dtype=np.int32)
for i, label_i in enumerate(labels):
for j, _ in enumerate(label_i):
labels_[i, j] = labels[i][j]
labels_mask[i, j] = 1
return xs, sources, summary, sents_len, words_len, labels_, labels_mask
def load_data(mode="train"):
if mode == "train":
with open('../input/train.pkl', 'rb') as p:
data = pickle.load(p)
else:
with open('../input/test.pkl', 'rb') as p:
data = pickle.load(p)
with open("../input/dicts.pkl", "rb") as p:
vocab, _, _ = pickle.load(p)
return data, len(vocab)
def construct_dicts(words, vocab_size=30000, min_tf=1):
dict_full = {}
for w in words:
if w in dict_full: dict_full[w] += 1
else: dict_full[w] = 1
vocab_full = sorted(dict_full.items(), key=lambda x: x[1], reverse=True)
vocab = ["[PAD]", "[UNK]"]
for v in vocab_full:
if v[1] >= min_tf: vocab.append(v[0])
vocab = vocab[:vocab_size]
dict_i2w = dict(zip(list(range(len(vocab))), vocab))
dict_w2i = dict(zip(vocab, list(range(len(vocab)))))
with open("../input/dicts.pkl", "wb") as p:
pickle.dump((vocab, dict_w2i, dict_i2w), p)
def processSourceText(text, all_words):
# 逐个读取列表元素
sents = []
labels = []
src = []
for t in text:
sent = t["sentence"].replace('\u3000', '').replace('\x20', '').replace('\xa0', '').replace(' ', '')
src.append(sent)
sent = re.sub('[0-9a-zA-Z]+', ' # ', sent)
sents.append(sent)
labels.append(t["label"])
sents_cut = []
for i in range(len(sents)):
sent_cut = [word for word in jieba.cut(sents[i], cut_all=False) if word != ' ']
sents_cut.append(sent_cut)
all_words.extend(sent_cut)
return src, sents_cut, labels
if __name__ == "__main__":
with open(input_path, 'r', encoding="utf8") as f:
processed_data = []
all_words = []
for line in f:
data = json.loads(line)
id = data.get("id") # a string
text = data.get("text") # list of dicts
summary = data.get("summary") # a string
sents, sents_cut, labels = processSourceText(text, all_words)
processed_data.append([id, summary, sents, sents_cut, labels])
random.shuffle(processed_data)
train_size = int(len(processed_data) * 0.95)
with open('../input/train.pkl', 'wb') as p:
pickle.dump(processed_data[:train_size], p)
with open('../input/test.pkl', 'wb') as p:
pickle.dump(processed_data[train_size:], p)
# with open('../input/train.pkl', 'rb') as p:
# train_data = pickle.load(p)
# with open('../input/test.pkl', 'rb') as p:
# test_data = pickle.load(p)
construct_dicts(all_words) | [
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"torch.LongTensor",
"numpy.array",
"torch.sum",
"numpy.arange",
"torch.nn.Sigmoid",
"tensorboardX.SummaryWriter",
"torch.nn.LSTM",
"torch.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"rouge.Rouge",
"torch.nn.uti... | [((2690, 2739), 'pandas.read_csv', 'pd.read_csv', (['"""../output/infos/loss.csv"""'], {'header': '(0)'}), "('../output/infos/loss.csv', header=0)\n", (2701, 2739), True, 'import pandas as pd\n'), ((2753, 2805), 'pandas.read_csv', 'pd.read_csv', (['"""../output/infos/metrics.csv"""'], {'header': '(0)'}), "('../output/infos/metrics.csv', header=0)\n", (2764, 2805), True, 'import pandas as pd\n'), ((3600, 3627), 'numpy.arange', 'np.arange', (['(1)', '(num_epoch + 1)'], {}), '(1, num_epoch + 1)\n', (3609, 3627), True, 'import numpy as np\n'), ((3628, 3648), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3639, 3648), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3703), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'loss_train', '"""b--"""'], {'label': '"""train loss"""'}), "(steps, loss_train, 'b--', label='train loss')\n", (3657, 3703), True, 'import matplotlib.pyplot as plt\n'), ((3704, 3758), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'loss_valid', '"""r--"""'], {'label': '"""valid loss"""'}), "(steps, loss_valid, 'r--', label='valid loss')\n", (3712, 3758), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3778), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3769, 3778), True, 'import matplotlib.pyplot as plt\n'), ((3779, 3797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3789, 3797), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3810), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3808, 3810), True, 'import matplotlib.pyplot as plt\n'), ((3812, 3832), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3823, 3832), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3901), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'val_zi', '"""kx-."""'], {'label': '"""rouge based on single words"""'}), "(steps, val_zi, 'kx-.', label='rouge based on single words')\n", 
(3841, 3901), True, 'import matplotlib.pyplot as plt\n'), ((3902, 3962), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'val_ci', '"""ko:"""'], {'label': '"""rouge based on words"""'}), "(steps, val_ci, 'ko:', label='rouge based on words')\n", (3910, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3963, 3982), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3973, 3982), True, 'import matplotlib.pyplot as plt\n'), ((3983, 4008), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rouge score"""'], {}), "('rouge score')\n", (3993, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4009, 4021), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4019, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4023, 4043), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4034, 4043), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4100), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'train_p', '"""g--"""'], {'label': '"""train precision"""'}), "(steps, train_p, 'g--', label='train precision')\n", (4052, 4100), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4154), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'train_r', '"""y--"""'], {'label': '"""train recall"""'}), "(steps, train_r, 'y--', label='train recall')\n", (4109, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4212), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'train_f1', '"""bo--"""'], {'label': '"""train f1-score"""'}), "(steps, train_f1, 'bo--', label='train f1-score')\n", (4163, 4212), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4270), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'train_acc', '"""r--"""'], {'label': '"""train accuracy"""'}), "(steps, train_acc, 'r--', label='train accuracy')\n", (4221, 4270), True, 'import matplotlib.pyplot as plt\n'), ((4271, 4290), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4281, 4290), True, 'import 
matplotlib.pyplot as plt\n'), ((4291, 4318), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""train metrics"""'], {}), "('train metrics')\n", (4301, 4318), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4329, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4333, 4353), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4344, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4410), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'valid_p', '"""g--"""'], {'label': '"""valid precision"""'}), "(steps, valid_p, 'g--', label='valid precision')\n", (4362, 4410), True, 'import matplotlib.pyplot as plt\n'), ((4411, 4464), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'valid_r', '"""y--"""'], {'label': '"""valid recall"""'}), "(steps, valid_r, 'y--', label='valid recall')\n", (4419, 4464), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4522), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'valid_f1', '"""bo--"""'], {'label': '"""valid f1-score"""'}), "(steps, valid_f1, 'bo--', label='valid f1-score')\n", (4473, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4580), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'valid_acc', '"""r--"""'], {'label': '"""valid accuracy"""'}), "(steps, valid_acc, 'r--', label='valid accuracy')\n", (4531, 4580), True, 'import matplotlib.pyplot as plt\n'), ((4581, 4600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4591, 4600), True, 'import matplotlib.pyplot as plt\n'), ((4601, 4628), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""valid metrics"""'], {}), "('valid metrics')\n", (4611, 4628), True, 'import matplotlib.pyplot as plt\n'), ((4629, 4641), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4639, 4641), True, 'import matplotlib.pyplot as plt\n'), ((4643, 4653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4651, 4653), True, 'import 
matplotlib.pyplot as plt\n'), ((4823, 4854), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000000)'], {}), '(10000000)\n', (4844, 4854), False, 'import sys\n'), ((4950, 4982), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""events/"""'}), "(log_dir='events/')\n", (4963, 4982), False, 'from tensorboardX import SummaryWriter\n'), ((11056, 11088), 'numpy.ceil', 'np.ceil', (['(num_sample / batch_size)'], {}), '(num_sample / batch_size)\n', (11063, 11088), True, 'import numpy as np\n'), ((11132, 11155), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (11146, 11155), False, 'import random\n'), ((12285, 12339), 'numpy.zeros', 'np.zeros', (['[batch_size, max_sent_num]'], {'dtype': 'np.float32'}), '([batch_size, max_sent_num], dtype=np.float32)\n', (12293, 12339), True, 'import numpy as np\n'), ((12355, 12407), 'numpy.zeros', 'np.zeros', (['[batch_size, max_sent_num]'], {'dtype': 'np.int32'}), '([batch_size, max_sent_num], dtype=np.int32)\n', (12363, 12407), True, 'import numpy as np\n'), ((394, 437), 'torch.nn.Embedding', 'nn.Embedding', (['self.V', 'self.H'], {'padding_idx': '(0)'}), '(self.V, self.H, padding_idx=0)\n', (406, 437), True, 'import torch.nn as nn\n'), ((516, 566), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.embed.weight', '(-scope)', 'scope'], {}), '(self.embed.weight, -scope, scope)\n', (532, 566), True, 'import torch.nn as nn\n'), ((589, 707), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.H', 'hidden_size': 'self.H', 'num_layers': 'self.L', 'bidirectional': '(True)', 'batch_first': '(True)', 'bias': '(True)'}), '(input_size=self.H, hidden_size=self.H, num_layers=self.L,\n bidirectional=True, batch_first=True, bias=True)\n', (596, 707), True, 'import torch.nn as nn\n'), ((745, 871), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(self.H * 2)', 'hidden_size': '(self.H * 2)', 'num_layers': 'self.L', 'bidirectional': '(True)', 'batch_first': '(True)', 'bias': '(True)'}), 
'(input_size=self.H * 2, hidden_size=self.H * 2, num_layers=self.L,\n bidirectional=True, batch_first=True, bias=True)\n', (752, 871), True, 'import torch.nn as nn\n'), ((908, 973), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.H * 4)', 'out_features': 'self.C', 'bias': '(True)'}), '(in_features=self.H * 4, out_features=self.C, bias=True)\n', (917, 973), True, 'import torch.nn as nn\n'), ((1006, 1042), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (self.H * 2 + self.C))'], {}), '(6.0 / (self.H * 2 + self.C))\n', (1013, 1042), True, 'import numpy as np\n'), ((1045, 1104), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.out_projection.weight', '(-scope)', 'scope'], {}), '(self.out_projection.weight, -scope, scope)\n', (1061, 1104), True, 'import torch.nn as nn\n'), ((1434, 1446), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1444, 1446), True, 'import torch.nn as nn\n'), ((1464, 1476), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1474, 1476), True, 'import torch.nn as nn\n'), ((2165, 2187), 'torch.stack', 'torch.stack', (['x_'], {'dim': '(0)'}), '(x_, dim=0)\n', (2176, 2187), False, 'import torch\n'), ((2216, 2290), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['x', 'sents_len'], {'batch_first': '(True)', 'enforce_sorted': '(False)'}), '(x, sents_len, batch_first=True, enforce_sorted=False)\n', (2236, 2290), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((2433, 2473), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['x'], {'batch_first': '(True)'}), '(x, batch_first=True)\n', (2452, 2473), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((5227, 5240), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5237, 5240), False, 'import csv\n'), ((5381, 5394), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5391, 5394), False, 'import csv\n'), ((7665, 7672), 'rouge.Rouge', 'Rouge', ([], {}), '()\n', (7670, 7672), False, 'from rouge 
import Rouge\n'), ((9196, 9211), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9209, 9211), False, 'import torch\n'), ((11901, 11915), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (11912, 11915), False, 'import pickle\n'), ((11996, 12050), 'numpy.zeros', 'np.zeros', (['[sents_len[i], max_sent_len]'], {'dtype': 'np.int32'}), '([sents_len[i], max_sent_len], dtype=np.int32)\n', (12004, 12050), True, 'import numpy as np\n'), ((12872, 12886), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (12883, 12886), False, 'import pickle\n'), ((13418, 13461), 'pickle.dump', 'pickle.dump', (['(vocab, dict_w2i, dict_i2w)', 'p'], {}), '((vocab, dict_w2i, dict_i2w), p)\n', (13429, 13461), False, 'import pickle\n'), ((13698, 13733), 're.sub', 're.sub', (['"""[0-9a-zA-Z]+"""', '""" # """', 'sent'], {}), "('[0-9a-zA-Z]+', ' # ', sent)\n", (13704, 13733), False, 'import re\n'), ((14424, 14454), 'random.shuffle', 'random.shuffle', (['processed_data'], {}), '(processed_data)\n', (14438, 14454), False, 'import random\n'), ((14550, 14593), 'pickle.dump', 'pickle.dump', (['processed_data[:train_size]', 'p'], {}), '(processed_data[:train_size], p)\n', (14561, 14593), False, 'import pickle\n'), ((14640, 14683), 'pickle.dump', 'pickle.dump', (['processed_data[train_size:]', 'p'], {}), '(processed_data[train_size:], p)\n', (14651, 14683), False, 'import pickle\n'), ((1729, 1802), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['x', 'x_length'], {'batch_first': '(True)', 'enforce_sorted': '(False)'}), '(x, x_length, batch_first=True, enforce_sorted=False)\n', (1749, 1802), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((1948, 1988), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['x'], {'batch_first': '(True)'}), '(x, batch_first=True)\n', (1967, 1988), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((6604, 6625), 'numpy.array', 'np.array', (['record_loss'], {}), 
'(record_loss)\n', (6612, 6625), True, 'import numpy as np\n'), ((6842, 6855), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6852, 6855), False, 'import csv\n'), ((6984, 6997), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6994, 6997), False, 'import csv\n'), ((9872, 9901), 'torch.topk', 'torch.topk', (['logits', '(5)'], {'dim': '(-1)'}), '(logits, 5, dim=-1)\n', (9882, 9901), False, 'import torch\n'), ((12719, 12733), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (12730, 12733), False, 'import pickle\n'), ((12796, 12810), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (12807, 12810), False, 'import pickle\n'), ((14147, 14163), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (14157, 14163), False, 'import json\n'), ((7217, 7249), 'torch.sum', 'torch.sum', (['((y_pred & y) * y_mask)'], {}), '((y_pred & y) * y_mask)\n', (7226, 7249), False, 'import torch\n'), ((7270, 7310), 'torch.sum', 'torch.sum', (['((1 - y_pred & 1 - y) * y_mask)'], {}), '((1 - y_pred & 1 - y) * y_mask)\n', (7279, 7310), False, 'import torch\n'), ((7331, 7367), 'torch.sum', 'torch.sum', (['((y_pred & 1 - y) * y_mask)'], {}), '((y_pred & 1 - y) * y_mask)\n', (7340, 7367), False, 'import torch\n'), ((7388, 7424), 'torch.sum', 'torch.sum', (['((1 - y_pred & y) * y_mask)'], {}), '((1 - y_pred & y) * y_mask)\n', (7397, 7424), False, 'import torch\n'), ((8356, 8376), 'numpy.array', 'np.array', (['all_scores'], {}), '(all_scores)\n', (8364, 8376), True, 'import numpy as np\n'), ((8944, 8964), 'numpy.array', 'np.array', (['all_scores'], {}), '(all_scores)\n', (8952, 8964), True, 'import numpy as np\n'), ((10666, 10683), 'numpy.array', 'np.array', (['metrics'], {}), '(metrics)\n', (10674, 10683), True, 'import numpy as np\n'), ((13860, 13894), 'jieba.cut', 'jieba.cut', (['sents[i]'], {'cut_all': '(False)'}), '(sents[i], cut_all=False)\n', (13869, 13894), False, 'import jieba\n'), ((5986, 6006), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (6003, 6006), 
False, 'import torch\n'), ((6026, 6051), 'torch.FloatTensor', 'torch.FloatTensor', (['y_mask'], {}), '(y_mask)\n', (6043, 6051), False, 'import torch\n'), ((6540, 6564), 'numpy.array', 'np.array', (['record_metrics'], {}), '(record_metrics)\n', (6548, 6564), True, 'import numpy as np\n'), ((8582, 8605), 'jieba.cut', 'jieba.cut', (['summaries[i]'], {}), '(summaries[i])\n', (8591, 8605), False, 'import jieba\n'), ((8638, 8664), 'jieba.cut', 'jieba.cut', (['ground_truth[i]'], {}), '(ground_truth[i])\n', (8647, 8664), False, 'import jieba\n'), ((9502, 9522), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (9519, 9522), False, 'import torch\n'), ((9542, 9567), 'torch.FloatTensor', 'torch.FloatTensor', (['y_mask'], {}), '(y_mask)\n', (9559, 9567), False, 'import torch\n'), ((1163, 1197), 'torch.randn', 'torch.randn', (['(self.L * 2)', '(1)', 'self.H'], {}), '(self.L * 2, 1, self.H)\n', (1174, 1197), False, 'import torch\n'), ((1230, 1264), 'torch.randn', 'torch.randn', (['(self.L * 2)', '(1)', 'self.H'], {}), '(self.L * 2, 1, self.H)\n', (1241, 1264), False, 'import torch\n'), ((1298, 1336), 'torch.randn', 'torch.randn', (['(self.L * 2)', '(1)', '(self.H * 2)'], {}), '(self.L * 2, 1, self.H * 2)\n', (1309, 1336), False, 'import torch\n'), ((1367, 1405), 'torch.randn', 'torch.randn', (['(self.L * 2)', '(1)', '(self.H * 2)'], {}), '(self.L * 2, 1, self.H * 2)\n', (1378, 1405), False, 'import torch\n'), ((2034, 2054), 'torch.mean', 'torch.mean', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2044, 2054), False, 'import torch\n'), ((6110, 6129), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (6126, 6129), False, 'import torch\n'), ((9600, 9619), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (9616, 9619), False, 'import torch\n')] |
import pandas as pd
from bayespy.network import Discrete
import bayespy.network
import numpy as np
from collections import Counter
import bayespy.data
import bayespy.jni
from bayespy.jni import jp
class _AutoInsight:
    """Wraps a single trained Bayes Server network and runs the AutoInsight
    analysis against a fixed target state."""

    def __init__(self, network, target, logger):
        """
        :param network: a trained Bayes Server network (jni proxy object)
        :param target: a bayespy.network.Discrete naming the target variable/state
        :param logger: a standard logger instance
        """
        self._network = network
        self._logger = logger
        self._target = target
        # Resolve the concrete jni state object for the target variable/state.
        self._target_state = bayespy.network.get_state(network, target.variable, target.state)
        (self._inf_engine, _, _) = bayespy.model.InferenceEngine(network).create(retract=False)

    def calculate(self, evidence=None, sort_by=None):
        """
        Run the AutoInsight analysis over every variable except the target.

        :param evidence: optional list of evidence items to condition on (as
            accepted by bayespy.model.Evidence.apply); default: no evidence
        :param sort_by: list of columns to sort the result by, descending;
            default: ['difference']
        :return: a DataFrame with one row per (variable, state), columns
            'variable', 'state', 'probability', 'probability_given_target',
            'probability_target_given_this', 'difference' and 'lift'
        """
        # None sentinels instead of mutable default arguments.
        if evidence is None:
            evidence = []
        if sort_by is None:
            sort_by = ['difference']

        variables = jp.java.util.Arrays.asList(
            [v for v in self._network.getVariables() if v.getName() != self._target.variable])
        ai = bayespy.jni.bayesServerAnalysis().AutoInsight
        if len(evidence) > 0:
            e = bayespy.model.Evidence(self._network, self._inf_engine)
            evidence_obj = e.apply(evidence)
            auto_insight_output = ai.calculate(self._target_state, variables,
                                               bayespy.model.InferenceEngine.get_inference_factory(),
                                               evidence_obj)
        else:
            auto_insight_output = ai.calculate(self._target_state, variables,
                                               bayespy.model.InferenceEngine.get_inference_factory())

        results = []
        for variable in auto_insight_output.getVariableOutputs():
            variable_name = variable.getVariable().getName()
            # The latent 'Cluster' variable is a training artifact; skip it.
            if variable_name == "Cluster":
                continue
            for state in variable.getStateOutputs():
                results.append({'variable': variable_name, 'state': state.getState().getName(),
                                'probability': state.getProbability().floatValue(),
                                'probability_given_target': state.getProbabilityGivenTarget().floatValue(),
                                'probability_target_given_this': np.nan if state.getProbabilityTargetGivenThis() is None else state.getProbabilityTargetGivenThis().floatValue(),
                                'difference': state.getDifference().floatValue(),
                                'lift': np.nan if state.getLift() is None else state.getLift().floatValue()})

        df = pd.DataFrame(results)
        return df.sort_values(by=sort_by, ascending=False).reset_index().drop('index', axis=1)
class AutoInsight:
    """Trains several comparison models from the same template and aggregates
    their auto-insight output, so that findings spurious to a single trained
    model average out."""

    def __init__(self, template, target, logger, comparison_models=3):
        """
        :param template: a network template exposing create() and get_network_factory()
        :param target: a bayespy.network.Discrete target variable/state
        :param logger: a standard logger instance
        :param comparison_models: number of models to train for comparison
        """
        self._network_template = template
        self._logger = logger
        self._data_store = template.get_network_factory().get_datastore()
        self._model_cache = []
        self._comparison_model_count = comparison_models
        self._target = target

    def _create_models(self):
        # Lazily train the comparison models once and cache the wrappers.
        if len(self._model_cache) > 0:
            return self._model_cache

        for i in range(self._comparison_model_count):
            network = self._network_template.create()
            model = bayespy.model.NetworkModel(network, self._data_store, self._logger)
            model.train()
            self._model_cache.append(_AutoInsight(network, self._target, self._logger))

        return self._model_cache

    def get_most_common_tuples(self, combination_length=2, top=20):
        """
        For each insight row of each model, greedily extend it with the next
        most discriminative states and aggregate the resulting tuples.

        :param combination_length: total number of states per evidence tuple
        :param top: number of tuples to return
        :return: DataFrame of tuples averaged across models, sorted by
            'max_difference' descending
        """
        models = self._create_models()
        group = 0
        combinations = pd.DataFrame()
        for model in models:
            insight = model.calculate()
            reader = bayespy.data.DataFrameReader(insight)
            while reader.read():
                rows = [reader.to_dict()]
                evidence = [bayespy.network.Discrete(reader.variable, reader.state)]
                for i in range(combination_length - 1):
                    sub_insight = model.calculate(evidence=evidence)
                    top_row = sub_insight.iloc[0]
                    evidence.append(bayespy.network.Discrete(top_row.variable, top_row.state))
                    d = top_row.to_dict()
                    d.update({'group': group})
                    rows.append(d)

                r = pd.DataFrame(rows)
                r['max_difference'] = r.difference.sum()
                r['evidence'] = ','.join([str(n) for n in evidence])
                # DataFrame.append was removed in pandas 2.0; concat is the
                # drop-in equivalent (same default: indexes preserved).
                combinations = pd.concat([combinations, r])
                group += 1

        # numeric_only=True makes the historical implicit behavior explicit:
        # string columns such as 'variable'/'state' are dropped by the mean.
        return combinations.groupby(by=['evidence']).mean(numeric_only=True)\
            .sort_values(by=['max_difference'], ascending=False)\
            .reset_index().drop(['index', 'group'], axis=1).head(top)

    def get_descriptive_combinations(self, top=10):
        """
        Greedily build one descriptive evidence chain per model, stopping when
        the marginal difference drops below 0.05 (or after 10 steps).

        :param top: NOTE(review): currently unused -- the method returns every
            combination; confirm whether a .head(top) was intended.
        :return: DataFrame of combinations sorted by 'max_difference'
        """
        models = self._create_models()
        combinations = pd.DataFrame()
        for i, model in enumerate(models):
            rows = []
            evidence = []
            for j in range(10):
                step = model.calculate(evidence=evidence)
                row = step.iloc[0]
                evidence.append(bayespy.network.Discrete(row.variable, row.state))
                d = row.to_dict()
                d.update({'group': i})
                rows.append(d)
                # Stop extending once the marginal gain becomes negligible.
                if row.difference < 0.05:
                    break
            r = pd.DataFrame(rows)
            r['max_difference'] = r.difference.sum()
            r['evidence'] = ','.join([str(n) for n in evidence])
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            combinations = pd.concat([combinations, r])

        return combinations.sort_values(by=['max_difference']).reset_index()

    def get_exclusive_states(self, top=10):
        """
        Find states that are almost exclusive to the target: likely given the
        target but nearly impossible (< 2%) given the complement.

        :param top: number of rows to return
        :return: DataFrame averaged across models, sorted by 'difference'
        """
        models = self._create_models()
        rows = pd.DataFrame()
        for model in models:
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            rows = pd.concat([rows, model.calculate()], ignore_index=True)

        # this gets the probability given the 'non-target'.
        rows['probability_given_other'] = rows.probability_given_target - rows.difference

        # only get those with a probability given 'other' of less than 2 percent
        rows = rows.groupby(by=['variable', 'state']).mean().sort_values(by=['difference'], ascending=[False])
        return rows[rows.probability_given_other < 0.02].head(top).reset_index()

    def get_insightful_states(self, using='difference', top=10):
        """
        Return the states with the largest average 'difference' or 'lift'
        across the comparison models.

        :param using: 'difference' (favours likely probability changes) or
            'lift' (favours relative changes regardless of likelihood)
        :param top: number of rows to return
        :raises ValueError: if ``using`` is neither 'lift' nor 'difference'
        :return: DataFrame averaged across models, sorted by ``using``
        """
        if using not in ['lift', 'difference']:
            raise ValueError("Expecting either lift or difference in the using parameter. Difference favours probability"
                             " changes with a higher likelihood of occurring, while lift favours relative changes in probability"
                             " without taking in to account the likelihood that they will occur.")

        models = self._create_models()
        rows = pd.DataFrame()
        for model in models:
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            rows = pd.concat([rows, model.calculate()], ignore_index=True)

        return rows.groupby(by=['variable', 'state']).mean().sort_values(by=[using], ascending=False).head(
            top).reset_index()
class AutoInsight1:
def __init__(self, network_factory, logger, continuous=[], discrete=[]):
self._continuous = continuous
self._discrete = discrete
self._factory = network_factory
self._logger = logger
def evidence_query(self, model=None, base_evidence=None, new_evidence=None):
"""
deprecated, use query_model_with_evidence instead.
:param model:
:param base_evidence:
:param new_evidence:
:return:
"""
return self.query_model_with_evidence(model=model, base_evidence=base_evidence, new_evidence=new_evidence)
def query_model_with_evidence(self, model=None, base_evidence=None, new_evidence=None):
"""
Get the difference query between base_evidence and new_evidence
:param model: The trained model, or None
:param base_evidence: the base_evidence of the model
:param new_evidence: the new_evidence applied to the model
:return: a dataframe with variable, state, base probability, new probability and difference between queries
"""
if model is None:
model = self._get_trained_model()
inference = model.inference()
if base_evidence is not None:
model.evidence(inference).apply(base_evidence)
output = model.create_query(inference).execute()
output.discrete.rename(columns={"value": "base_probability"}, inplace=True)
output.continuous.rename(columns={"mean": "base_mean", "variance": "base_variance"}, inplace=True)
if new_evidence is None:
return (output.discrete, output.continuous)
model.evidence(inference).apply(new_evidence)
output_1 = model.create_query(inference).execute()
c_2 = pd.merge(output_1.continuous, output.continuous, on=['variable'])
o_2 = pd.merge(output_1.discrete, output.discrete, on=['variable', 'state'])
o_2['difference'] = o_2['value'] - o_2['base_probability']
o_2['variable_state'] = o_2.variable.str.cat(others=o_2.state, sep=bayespy.network.STATE_DELIMITER)
return (o_2, c_2)
def _resolve_cluster_index(self, df, variable, state):
"""
Get a consistent cluster index across model builds, as a discrete Cluster_ node may not consistently
map to the underlying continuous child node
:param df: the dataframe (from a query to the model)
:param variable: the current discrete variable
:param state: the current discrete state
:return: an integer relating to the appropriate cluster, to be used internally
"""
means = df[df.variable == variable][['state', 'mean']].copy()
means = means.sort_values(by='mean').reset_index()
return means[means.state == state].index[0]
def _resolve_cluster_from_index(self, df, variable, index):
"""
Map from the internal cluster index to the external variable name, given a dataframe.
:param df:
:param variable:
:param index:
:return: the row relating to the external variable
"""
means = df[df.variable == variable].copy()
means = means.sort_values(by='mean').reset_index()
return means.ix[int(index)]
def _get_trained_model(self, network):
self._logger.debug("Training model")
return self._factory.create_trained_model(network, self._discrete.index.tolist())
def create_model_cache(self, target, times=1):
return [m for f, m in self._build_and_query_network(target, times=times)]
def query_exclusive_states(self, target, models=None, times=1, top=10):
"""
Get variables where a target has almost guaranteed likelihood, even if it has a small difference
:param target: the target state (a Discrete instance)
:param times: the number of times a model should be trained and queried
:param top: the top N results
:return: a list of (string) variables
"""
features = self._build_and_query_network(target, models=models, times=times)
cc = Counter()
models = []
for df, model in features:
dfr = bayespy.data.DataFrameReader(
df[(df.base_probability < 0.008) & (df.difference > 0.005) & (df.difference < 1)])
while dfr.read():
if dfr['variable'] == target.variable or dfr['variable'] == 'Cluster':
continue
if bayespy.network.is_cluster_variable(dfr['variable']):
ix = self._resolve_cluster_index(df, dfr['variable'], dfr['state'])
cc["{}{}{}".format(dfr['variable'], bayespy.network.STATE_DELIMITER, ix)] += dfr['difference']
else:
cc[dfr['variable_state']] += dfr['difference']
models.append(model)
results = self._get_variable_frequency(features, cc, top)
return results
# def query_top_discriminative_variables(self, target, models=None, times=1, top=10):
# generator = self.query_top_variable_combinations_as_df(target, models=models, times=times, top=top)
# for df, model in generator:
# for row in bayespy.data.DataFrameReader(df):
# self._resolve_cluster_index(df, row['variable']
def query_bivariate_combinations(self, target, models=None, times=1, top=10):
"""
Get the most significant bivariate (target + one other) combinations,
where the variables with the highest difference values are returned
:param target: the target variable (a Discrete instance)
:param times: the number of times a model should be trained and queried
:param top: top N results
:return: a list of (string) variables
"""
features = self._build_and_query_network(target, models=models, times=times)
cc = Counter()
models = []
for df, model in features:
dfr = bayespy.data.DataFrameReader(df)
while dfr.read():
if dfr['variable'] == target.variable or dfr['variable'] == 'Cluster':
continue
if bayespy.network.is_cluster_variable(dfr['variable']):
ix = self._resolve_cluster_index(df, dfr['variable'], dfr['state'])
cc["{}{}{}".format(dfr['variable'], bayespy.network.STATE_DELIMITER, ix)] += dfr['difference']
else:
cc[dfr['variable_state']] += dfr['difference']
models.append(model)
results = self._get_variable_frequency(features, cc, top)
return results
def _get_mean_value_across_models(self, models, discrete, key):
values = []
for model in models:
values.append(float(model[(model.variable == discrete.variable) & (model.state == discrete.state)][key]))
return np.mean(values)
def _get_variable_frequency(self, query, counter, top):
most_common = counter.most_common(top)
mc = []
models = [m[0] for m in query]
for (v, d) in most_common:
v_ = bayespy.network.Discrete.fromstring(v)
if bayespy.network.is_cluster_variable(v_.variable):
av = []
va = []
for df, model in query:
c = self._resolve_cluster_from_index(df, v_.variable, v_.state)
av.append(c['mean'])
va.append(c['variance'])
#target_probability = self._get_mean_value_across_models(models, v_, 'target_probability')
mc.append((bayespy.network.Discrete(v_.variable, c['state']).tostring(),
{'mean': np.mean(av), 'variance': np.mean(va), 'sum_difference': d, 'target_probability': 0}))
else:
base_probability = self._get_mean_value_across_models(models, v_, 'base_probability')
target_probability = self._get_mean_value_across_models(models, v_, 'target_probability')
mc.append((v, {'base_probability': base_probability, 'target_probability': target_probability,
'sum_difference': d}))
return mc
def _build_and_query_network(self, target, times=1, models=None):
"""
Builds the network, trains it and queries it to get difference between all other states in target variable and the target state.
:param target: the target (a Discrete instance)
:param times: the number of times to train/ query
:return: a list of dataframes (len(return) == times)
"""
i = 0
if models is not None:
self._logger.info(
"The number of cached models sets the number of iterations, equal to {}".format(len(models)))
if not isinstance(target, Discrete):
raise ValueError("target should be of type discretenode")
(network, network_builder) = self._factory.create_network()
network_builder.build_naive_network_with_latent_parents(discrete=self._discrete,
continuous=self._continuous, latent_states=10)
if not bayespy.network.is_variable_discrete(bayespy.network.get_variable(network, target.variable)):
raise ValueError("Target variable '{}' is not discrete.".format(target.variable))
target_alt = list(bayespy.network.get_other_states_from_variable(network, target))
self._logger.debug("Finished building network.")
features = []
while len(features) < times:
if models is not None:
model = models[len(features)]
self._logger.debug("Pulled model from cache.")
else:
model = self._get_trained_model(network.copy())
self._logger.debug("Trained model")
t = [target.tostring()]
# get a list of the most different nodes between all other and the target node.
(discrete_features, continuous_features) = self.query_model_with_evidence(model=model,
base_evidence=target_alt,
new_evidence=t)
# get the continuous_variable_name if it's a parent of a continuous node.
discrete_features['continuous_variable_name'] = discrete_features.variable.apply(
lambda x: x.replace("Cluster_", "") if "Cluster_" in x else np.nan)
# get the mean and variance for each of the continuous nodes and which are assumed to be children of the discrete nodes.
discrete_features['mean'] = 0.0
discrete_features['variance'] = 0.0
discrete_features['target_probability'] = 0.0
dfr = bayespy.data.DataFrameReader(discrete_features)
while dfr.read():
if dfr['variable'] == target.variable or dfr['variable'] == 'Cluster':
continue
(ds, cs) = self.query_model_with_evidence(model=model, new_evidence=[dfr['variable_state']])
if bayespy.network.is_cluster_variable(dfr['variable']):
discrete_features.set_value(dfr.get_index(), 'mean',
cs[cs.variable == dfr['continuous_variable_name']]['mean'])
discrete_features.set_value(dfr.get_index(), 'variance',
cs[cs.variable == dfr['continuous_variable_name']]['variance'])
# gets the target value given the particular evidence set on the discrete variable
discrete_features.set_value(dfr.get_index(), 'target_probability', ds[(ds.variable == target.variable) & (ds.state == target.state)]['value'])
discrete_features.sort_values(by=['difference'], inplace=True, ascending=False)
features.append((discrete_features, model))
return features
@staticmethod
def _get_row(df, target):
for i in range(0, 10):
if df.ix[i].variable == "Cluster" or df.ix[i].variable == target.variable:
continue
return df.ix[i]
def query_top_variable_combinations_as_df(self, target, models=None, times=5, top=3):
results, models = self.query_top_variable_combinations(target, models=models, times=times, top=top)
for i,result in enumerate(results):
rows = []
for i, item in enumerate(result['evidence']):
row = {'mean': np.nan, 'variance': np.nan, 'variable': '', 'probability': 0.0, 'state': '',
'max_p': 0.0,
'difference': 0.0}
d = bayespy.network.Discrete.fromstring(item)
c_name = d.variable.replace("Cluster_", "")
if c_name != d.variable:
for cv in result['continuous_variables']:
if cv['variable'] == c_name:
row['mean'] = cv['mean']
row['variance'] = cv['variance']
row['difference'] = result['difference'][i]
row['variable'] = d.variable
row['state'] = d.state
row['probability'] = result['probability'][i]
row['max_p'] = result['max_probability']
rows.append(row)
yield pd.DataFrame(rows), models[i]
def query_top_variable_combinations(self, target, models=None, times=5, top=3):
(network, network_builder) = self._factory.create_network()
if not isinstance(target, Discrete):
raise ValueError("target should be of type discretenode")
network_builder.build_naive_network_with_latent_parents(discrete=self._discrete,
continuous=self._continuous, latent_states=10)
target_alt = list(bayespy.network.get_other_states_from_variable(network, target))
self._logger.debug("Finished building network.")
t = [target.tostring()]
if models is not None:
times = len(models)
if top > len(models):
top = len(models)
combinations = []
while len(combinations) < times:
if models is not None:
#print(len(combinations))
model = models[len(combinations)]
self._logger.debug("Picked up model from cache")
else:
model = self._get_trained_model(network)
self._logger.debug("Trained model")
base_evidence = []
difference = []
target_prob_given_evidence = []
recent_variable = None
continuous_variables = []
for i in range(top):
(discrete_features, continuous_features) = self.evidence_query(model=model,
base_evidence=base_evidence + target_alt,
new_evidence=base_evidence + t)
discrete_features.sort_values(by='difference', inplace=True, ascending=False)
discrete_features.reset_index(inplace=True)
# get the target probability given evidence applied to the newly found variable
(dsf, csf) = self.evidence_query(model=model, new_evidence=base_evidence)
if recent_variable is not None and bayespy.network.is_cluster_variable(recent_variable.variable):
c_v_name = recent_variable.variable.replace("Cluster_", "")
continuous_variables.append({'mean': float(csf[c_v_name == csf.variable]['mean']),
'variance': float(csf[c_v_name == csf.variable]['variance']),
'variable': c_v_name})
target_given_evidence = dsf[dsf.variable == target.variable]
# print(target_given_evidence)
target_prob_given_evidence.append(float(target_given_evidence[dsf.state == str(target.state)].value))
row = self._get_row(discrete_features, target)
recent_variable = Discrete(row.variable, row.state)
difference.append(row.difference)
base_evidence.append(recent_variable.tostring())
combinations.append(
{'evidence': base_evidence, 'difference': difference, 'probability': target_prob_given_evidence,
'max_probability': np.max(target_prob_given_evidence), 'continuous_variables': continuous_variables,
'model': model})
combinations = sorted(combinations, key=lambda x: x['max_probability'], reverse=True)
models = [combo.pop('model') for combo in combinations]
return combinations, models
def query_variable_combinations(self, target, conditioned=3, total_iterations_limit=10, diff_convergence_dp=4):
(network, network_builder) = self._factory.create_network()
if not isinstance(target, Discrete):
raise ValueError("target should be of type discretenode")
network_builder.build_naive_network_with_latent_parents(discrete=self._discrete,
continuous=self._continuous, latent_states=10)
target_alt = list(bayespy.network.get_other_states_from_variable(network, target))
self._logger.debug("Finished building network.")
results = []
total_over_limit = 0
total_iterations = 0
while total_over_limit <= conditioned:
model = self._get_trained_model(network)
self._logger.debug("Trained model")
t = [target.tostring()]
base_evidence = []
prev_target_prob = np.nan
curr_target_prob = np.nan
difference = []
while round(prev_target_prob, diff_convergence_dp) != round(curr_target_prob, diff_convergence_dp):
prev_target_prob = curr_target_prob
(discrete_features, continuous_features) = self.evidence_query(model=model,
base_evidence=base_evidence + target_alt,
new_evidence=base_evidence + t)
discrete_features.sort_values(by='difference', inplace=True, ascending=False)
discrete_features.reset_index(inplace=True)
(dsf, csf) = self.evidence_query(model=model, new_evidence=base_evidence)
target_given_evidence = dsf[dsf.variable == target.variable]
# print(target_given_evidence)
curr_target_prob = float(target_given_evidence[dsf.state == str(target.state)].value)
self._logger.debug("Evidence: {0}".format(base_evidence))
self._logger.debug(target_given_evidence)
row = self._get_row(discrete_features, target)
difference.append(row.difference)
evi = Discrete(row.variable, row.state).tostring()
base_evidence.append(evi)
if prev_target_prob > 90:
total_over_limit += 1
total_iterations += 1
results.append(
{'evidence': base_evidence, 'difference': difference, 'probability': prev_target_prob, 'model': model})
if total_iterations >= total_iterations_limit:
break
self._logger.debug("Iteration count: {0}".format(total_iterations))
return results
def rationalise(self, results, num=20):
    """Aggregate evidence across runs and return the top scorers.

    Each evidence string is credited with its observed 'difference'
    weighted by the run's final probability; the ``num`` highest
    cumulative scores are returned as ``(evidence, score)`` pairs.
    """
    from collections import Counter
    scores = Counter()
    for run in results:
        weight = run['probability']
        for position, key in enumerate(run['evidence']):
            scores[key] += run['difference'][position] * weight
    return scores.most_common(num)
| [
"numpy.mean",
"pandas.merge",
"numpy.max",
"collections.Counter",
"pandas.DataFrame",
"bayespy.network.Discrete"
] | [((2368, 2389), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (2380, 2389), True, 'import pandas as pd\n'), ((3444, 3458), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3456, 3458), True, 'import pandas as pd\n'), ((4687, 4701), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4699, 4701), True, 'import pandas as pd\n'), ((5554, 5568), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5566, 5568), True, 'import pandas as pd\n'), ((6612, 6626), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6624, 6626), True, 'import pandas as pd\n'), ((9394, 9459), 'pandas.merge', 'pd.merge', (['output_1.continuous', 'output.continuous'], {'on': "['variable']"}), "(output_1.continuous, output.continuous, on=['variable'])\n", (9402, 9459), True, 'import pandas as pd\n'), ((9474, 9544), 'pandas.merge', 'pd.merge', (['output_1.discrete', 'output.discrete'], {'on': "['variable', 'state']"}), "(output_1.discrete, output.discrete, on=['variable', 'state'])\n", (9482, 9544), True, 'import pandas as pd\n'), ((11723, 11732), 'collections.Counter', 'Counter', ([], {}), '()\n', (11730, 11732), False, 'from collections import Counter\n'), ((13522, 13531), 'collections.Counter', 'Counter', ([], {}), '()\n', (13529, 13531), False, 'from collections import Counter\n'), ((14528, 14543), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (14535, 14543), True, 'import numpy as np\n'), ((27623, 27632), 'collections.Counter', 'Counter', ([], {}), '()\n', (27630, 27632), False, 'from collections import Counter\n'), ((5190, 5208), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (5202, 5208), True, 'import pandas as pd\n'), ((4160, 4178), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (4172, 4178), True, 'import pandas as pd\n'), ((24054, 24087), 'bayespy.network.Discrete', 'Discrete', (['row.variable', 'row.state'], {}), '(row.variable, row.state)\n', (24062, 24087), False, 'from bayespy.network import 
Discrete\n'), ((21154, 21172), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (21166, 21172), True, 'import pandas as pd\n'), ((24387, 24421), 'numpy.max', 'np.max', (['target_prob_given_evidence'], {}), '(target_prob_given_evidence)\n', (24393, 24421), True, 'import numpy as np\n'), ((26980, 27013), 'bayespy.network.Discrete', 'Discrete', (['row.variable', 'row.state'], {}), '(row.variable, row.state)\n', (26988, 27013), False, 'from bayespy.network import Discrete\n'), ((15354, 15365), 'numpy.mean', 'np.mean', (['av'], {}), '(av)\n', (15361, 15365), True, 'import numpy as np\n'), ((15379, 15390), 'numpy.mean', 'np.mean', (['va'], {}), '(va)\n', (15386, 15390), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import sys
import numpy as np
import pytest
import tvm
from tvm import relay, TVMError
from tvm.ir.module import IRModule
from tvm.relay import testing, transform
from tvm.relay.testing import byoc
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from aot_test_utils import (
AOTTestModel,
AOT_DEFAULT_RUNNER,
generate_ref_data,
convert_to_relay,
compile_and_run,
compile_models,
parametrize_aot_options,
)
def test_error_c_interface_with_packed_api():
    """The "c" interface API must be rejected once unpacked calls are disabled."""
    sum_expr = relay.add(relay.const(1), relay.const(1))
    no_arg_func = relay.Function([], sum_expr)
    with pytest.raises(tvm.TVMError, match="Packed interface required for packed operators"):
        compile_and_run(
            AOTTestModel(
                module=IRModule.from_expr(no_arg_func),
                inputs={},
                outputs=generate_ref_data(no_arg_func, {}),
            ),
            AOT_DEFAULT_RUNNER,
            "c",
            False,
        )
@parametrize_aot_options
def test_conv_with_params(interface_api, use_unpacked_api, test_runner):
    """Single conv2d whose weight is supplied as a bound parameter."""
    RELAY_MODEL = """
#[version = "0.0.5"]
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(8, 3, 5, 5), int8]) {
    %1 = nn.conv2d(
         %data,
         %weight,
         padding=[2, 2],
         channels=8,
         kernel_size=[5, 5],
         data_layout="NCHW",
         kernel_layout="OIHW",
         out_dtype="int32");
  %1
}
"""
    mod = tvm.parser.fromtext(RELAY_MODEL)
    main_func = mod["main"]
    # Derive input shapes/dtypes from the parsed module so test data always matches.
    shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
    weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
    input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
    params = {"weight": weight_data}
    inputs = {"data": input_data}
    output_list = generate_ref_data(mod, inputs, params)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_add_with_params(interface_api, use_unpacked_api, test_runner):
    """Elementwise add where one operand is a bound parameter, the other an input."""
    lhs = relay.var("x", shape=(1, 10))
    rhs = relay.var("y", shape=(1, 10))
    func = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    bound_params = {"x": np.ones((1, 10)).astype("float32")}
    runtime_inputs = {"y": np.random.uniform(size=(1, 10)).astype("float32")}
    expected = generate_ref_data(func, runtime_inputs, bound_params)
    compile_and_run(
        AOTTestModel(
            module=IRModule.from_expr(func),
            inputs=runtime_inputs,
            outputs=expected,
            params=bound_params,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
    """Test a subgraph with a single conv2d operator."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, weight_shape, 3, 3)
    # groups=32 with weight_shape=1 exercises the depthwise-convolution path.
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
    output_list = generate_ref_data(mod, inputs)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_concatenate(interface_api, use_unpacked_api, test_runner):
    """Concatenate two 2-D inputs along axis 1, then add a scalar."""
    dtype = "float32"
    x = relay.var("x", shape=(10, 5), dtype=dtype)
    y = relay.var("y", shape=(10, 5), dtype=dtype)
    t = relay.var("z", shape=(), dtype=dtype)
    z = relay.concatenate((x, y), axis=1)
    z = relay.add(z, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype(dtype)
    y_data = np.random.rand(10, 5).astype(dtype)
    t_data = np.random.uniform(size=()).astype(dtype)
    inputs = OrderedDict([("x", x_data), ("y", y_data), ("z", t_data)])
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_nested_tuples(interface_api, use_unpacked_api, test_runner):
    """Return a tuple that itself contains a nested tuple of intermediates."""
    x = relay.var("x", shape=(10,))
    x1 = x + relay.const(1.0)
    x2 = x1 + relay.const(1.0)
    x3 = x2 + relay.const(1.0)
    x4 = x3 + relay.const(1.0)
    # Output structure (x1, ((x2, x3), x4)) exercises nested tuple flattening.
    out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
    func = relay.Function([x], out)
    x_data = np.random.uniform(size=(10,)).astype(np.float32)
    inputs = {"x": x_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_tuple_getitem(interface_api, use_unpacked_api, test_runner):
    """Indexing into a constant tuple must produce a single scalar output."""
    func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
    output_list = generate_ref_data(func, {})
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_id(interface_api, use_unpacked_api, test_runner):
    """Identity function: the output must reproduce the input exactly."""
    inp = relay.var("x", "float32")
    identity = relay.Function([inp], inp)
    feed = {"x": np.array(1.0, "float32")}
    expected = generate_ref_data(identity, feed)
    model = AOTTestModel(module=IRModule.from_expr(identity), inputs=feed, outputs=expected)
    compile_and_run(model, test_runner, interface_api, use_unpacked_api)
@parametrize_aot_options
def test_add_const(interface_api, use_unpacked_api, test_runner):
    """Add of two constants — a model with no runtime inputs at all."""
    const_sum = relay.add(relay.const(1), relay.const(1))
    func = relay.Function([], const_sum)
    expected = generate_ref_data(func, {})
    model = AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=expected)
    compile_and_run(model, test_runner, interface_api, use_unpacked_api)
@parametrize_aot_options
def test_mul_param(interface_api, use_unpacked_api, test_runner):
    """Broadcast multiply of a (10, 10) input by a (1, 10) input."""
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(1, 10).astype("float32")
    inputs = OrderedDict([("x", x_data), ("y", y_data)])
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_subtract(interface_api, use_unpacked_api, test_runner):
    """Scalar int32 subtraction with an explicit return-type annotation."""
    i = relay.var("i", shape=[], dtype="int32")
    sub = relay.subtract(i, relay.const(1, dtype="int32"))
    func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
    i_data = np.array(1, dtype="int32")
    inputs = {"i": i_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_tuple_output(interface_api, use_unpacked_api, test_runner):
    """A function returning a tuple of two of the three split pieces."""
    x = relay.var("x", shape=(6, 9))
    y = relay.split(x, 3).astuple()
    a = relay.TupleGetItem(y, 0)
    b = relay.TupleGetItem(y, 1)
    out = relay.Tuple([a, b])
    func = relay.Function([x], out)
    x_data = np.random.rand(6, 9).astype("float32")
    inputs = {"x": x_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@pytest.mark.parametrize(
    ["debug_calculated_workspaces", "workspace_byte_alignment"], [(True, 1), (True, 16), (False, 1)]
)
def test_mobilenet(debug_calculated_workspaces, workspace_byte_alignment):
    """End-to-end mobilenet build exercising workspace debug and alignment options."""
    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOT_DEFAULT_RUNNER
    # TODO(@Mousius) - Enable memory planning to take into account debug information
    debugging_memory_overhead = 1024 * 1024
    mod, params = testing.mobilenet.get_workload(batch_size=1)
    # Read the input shape off the module's own type signature.
    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    inputs = {"data": data}
    output_list = generate_ref_data(mod, inputs, params)
    compile_and_run(
        AOTTestModel(
            module=mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            extra_memory_in_bytes=debugging_memory_overhead,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
        workspace_byte_alignment=workspace_byte_alignment,
        debug_calculated_workspaces=debug_calculated_workspaces,
    )
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
    """This is a simple test to check BYOC capabilities of AOT - with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036"""
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    # z0 = x + w0
    # The compiler_begin/compiler_end annotations mark regions that are
    # offloaded to the external "ccompiler" codegen.
    x_ = compiler_begin(x, "ccompiler")
    w0_ = compiler_begin(w0, "ccompiler")
    z0_ = relay.add(x_, w0_)
    z0 = compiler_end(z0_, "ccompiler")
    # z1 = z0 + w1
    z0__ = compiler_begin(z0, "ccompiler")
    w1_ = compiler_begin(w1, "ccompiler")
    z1_ = relay.add(z0__, w1_)
    z1 = compiler_end(z1_, "ccompiler")
    # z2 = z0 + z1
    z2 = relay.add(z0, z1)
    f = relay.Function([x, w0, w1], z2)
    mod = tvm.IRModule()
    mod["main"] = f
    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)
    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)
    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)
    compile_and_run(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm_multiple_subgraphs(merge_compiler_regions):
    """This is a test case to check BYOC capabilities of AOT with multiple sub graphs"""
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # C compiler
    # Two sub-graphs destined for the external "ccompiler" codegen (via the
    # CcompilerAnnotator below) ...
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)
    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)
    # Other parts on TVM
    # ... and one sub-graph that stays on the default TVM codegen.
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)
    mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
    mod = tvm.relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
    output_list = generate_ref_data(mod, map_inputs)
    # Fix: the previous version also assembled an `input_list` from map_inputs
    # that was never used anywhere — dead code, removed.
    compile_and_run(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_add_name_mangling_with_params(interface_api, use_unpacked_api, test_runner):
    """Same add-with-params model, but with a module name that must be mangled."""
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1, 10))
    z = relay.add(x, y)
    func = relay.Function([x, y], z)
    x_in = np.ones((1, 10)).astype("float32")
    y_in = np.random.uniform(size=(1, 10)).astype("float32")
    params = {"x": x_in}
    inputs = {"y": y_in}
    output_list = generate_ref_data(func, inputs, params)
    compile_and_run(
        # NOTE(review): `module` receives a relay.Function here while sibling
        # tests wrap it in an IRModule — presumably AOTTestModel accepts both;
        # confirm.
        AOTTestModel(name="my_mod", module=func, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_multiple_models(interface_api, use_unpacked_api, test_runner):
    """Two differently-named models (identity and conv2d) compiled in one run."""
    # Identity model without params
    x = relay.var("x", "float32")
    mod1 = relay.Function([x], x)
    one = np.array(1.0, "float32")
    inputs1 = {"x": one}
    output_list1 = generate_ref_data(mod1, inputs1)
    params1 = None
    # Convolution model
    RELAY_MODEL = """
#[version = "0.0.5"]
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(8, 3, 5, 5), int8]) {
    %1 = nn.conv2d(
         %data,
         %weight,
         padding=[2, 2],
         channels=8,
         kernel_size=[5, 5],
         data_layout="NCHW",
         kernel_layout="OIHW",
         out_dtype="int32");
  %1
}
"""
    mod2 = tvm.parser.fromtext(RELAY_MODEL)
    main_func = mod2["main"]
    # Derive conv input shapes/dtypes from the parsed module's own signature.
    shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
    weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
    input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
    params2 = {"weight": weight_data}
    inputs2 = {"data": input_data}
    output_list2 = generate_ref_data(mod2, inputs2, params2)
    compile_and_run(
        [
            AOTTestModel(
                name="mod1", module=mod1, inputs=inputs1, outputs=output_list1, params=params1
            ),
            AOTTestModel(
                name="mod2", module=mod2, inputs=inputs2, outputs=output_list2, params=params2
            ),
        ],
        test_runner,
        interface_api,
        use_unpacked_api,
    )
def test_quant_mobilenet_tfl():
    """Since in AOT we pass directly the output buffer from the user, in quantized networks sharing the output buffers is not possible.
    This is because the output data type is int8 and the intermediate buffer are int32 or int16. We use mobilenet quantized to stress this
    situation and verify that the output buffer sharing is disabled in AOT."""
    # Skip (don't fail) when the optional tflite frontend isn't installed.
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing
    interface_api = "packed"
    use_unpacked_api = False
    test_runner = AOT_DEFAULT_RUNNER
    tflite_model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/"
        "models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
        "mobilenet_v1_1.0_224_quant.tflite",
    )
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    data_shape = (1, 224, 224, 3)
    in_min, in_max = (0, 255)
    data = np.random.randint(in_min, high=in_max, size=data_shape, dtype="uint8")
    mod, params = convert_to_relay(tflite_model_buf, data, "input")
    inputs = {"input": data}
    output_list = generate_ref_data(mod, inputs, params)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_transpose(interface_api, use_unpacked_api, test_runner):
    """Test that non-inplaceable operations (e.g., transpose) do not happen in-place."""
    dtype = "float32"
    x = relay.var("x", shape=(10, 5), dtype=dtype)
    y = relay.var("y", shape=(10, 5), dtype=dtype)
    t = relay.var("z", shape=(), dtype=dtype)
    a = relay.add(x, y)
    b = relay.transpose(a)
    z = relay.add(b, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype(dtype)
    y_data = np.random.rand(10, 5).astype(dtype)
    t_data = np.random.uniform(size=()).astype(dtype)
    inputs = {"x": x_data, "y": y_data, "z": t_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
        # Fusion disabled so the transpose survives as a standalone operator.
        enable_op_fusion=False,
    )
def test_name_sanitiser():
    """Test that input tensors with special characters in the name don't break compilation"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_DEFAULT_RUNNER
    # The tensor name deliberately contains '-' and ':' — both must be
    # sanitized in the generated C identifiers.
    func = relay.var("input-x::2", "float32")
    ident = relay.Function([func], func)
    one = np.array(1.0, "float32")
    inputs = {"input-x::2": one}
    output_list = generate_ref_data(ident, inputs)
    compile_and_run(
        # Fix: wrap the identity function `ident` (previously the bare var
        # `func` was wrapped and `ident` was built but never used).
        AOTTestModel(module=IRModule.from_expr(ident), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
        enable_op_fusion=False,
    )
def test_name_sanitiser_name_clash():
    """Test that 2 input tensors with names that clash once sanitized, generates an error"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_DEFAULT_RUNNER
    dtype = "float32"
    x = relay.var("input::-1", shape=(10, 5), dtype=dtype)
    # Next 2 input tensor names will clash once sanitized.
    y = relay.var("input::-2", shape=(10, 5), dtype=dtype)
    t = relay.var("input:--2", shape=(), dtype=dtype)
    a = relay.add(x, y)
    b = relay.transpose(a)
    z = relay.add(b, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype(dtype)
    y_data = np.random.rand(10, 5).astype(dtype)
    t_data = np.random.uniform(size=()).astype(dtype)
    inputs = {"input::-1": x_data, "input::-2": y_data, "input:--2": t_data}
    output_list = generate_ref_data(func, inputs)
    # The clash must be reported as an error, not silently renamed.
    with pytest.raises(TVMError, match="Sanitized input tensor name clash"):
        compile_and_run(
            AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
            test_runner,
            interface_api,
            use_unpacked_api,
            enable_op_fusion=False,
        )
# This tests for deprecated AOT executor arguments
# TODO(Mousius) Remove deprecated arguments later
def test_deprecated_target_arguments(capsys):
    """Tests we can still use relay.build with -executor, -runtime and -link-params"""
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_DEFAULT_RUNNER
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1, 10))
    z = relay.add(x, y)
    func = relay.Function([x, y], z)
    x_in = np.ones((1, 10)).astype("float32")
    y_in = np.random.uniform(size=(1, 10)).astype("float32")
    params = {"x": x_in}
    inputs = {"y": y_in}
    output_list = generate_ref_data(func, inputs, params)
    compile_and_run(
        AOTTestModel(
            module=IRModule.from_expr(func), inputs=inputs, outputs=output_list, params=params
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
        # Legacy path: executor/runtime are encoded in the target string instead
        # of the Executor/Runtime objects.
        use_runtime_executor=False,
        target="c -executor=aot --link-params -runtime=c -interface-api=c --unpacked-api",
    )
@pytest.mark.parametrize(
    "workspace_byte_alignment,main_workspace_size,sum_workspace_size",
    [
        (8, 10368, 15200),
        (16, 10368, 15232),
        (256, 10752, 17408),
    ],
)
def test_memory_planning(workspace_byte_alignment, main_workspace_size, sum_workspace_size):
    """Workspace sizes in function metadata must track the requested alignment."""
    mod, params = tvm.relay.testing.synthetic.get_workload()
    target = "c"
    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": workspace_byte_alignment,
        },
    )
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
    # The entry point's workspace alone ...
    assert (
        sum(lib.function_metadata["__tvm_main__"].workspace_sizes.values()) == main_workspace_size
    )
    # ... and the total across every generated function.
    assert (
        sum(
            [
                size
                for metadata in lib.function_metadata.values()
                for size in metadata.workspace_sizes.values()
            ]
        )
        == sum_workspace_size
    )
def test_aot_codegen_backend_alloc_workspace_calls():
    """This test checks whether AoT lowering creates TVMBackendAllocWorkspace calls"""
    # The %data and %weight shapes in the following primitive Relay should create
    # small tensors that would get lowered to stack allocations in the CPU PrimFuncs.
    # However, the AoT executor codegen should retain them as TVMBAW calls
    relay_mod = tvm.parser.fromtext(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {
        %0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
            layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
        };
        %1 = fn (%p03: Tensor[(4, 4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {
            layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */
        };
        %2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
        %3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;
        %4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
            nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
        };
        %5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
        %6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {
            layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */
        };
        %6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */
        }
        """
    )
    compiled_test_mods = compile_models(
        models=AOTTestModel(module=relay_mod, inputs=None, outputs=None),
        interface_api="c",
        use_unpacked_api=True,
    )
    source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
    # There should be three allocates created for three primitive relay function
    # calls in the main for the above relay snippet.
    assert source.count("TVMBackendAllocWorkspace") == 3
@pytest.mark.parametrize("constants_byte_alignment", [8, 16, 32])
def test_constants_alignment(constants_byte_alignment):
    """Test that constants_byte_alignment correctly sets constants byte alignment"""
    use_unpacked_api = True
    interface_api = "c"
    mod, params = testing.mobilenet.get_workload(batch_size=1)
    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    inputs = {"data": data}
    output_list = generate_ref_data(mod, inputs, params)
    target_opts = {"-constants-byte-alignment": constants_byte_alignment}
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api,
        use_unpacked_api,
        target_opts=target_opts,
    )
    source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
    # The emitted C must place constants in .rodata.tvm with the requested alignment.
    assert f'__attribute__((section(".rodata.tvm"), aligned({constants_byte_alignment})))' in source
if __name__ == "__main__":
    # Forward any extra CLI arguments straight to pytest and propagate its exit code.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| [
"tvm.relay.op.annotation.compiler_begin",
"numpy.random.rand",
"tvm.relay.Tuple",
"aot_test_utils.generate_ref_data",
"tvm.relay.TupleGetItem",
"numpy.array",
"aot_test_utils.convert_to_relay",
"tvm.relay.subtract",
"tvm.relay.backend.Runtime",
"tvm.parser.fromtext",
"tvm.relay.TensorType",
"t... | [((3763, 3829), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups,weight_shape"""', '[(1, 32), (32, 1)]'], {}), "('groups,weight_shape', [(1, 32), (32, 1)])\n", (3786, 3829), False, 'import pytest\n'), ((9485, 9610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['debug_calculated_workspaces', 'workspace_byte_alignment']", '[(True, 1), (True, 16), (False, 1)]'], {}), "(['debug_calculated_workspaces',\n 'workspace_byte_alignment'], [(True, 1), (True, 16), (False, 1)])\n", (9508, 9610), False, 'import pytest\n'), ((10628, 10692), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""merge_compiler_regions"""', '[False, True]'], {}), "('merge_compiler_regions', [False, True])\n", (10651, 10692), False, 'import pytest\n'), ((12221, 12285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""merge_compiler_regions"""', '[False, True]'], {}), "('merge_compiler_regions', [False, True])\n", (12244, 12285), False, 'import pytest\n'), ((21724, 21885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""workspace_byte_alignment,main_workspace_size,sum_workspace_size"""', '[(8, 10368, 15200), (16, 10368, 15232), (256, 10752, 17408)]'], {}), "(\n 'workspace_byte_alignment,main_workspace_size,sum_workspace_size', [(8,\n 10368, 15200), (16, 10368, 15232), (256, 10752, 17408)])\n", (21747, 21885), False, 'import pytest\n'), ((25736, 25800), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""constants_byte_alignment"""', '[8, 16, 32]'], {}), "('constants_byte_alignment', [8, 16, 32])\n", (25759, 25800), False, 'import pytest\n'), ((1525, 1548), 'tvm.relay.Function', 'relay.Function', (['[]', 'two'], {}), '([], two)\n', (1539, 1548), False, 'from tvm import relay, TVMError\n'), ((2369, 2401), 'tvm.parser.fromtext', 'tvm.parser.fromtext', (['RELAY_MODEL'], {}), '(RELAY_MODEL)\n', (2388, 2401), False, 'import tvm\n'), ((2834, 2872), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'inputs', 'params'], 
{}), '(mod, inputs, params)\n', (2851, 2872), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((3163, 3192), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 10)'}), "('x', shape=(1, 10))\n", (3172, 3192), False, 'from tvm import relay, TVMError\n'), ((3201, 3230), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(1, 10)'}), "('y', shape=(1, 10))\n", (3210, 3230), False, 'from tvm import relay, TVMError\n'), ((3239, 3254), 'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (3248, 3254), False, 'from tvm import relay, TVMError\n'), ((3266, 3291), 'tvm.relay.Function', 'relay.Function', (['[x, y]', 'z'], {}), '([x, y], z)\n', (3280, 3291), False, 'from tvm import relay, TVMError\n'), ((3469, 3508), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs', 'params'], {}), '(func, inputs, params)\n', (3486, 3508), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((4074, 4118), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': 'ishape', 'dtype': 'dtype'}), "('data', shape=ishape, dtype=dtype)\n", (4083, 4118), False, 'from tvm import relay, TVMError\n'), ((4133, 4179), 'tvm.relay.var', 'relay.var', (['"""weight"""'], {'shape': 'wshape', 'dtype': 'dtype'}), "('weight', shape=wshape, dtype=dtype)\n", (4142, 4179), False, 'from tvm import relay, TVMError\n'), ((4190, 4277), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['data0', 'weight0'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'groups': 'groups'}), '(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=\n groups)\n', (4205, 4277), False, 'from tvm import relay, TVMError\n'), ((4286, 4323), 'tvm.relay.Function', 'relay.Function', (['[data0, weight0]', 'out'], {}), '([data0, weight0], out)\n', (4300, 4323), False, 'from tvm 
import relay, TVMError\n'), ((4334, 4348), 'tvm.IRModule', 'tvm.IRModule', ([], {}), '()\n', (4346, 4348), False, 'import tvm\n'), ((4545, 4597), 'collections.OrderedDict', 'OrderedDict', (["[('data', i_data), ('weight', w1_data)]"], {}), "([('data', i_data), ('weight', w1_data)])\n", (4556, 4597), False, 'from collections import OrderedDict\n'), ((4617, 4647), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'inputs'], {}), '(mod, inputs)\n', (4634, 4647), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((4940, 4982), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('x', shape=(10, 5), dtype=dtype)\n", (4949, 4982), False, 'from tvm import relay, TVMError\n'), ((4991, 5033), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('y', shape=(10, 5), dtype=dtype)\n", (5000, 5033), False, 'from tvm import relay, TVMError\n'), ((5042, 5079), 'tvm.relay.var', 'relay.var', (['"""z"""'], {'shape': '()', 'dtype': 'dtype'}), "('z', shape=(), dtype=dtype)\n", (5051, 5079), False, 'from tvm import relay, TVMError\n'), ((5088, 5121), 'tvm.relay.concatenate', 'relay.concatenate', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (5105, 5121), False, 'from tvm import relay, TVMError\n'), ((5130, 5145), 'tvm.relay.add', 'relay.add', (['z', 't'], {}), '(z, t)\n', (5139, 5145), False, 'from tvm import relay, TVMError\n'), ((5177, 5205), 'tvm.relay.Function', 'relay.Function', (['[x, y, t]', 'z'], {}), '([x, y, t], z)\n', (5191, 5205), False, 'from tvm import relay, TVMError\n'), ((5371, 5429), 'collections.OrderedDict', 'OrderedDict', (["[('x', x_data), ('y', y_data), ('z', t_data)]"], {}), "([('x', x_data), ('y', y_data), ('z', t_data)])\n", (5382, 5429), False, 'from collections import OrderedDict\n'), ((5449, 5480), 'aot_test_utils.generate_ref_data', 'generate_ref_data', 
(['func', 'inputs'], {}), '(func, inputs)\n', (5466, 5480), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((5774, 5801), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10,)'}), "('x', shape=(10,))\n", (5783, 5801), False, 'from tvm import relay, TVMError\n'), ((6006, 6030), 'tvm.relay.Function', 'relay.Function', (['[x]', 'out'], {}), '([x], out)\n', (6020, 6030), False, 'from tvm import relay, TVMError\n'), ((6139, 6170), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (6156, 6170), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((6575, 6602), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', '{}'], {}), '(func, {})\n', (6592, 6602), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((6882, 6907), 'tvm.relay.var', 'relay.var', (['"""x"""', '"""float32"""'], {}), "('x', 'float32')\n", (6891, 6907), False, 'from tvm import relay, TVMError\n'), ((6920, 6942), 'tvm.relay.Function', 'relay.Function', (['[x]', 'x'], {}), '([x], x)\n', (6934, 6942), False, 'from tvm import relay, TVMError\n'), ((6953, 6977), 'numpy.array', 'np.array', (['(1.0)', '"""float32"""'], {}), "(1.0, 'float32')\n", (6961, 6977), True, 'import numpy as np\n'), ((7020, 7052), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['ident', 'inputs'], {}), '(ident, inputs)\n', (7037, 7052), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((7399, 7422), 'tvm.relay.Function', 'relay.Function', (['[]', 'two'], {}), '([], two)\n', (7413, 7422), 
False, 'from tvm import relay, TVMError\n'), ((7441, 7468), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', '{}'], {}), '(func, {})\n', (7458, 7468), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((7755, 7785), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10, 10)'}), "('x', shape=(10, 10))\n", (7764, 7785), False, 'from tvm import relay, TVMError\n'), ((7794, 7823), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(1, 10)'}), "('y', shape=(1, 10))\n", (7803, 7823), False, 'from tvm import relay, TVMError\n'), ((8001, 8044), 'collections.OrderedDict', 'OrderedDict', (["[('x', x_data), ('y', y_data)]"], {}), "([('x', x_data), ('y', y_data)])\n", (8012, 8044), False, 'from collections import OrderedDict\n'), ((8063, 8094), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (8080, 8094), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((8384, 8423), 'tvm.relay.var', 'relay.var', (['"""i"""'], {'shape': '[]', 'dtype': '"""int32"""'}), "('i', shape=[], dtype='int32')\n", (8393, 8423), False, 'from tvm import relay, TVMError\n'), ((8572, 8598), 'numpy.array', 'np.array', (['(1)'], {'dtype': '"""int32"""'}), "(1, dtype='int32')\n", (8580, 8598), True, 'import numpy as np\n'), ((8644, 8675), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (8661, 8675), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((8968, 8996), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(6, 9)'}), "('x', shape=(6, 9))\n", (8977, 8996), False, 'from tvm import relay, TVMError\n'), ((9041, 9065), 
'tvm.relay.TupleGetItem', 'relay.TupleGetItem', (['y', '(0)'], {}), '(y, 0)\n', (9059, 9065), False, 'from tvm import relay, TVMError\n'), ((9074, 9098), 'tvm.relay.TupleGetItem', 'relay.TupleGetItem', (['y', '(1)'], {}), '(y, 1)\n', (9092, 9098), False, 'from tvm import relay, TVMError\n'), ((9109, 9128), 'tvm.relay.Tuple', 'relay.Tuple', (['[a, b]'], {}), '([a, b])\n', (9120, 9128), False, 'from tvm import relay, TVMError\n'), ((9140, 9164), 'tvm.relay.Function', 'relay.Function', (['[x]', 'out'], {}), '([x], out)\n', (9154, 9164), False, 'from tvm import relay, TVMError\n'), ((9262, 9293), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (9279, 9293), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((9926, 9970), 'tvm.relay.testing.mobilenet.get_workload', 'testing.mobilenet.get_workload', ([], {'batch_size': '(1)'}), '(batch_size=1)\n', (9956, 9970), False, 'from tvm.relay import testing, transform\n'), ((10160, 10198), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'inputs', 'params'], {}), '(mod, inputs, params)\n', (10177, 10198), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((11009, 11039), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10, 10)'}), "('x', shape=(10, 10))\n", (11018, 11039), False, 'from tvm import relay, TVMError\n'), ((11049, 11080), 'tvm.relay.var', 'relay.var', (['"""w0"""'], {'shape': '(10, 10)'}), "('w0', shape=(10, 10))\n", (11058, 11080), False, 'from tvm import relay, TVMError\n'), ((11090, 11121), 'tvm.relay.var', 'relay.var', (['"""w1"""'], {'shape': '(10, 10)'}), "('w1', shape=(10, 10))\n", (11099, 11121), False, 'from tvm import relay, TVMError\n'), ((11150, 11180), 'tvm.relay.op.annotation.compiler_begin', 
'compiler_begin', (['x', '"""ccompiler"""'], {}), "(x, 'ccompiler')\n", (11164, 11180), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11191, 11222), 'tvm.relay.op.annotation.compiler_begin', 'compiler_begin', (['w0', '"""ccompiler"""'], {}), "(w0, 'ccompiler')\n", (11205, 11222), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11233, 11251), 'tvm.relay.add', 'relay.add', (['x_', 'w0_'], {}), '(x_, w0_)\n', (11242, 11251), False, 'from tvm import relay, TVMError\n'), ((11261, 11291), 'tvm.relay.op.annotation.compiler_end', 'compiler_end', (['z0_', '"""ccompiler"""'], {}), "(z0_, 'ccompiler')\n", (11273, 11291), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11323, 11354), 'tvm.relay.op.annotation.compiler_begin', 'compiler_begin', (['z0', '"""ccompiler"""'], {}), "(z0, 'ccompiler')\n", (11337, 11354), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11365, 11396), 'tvm.relay.op.annotation.compiler_begin', 'compiler_begin', (['w1', '"""ccompiler"""'], {}), "(w1, 'ccompiler')\n", (11379, 11396), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11407, 11427), 'tvm.relay.add', 'relay.add', (['z0__', 'w1_'], {}), '(z0__, w1_)\n', (11416, 11427), False, 'from tvm import relay, TVMError\n'), ((11437, 11467), 'tvm.relay.op.annotation.compiler_end', 'compiler_end', (['z1_', '"""ccompiler"""'], {}), "(z1_, 'ccompiler')\n", (11449, 11467), False, 'from tvm.relay.op.annotation import compiler_begin, compiler_end\n'), ((11497, 11514), 'tvm.relay.add', 'relay.add', (['z0', 'z1'], {}), '(z0, z1)\n', (11506, 11514), False, 'from tvm import relay, TVMError\n'), ((11524, 11555), 'tvm.relay.Function', 'relay.Function', (['[x, w0, w1]', 'z2'], {}), '([x, w0, w1], z2)\n', (11538, 11555), False, 'from tvm import relay, TVMError\n'), ((11566, 11580), 'tvm.IRModule', 'tvm.IRModule', ([], {}), '()\n', (11578, 11580), False, 'import 
tvm\n'), ((11950, 11978), 'collections.OrderedDict', 'OrderedDict', (['(x_data + w_data)'], {}), '(x_data + w_data)\n', (11961, 11978), False, 'from collections import OrderedDict\n'), ((11997, 12031), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'map_inputs'], {}), '(mod, map_inputs)\n', (12014, 12031), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((12546, 12576), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10, 10)'}), "('x', shape=(10, 10))\n", (12555, 12576), False, 'from tvm import relay, TVMError\n'), ((12586, 12617), 'tvm.relay.var', 'relay.var', (['"""w0"""'], {'shape': '(10, 10)'}), "('w0', shape=(10, 10))\n", (12595, 12617), False, 'from tvm import relay, TVMError\n'), ((12627, 12658), 'tvm.relay.var', 'relay.var', (['"""w1"""'], {'shape': '(10, 10)'}), "('w1', shape=(10, 10))\n", (12636, 12658), False, 'from tvm import relay, TVMError\n'), ((12668, 12699), 'tvm.relay.var', 'relay.var', (['"""w2"""'], {'shape': '(10, 10)'}), "('w2', shape=(10, 10))\n", (12677, 12699), False, 'from tvm import relay, TVMError\n'), ((12709, 12740), 'tvm.relay.var', 'relay.var', (['"""w3"""'], {'shape': '(10, 10)'}), "('w3', shape=(10, 10))\n", (12718, 12740), False, 'from tvm import relay, TVMError\n'), ((12750, 12781), 'tvm.relay.var', 'relay.var', (['"""w4"""'], {'shape': '(10, 10)'}), "('w4', shape=(10, 10))\n", (12759, 12781), False, 'from tvm import relay, TVMError\n'), ((12791, 12822), 'tvm.relay.var', 'relay.var', (['"""w5"""'], {'shape': '(10, 10)'}), "('w5', shape=(10, 10))\n", (12800, 12822), False, 'from tvm import relay, TVMError\n'), ((12832, 12863), 'tvm.relay.var', 'relay.var', (['"""w6"""'], {'shape': '(10, 10)'}), "('w6', shape=(10, 10))\n", (12841, 12863), False, 'from tvm import relay, TVMError\n'), ((12873, 12904), 'tvm.relay.var', 'relay.var', (['"""w7"""'], {'shape': '(10, 10)'}), "('w7', shape=(10, 
10))\n", (12882, 12904), False, 'from tvm import relay, TVMError\n'), ((12932, 12948), 'tvm.relay.add', 'relay.add', (['x', 'w0'], {}), '(x, w0)\n', (12941, 12948), False, 'from tvm import relay, TVMError\n'), ((12958, 12980), 'tvm.relay.subtract', 'relay.subtract', (['z0', 'w1'], {}), '(z0, w1)\n', (12972, 12980), False, 'from tvm import relay, TVMError\n'), ((12990, 13012), 'tvm.relay.multiply', 'relay.multiply', (['p0', 'w2'], {}), '(p0, w2)\n', (13004, 13012), False, 'from tvm import relay, TVMError\n'), ((13023, 13039), 'tvm.relay.add', 'relay.add', (['x', 'w3'], {}), '(x, w3)\n', (13032, 13039), False, 'from tvm import relay, TVMError\n'), ((13049, 13071), 'tvm.relay.subtract', 'relay.subtract', (['z1', 'w4'], {}), '(z1, w4)\n', (13063, 13071), False, 'from tvm import relay, TVMError\n'), ((13081, 13103), 'tvm.relay.multiply', 'relay.multiply', (['p1', 'w5'], {}), '(p1, w5)\n', (13095, 13103), False, 'from tvm import relay, TVMError\n'), ((13139, 13155), 'tvm.relay.add', 'relay.add', (['x', 'w6'], {}), '(x, w6)\n', (13148, 13155), False, 'from tvm import relay, TVMError\n'), ((13165, 13187), 'tvm.relay.subtract', 'relay.subtract', (['z2', 'w7'], {}), '(z2, w7)\n', (13179, 13187), False, 'from tvm import relay, TVMError\n'), ((13197, 13236), 'tvm.relay.concatenate', 'relay.concatenate', (['(q0, q1, q2)'], {'axis': '(0)'}), '((q0, q1, q2), axis=0)\n', (13214, 13236), False, 'from tvm import relay, TVMError\n'), ((13245, 13299), 'tvm.relay.Function', 'relay.Function', (['[x, w0, w1, w2, w3, w4, w5, w6, w7]', 'r'], {}), '([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n', (13259, 13299), False, 'from tvm import relay, TVMError\n'), ((13310, 13324), 'tvm.IRModule', 'tvm.IRModule', ([], {}), '()\n', (13322, 13324), False, 'import tvm\n'), ((13335, 13360), 'tvm.relay.testing.byoc.CcompilerAnnotator', 'byoc.CcompilerAnnotator', ([], {}), '()\n', (13358, 13360), False, 'from tvm.relay.testing import byoc\n'), ((13860, 13894), 'aot_test_utils.generate_ref_data', 
'generate_ref_data', (['mod', 'map_inputs'], {}), '(mod, map_inputs)\n', (13877, 13894), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((14308, 14337), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 10)'}), "('x', shape=(1, 10))\n", (14317, 14337), False, 'from tvm import relay, TVMError\n'), ((14346, 14375), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(1, 10)'}), "('y', shape=(1, 10))\n", (14355, 14375), False, 'from tvm import relay, TVMError\n'), ((14384, 14399), 'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (14393, 14399), False, 'from tvm import relay, TVMError\n'), ((14411, 14436), 'tvm.relay.Function', 'relay.Function', (['[x, y]', 'z'], {}), '([x, y], z)\n', (14425, 14436), False, 'from tvm import relay, TVMError\n'), ((14614, 14653), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs', 'params'], {}), '(func, inputs, params)\n', (14631, 14653), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((14996, 15021), 'tvm.relay.var', 'relay.var', (['"""x"""', '"""float32"""'], {}), "('x', 'float32')\n", (15005, 15021), False, 'from tvm import relay, TVMError\n'), ((15033, 15055), 'tvm.relay.Function', 'relay.Function', (['[x]', 'x'], {}), '([x], x)\n', (15047, 15055), False, 'from tvm import relay, TVMError\n'), ((15066, 15090), 'numpy.array', 'np.array', (['(1.0)', '"""float32"""'], {}), "(1.0, 'float32')\n", (15074, 15090), True, 'import numpy as np\n'), ((15135, 15167), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod1', 'inputs1'], {}), '(mod1, inputs1)\n', (15152, 15167), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((15584, 
15616), 'tvm.parser.fromtext', 'tvm.parser.fromtext', (['RELAY_MODEL'], {}), '(RELAY_MODEL)\n', (15603, 15616), False, 'import tvm\n'), ((16053, 16094), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod2', 'inputs2', 'params2'], {}), '(mod2, inputs2, params2)\n', (16070, 16094), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((16878, 16907), 'pytest.importorskip', 'pytest.importorskip', (['"""tflite"""'], {}), "('tflite')\n", (16897, 16907), False, 'import pytest\n'), ((17076, 17275), 'tvm.relay.testing.tf.get_workload_official', 'tf_testing.get_workload_official', (['"""https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz"""', '"""mobilenet_v1_1.0_224_quant.tflite"""'], {}), "(\n 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz'\n , 'mobilenet_v1_1.0_224_quant.tflite')\n", (17108, 17275), True, 'import tvm.relay.testing.tf as tf_testing\n'), ((17456, 17526), 'numpy.random.randint', 'np.random.randint', (['in_min'], {'high': 'in_max', 'size': 'data_shape', 'dtype': '"""uint8"""'}), "(in_min, high=in_max, size=data_shape, dtype='uint8')\n", (17473, 17526), True, 'import numpy as np\n'), ((17545, 17594), 'aot_test_utils.convert_to_relay', 'convert_to_relay', (['tflite_model_buf', 'data', '"""input"""'], {}), "(tflite_model_buf, data, 'input')\n", (17561, 17594), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((17642, 17680), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'inputs', 'params'], {}), '(mod, inputs, params)\n', (17659, 17680), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, 
compile_models, parametrize_aot_options\n'), ((18077, 18119), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('x', shape=(10, 5), dtype=dtype)\n", (18086, 18119), False, 'from tvm import relay, TVMError\n'), ((18128, 18170), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('y', shape=(10, 5), dtype=dtype)\n", (18137, 18170), False, 'from tvm import relay, TVMError\n'), ((18179, 18216), 'tvm.relay.var', 'relay.var', (['"""z"""'], {'shape': '()', 'dtype': 'dtype'}), "('z', shape=(), dtype=dtype)\n", (18188, 18216), False, 'from tvm import relay, TVMError\n'), ((18225, 18240), 'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (18234, 18240), False, 'from tvm import relay, TVMError\n'), ((18249, 18267), 'tvm.relay.transpose', 'relay.transpose', (['a'], {}), '(a)\n', (18264, 18267), False, 'from tvm import relay, TVMError\n'), ((18276, 18291), 'tvm.relay.add', 'relay.add', (['b', 't'], {}), '(b, t)\n', (18285, 18291), False, 'from tvm import relay, TVMError\n'), ((18323, 18351), 'tvm.relay.Function', 'relay.Function', (['[x, y, t]', 'z'], {}), '([x, y, t], z)\n', (18337, 18351), False, 'from tvm import relay, TVMError\n'), ((18576, 18607), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (18593, 18607), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((19053, 19087), 'tvm.relay.var', 'relay.var', (['"""input-x::2"""', '"""float32"""'], {}), "('input-x::2', 'float32')\n", (19062, 19087), False, 'from tvm import relay, TVMError\n'), ((19100, 19128), 'tvm.relay.Function', 'relay.Function', (['[func]', 'func'], {}), '([func], func)\n', (19114, 19128), False, 'from tvm import relay, TVMError\n'), ((19139, 19163), 'numpy.array', 'np.array', (['(1.0)', '"""float32"""'], {}), "(1.0, 'float32')\n", (19147, 19163), True, 
'import numpy as np\n'), ((19215, 19247), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['ident', 'inputs'], {}), '(ident, inputs)\n', (19232, 19247), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((19723, 19773), 'tvm.relay.var', 'relay.var', (['"""input::-1"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('input::-1', shape=(10, 5), dtype=dtype)\n", (19732, 19773), False, 'from tvm import relay, TVMError\n'), ((19841, 19891), 'tvm.relay.var', 'relay.var', (['"""input::-2"""'], {'shape': '(10, 5)', 'dtype': 'dtype'}), "('input::-2', shape=(10, 5), dtype=dtype)\n", (19850, 19891), False, 'from tvm import relay, TVMError\n'), ((19900, 19945), 'tvm.relay.var', 'relay.var', (['"""input:--2"""'], {'shape': '()', 'dtype': 'dtype'}), "('input:--2', shape=(), dtype=dtype)\n", (19909, 19945), False, 'from tvm import relay, TVMError\n'), ((19954, 19969), 'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (19963, 19969), False, 'from tvm import relay, TVMError\n'), ((19978, 19996), 'tvm.relay.transpose', 'relay.transpose', (['a'], {}), '(a)\n', (19993, 19996), False, 'from tvm import relay, TVMError\n'), ((20005, 20020), 'tvm.relay.add', 'relay.add', (['b', 't'], {}), '(b, t)\n', (20014, 20020), False, 'from tvm import relay, TVMError\n'), ((20052, 20080), 'tvm.relay.Function', 'relay.Function', (['[x, y, t]', 'z'], {}), '([x, y, t], z)\n', (20066, 20080), False, 'from tvm import relay, TVMError\n'), ((20329, 20360), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs'], {}), '(func, inputs)\n', (20346, 20360), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((21022, 21051), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 10)'}), "('x', shape=(1, 10))\n", (21031, 21051), False, 'from 
tvm import relay, TVMError\n'), ((21060, 21089), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(1, 10)'}), "('y', shape=(1, 10))\n", (21069, 21089), False, 'from tvm import relay, TVMError\n'), ((21098, 21113), 'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (21107, 21113), False, 'from tvm import relay, TVMError\n'), ((21125, 21150), 'tvm.relay.Function', 'relay.Function', (['[x, y]', 'z'], {}), '([x, y], z)\n', (21139, 21150), False, 'from tvm import relay, TVMError\n'), ((21328, 21367), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', 'inputs', 'params'], {}), '(func, inputs, params)\n', (21345, 21367), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((22030, 22072), 'tvm.relay.testing.synthetic.get_workload', 'tvm.relay.testing.synthetic.get_workload', ([], {}), '()\n', (22070, 22072), False, 'import tvm\n'), ((22104, 22118), 'tvm.relay.backend.Runtime', 'Runtime', (['"""crt"""'], {}), "('crt')\n", (22111, 22118), False, 'from tvm.relay.backend import Executor, Runtime\n'), ((22134, 22205), 'tvm.relay.backend.Executor', 'Executor', (['"""aot"""', "{'workspace-byte-alignment': workspace_byte_alignment}"], {}), "('aot', {'workspace-byte-alignment': workspace_byte_alignment})\n", (22142, 22205), False, 'from tvm.relay.backend import Executor, Runtime\n'), ((23203, 25269), 'tvm.parser.fromtext', 'tvm.parser.fromtext', (['"""\n #[version = "0.0.5"]\n def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {\n %0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {\n layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */\n };\n %1 = fn (%p03: Tensor[(4, 
4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {\n layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */\n };\n %2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;\n %3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;\n %4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {\n nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */\n };\n %5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;\n %6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {\n layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */\n };\n %6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */\n }\n """'], {}), '(\n """\n #[version = "0.0.5"]\n def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {\n %0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {\n layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */\n };\n %1 = fn (%p03: Tensor[(4, 4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {\n layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */\n };\n %2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;\n 
%3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;\n %4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {\n nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */\n };\n %5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;\n %6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {\n layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */\n };\n %6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */\n }\n """\n )\n', (23222, 25269), False, 'import tvm\n'), ((26014, 26058), 'tvm.relay.testing.mobilenet.get_workload', 'testing.mobilenet.get_workload', ([], {'batch_size': '(1)'}), '(batch_size=1)\n', (26044, 26058), False, 'from tvm.relay import testing, transform\n'), ((26248, 26286), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['mod', 'inputs', 'params'], {}), '(mod, inputs, params)\n', (26265, 26286), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((1482, 1496), 'tvm.relay.const', 'relay.const', (['(1)'], {}), '(1)\n', (1493, 1496), False, 'from tvm import relay, TVMError\n'), ((1498, 1512), 'tvm.relay.const', 'relay.const', (['(1)'], {}), '(1)\n', (1509, 1512), False, 'from tvm import relay, TVMError\n'), ((1559, 1647), 'pytest.raises', 'pytest.raises', (['tvm.TVMError'], {'match': '"""Packed interface required for packed operators"""'}), "(tvm.TVMError, match=\n 'Packed interface required for packed operators')\n", (1572, 1647), False, 'import pytest\n'), ((2903, 2978), 'aot_test_utils.AOTTestModel', 
'AOTTestModel', ([], {'module': 'mod', 'inputs': 'inputs', 'outputs': 'output_list', 'params': 'params'}), '(module=mod, inputs=inputs, outputs=output_list, params=params)\n', (2915, 2978), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((4384, 4405), 'tvm.relay.transform.InferType', 'transform.InferType', ([], {}), '()\n', (4403, 4405), False, 'from tvm.relay import testing, transform\n'), ((4677, 4737), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'module': 'mod', 'inputs': 'inputs', 'outputs': 'output_list'}), '(module=mod, inputs=inputs, outputs=output_list)\n', (4689, 4737), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((5815, 5831), 'tvm.relay.const', 'relay.const', (['(1.0)'], {}), '(1.0)\n', (5826, 5831), False, 'from tvm import relay, TVMError\n'), ((5846, 5862), 'tvm.relay.const', 'relay.const', (['(1.0)'], {}), '(1.0)\n', (5857, 5862), False, 'from tvm import relay, TVMError\n'), ((5877, 5893), 'tvm.relay.const', 'relay.const', (['(1.0)'], {}), '(1.0)\n', (5888, 5893), False, 'from tvm import relay, TVMError\n'), ((5908, 5924), 'tvm.relay.const', 'relay.const', (['(1.0)'], {}), '(1.0)\n', (5919, 5924), False, 'from tvm import relay, TVMError\n'), ((7356, 7370), 'tvm.relay.const', 'relay.const', (['(1)'], {}), '(1)\n', (7367, 7370), False, 'from tvm import relay, TVMError\n'), ((7372, 7386), 'tvm.relay.const', 'relay.const', (['(1)'], {}), '(1)\n', (7383, 7386), False, 'from tvm import relay, TVMError\n'), ((7858, 7878), 'tvm.relay.multiply', 'relay.multiply', (['x', 'y'], {}), '(x, y)\n', (7872, 7878), False, 'from tvm import relay, TVMError\n'), ((8452, 8481), 'tvm.relay.const', 'relay.const', (['(1)'], {'dtype': '"""int32"""'}), "(1, dtype='int32')\n", (8463, 8481), False, 'from tvm import relay, 
TVMError\n'), ((10228, 10356), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'module': 'mod', 'inputs': 'inputs', 'outputs': 'output_list', 'params': 'params', 'extra_memory_in_bytes': 'debugging_memory_overhead'}), '(module=mod, inputs=inputs, outputs=output_list, params=params,\n extra_memory_in_bytes=debugging_memory_overhead)\n', (10240, 10356), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((11696, 11732), 'tvm.relay.transform.PartitionGraph', 'transform.PartitionGraph', (['"""mod_name"""'], {}), "('mod_name')\n", (11720, 11732), False, 'from tvm.relay import testing, transform\n'), ((11748, 11769), 'tvm.relay.transform.InferType', 'transform.InferType', ([], {}), '()\n', (11767, 11769), False, 'from tvm.relay import testing, transform\n'), ((12061, 12140), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'name': '"""my_mod"""', 'module': 'mod', 'inputs': 'map_inputs', 'outputs': 'output_list'}), "(name='my_mod', module=mod, inputs=map_inputs, outputs=output_list)\n", (12073, 12140), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((13487, 13533), 'tvm.relay.transform.PartitionGraph', 'tvm.relay.transform.PartitionGraph', (['"""mod_name"""'], {}), "('mod_name')\n", (13521, 13533), False, 'import tvm\n'), ((13549, 13580), 'tvm.relay.transform.InferType', 'tvm.relay.transform.InferType', ([], {}), '()\n', (13578, 13580), False, 'import tvm\n'), ((14030, 14109), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'name': '"""my_mod"""', 'module': 'mod', 'inputs': 'map_inputs', 'outputs': 'output_list'}), "(name='my_mod', module=mod, inputs=map_inputs, outputs=output_list)\n", (14042, 14109), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, 
compile_models, parametrize_aot_options\n'), ((14684, 14779), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'name': '"""my_mod"""', 'module': 'func', 'inputs': 'inputs', 'outputs': 'output_list', 'params': 'params'}), "(name='my_mod', module=func, inputs=inputs, outputs=output_list,\n params=params)\n", (14696, 14779), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((17710, 17785), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'module': 'mod', 'inputs': 'inputs', 'outputs': 'output_list', 'params': 'params'}), '(module=mod, inputs=inputs, outputs=output_list, params=params)\n', (17722, 17785), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((20371, 20437), 'pytest.raises', 'pytest.raises', (['TVMError'], {'match': '"""Sanitized input tensor name clash"""'}), "(TVMError, match='Sanitized input tensor name clash')\n", (20384, 20437), False, 'import pytest\n'), ((22261, 22339), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': '(3)', 'config': "{'tir.disable_vectorize': True}"}), "(opt_level=3, config={'tir.disable_vectorize': True})\n", (22286, 22339), False, 'import tvm\n'), ((22355, 22434), 'tvm.relay.build', 'tvm.relay.build', (['mod', 'target'], {'executor': 'executor', 'runtime': 'runtime', 'params': 'params'}), '(mod, target, executor=executor, runtime=runtime, params=params)\n', (22370, 22434), False, 'import tvm\n'), ((26410, 26485), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'module': 'mod', 'inputs': 'inputs', 'outputs': 'output_list', 'params': 'params'}), '(module=mod, inputs=inputs, outputs=output_list, params=params)\n', (26422, 26485), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, 
compile_models, parametrize_aot_options\n'), ((26807, 26845), 'pytest.main', 'pytest.main', (['([__file__] + sys.argv[1:])'], {}), '([__file__] + sys.argv[1:])\n', (26818, 26845), False, 'import pytest\n'), ((2615, 2644), 'numpy.ones', 'np.ones', (["shape_dict['weight']"], {}), "(shape_dict['weight'])\n", (2622, 2644), True, 'import numpy as np\n'), ((2690, 2717), 'numpy.ones', 'np.ones', (["shape_dict['data']"], {}), "(shape_dict['data'])\n", (2697, 2717), True, 'import numpy as np\n'), ((3304, 3320), 'numpy.ones', 'np.ones', (['(1, 10)'], {}), '((1, 10))\n', (3311, 3320), True, 'import numpy as np\n'), ((3350, 3381), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, 10)'}), '(size=(1, 10))\n', (3367, 3381), True, 'import numpy as np\n'), ((4425, 4456), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'ishape'], {}), '(0, 1, ishape)\n', (4442, 4456), True, 'import numpy as np\n'), ((4485, 4516), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'wshape'], {}), '(0, 1, wshape)\n', (4502, 4516), True, 'import numpy as np\n'), ((5219, 5240), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (5233, 5240), True, 'import numpy as np\n'), ((5268, 5289), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (5282, 5289), True, 'import numpy as np\n'), ((5317, 5343), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '()'}), '(size=())\n', (5334, 5343), True, 'import numpy as np\n'), ((6045, 6074), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10,)'}), '(size=(10,))\n', (6062, 6074), True, 'import numpy as np\n'), ((7893, 7915), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (7907, 7915), True, 'import numpy as np\n'), ((7947, 7968), 'numpy.random.rand', 'np.random.rand', (['(1)', '(10)'], {}), '(1, 10)\n', (7961, 7968), True, 'import numpy as np\n'), ((8528, 8557), 'tvm.relay.TensorType', 'relay.TensorType', (['[]', '"""int32"""'], {}), 
"([], 'int32')\n", (8544, 8557), False, 'from tvm import relay, TVMError\n'), ((9005, 9022), 'tvm.relay.split', 'relay.split', (['x', '(3)'], {}), '(x, 3)\n', (9016, 9022), False, 'from tvm import relay, TVMError\n'), ((9178, 9198), 'numpy.random.rand', 'np.random.rand', (['(6)', '(9)'], {}), '(6, 9)\n', (9192, 9198), True, 'import numpy as np\n'), ((10061, 10095), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data_shape'}), '(size=data_shape)\n', (10078, 10095), True, 'import numpy as np\n'), ((11647, 11679), 'tvm.relay.transform.MergeCompilerRegions', 'transform.MergeCompilerRegions', ([], {}), '()\n', (11677, 11679), False, 'from tvm.relay import testing, transform\n'), ((13438, 13470), 'tvm.relay.transform.MergeCompilerRegions', 'transform.MergeCompilerRegions', ([], {}), '()\n', (13468, 13470), False, 'from tvm.relay import testing, transform\n'), ((13600, 13622), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (13614, 13622), True, 'import numpy as np\n'), ((14449, 14465), 'numpy.ones', 'np.ones', (['(1, 10)'], {}), '((1, 10))\n', (14456, 14465), True, 'import numpy as np\n'), ((14495, 14526), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, 10)'}), '(size=(1, 10))\n', (14512, 14526), True, 'import numpy as np\n'), ((15831, 15860), 'numpy.ones', 'np.ones', (["shape_dict['weight']"], {}), "(shape_dict['weight'])\n", (15838, 15860), True, 'import numpy as np\n'), ((15906, 15933), 'numpy.ones', 'np.ones', (["shape_dict['data']"], {}), "(shape_dict['data'])\n", (15913, 15933), True, 'import numpy as np\n'), ((16139, 16235), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'name': '"""mod1"""', 'module': 'mod1', 'inputs': 'inputs1', 'outputs': 'output_list1', 'params': 'params1'}), "(name='mod1', module=mod1, inputs=inputs1, outputs=output_list1,\n params=params1)\n", (16151, 16235), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, 
compile_and_run, compile_models, parametrize_aot_options\n'), ((16275, 16371), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'name': '"""mod2"""', 'module': 'mod2', 'inputs': 'inputs2', 'outputs': 'output_list2', 'params': 'params2'}), "(name='mod2', module=mod2, inputs=inputs2, outputs=output_list2,\n params=params2)\n", (16287, 16371), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((18365, 18386), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (18379, 18386), True, 'import numpy as np\n'), ((18414, 18435), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (18428, 18435), True, 'import numpy as np\n'), ((18463, 18489), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '()'}), '(size=())\n', (18480, 18489), True, 'import numpy as np\n'), ((20094, 20115), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (20108, 20115), True, 'import numpy as np\n'), ((20143, 20164), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (20157, 20164), True, 'import numpy as np\n'), ((20192, 20218), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '()'}), '(size=())\n', (20209, 20218), True, 'import numpy as np\n'), ((21163, 21179), 'numpy.ones', 'np.ones', (['(1, 10)'], {}), '((1, 10))\n', (21170, 21179), True, 'import numpy as np\n'), ((21209, 21240), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, 10)'}), '(size=(1, 10))\n', (21226, 21240), True, 'import numpy as np\n'), ((25330, 25387), 'aot_test_utils.AOTTestModel', 'AOTTestModel', ([], {'module': 'relay_mod', 'inputs': 'None', 'outputs': 'None'}), '(module=relay_mod, inputs=None, outputs=None)\n', (25342, 25387), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, 
parametrize_aot_options\n'), ((26149, 26183), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data_shape'}), '(size=data_shape)\n', (26166, 26183), True, 'import numpy as np\n'), ((3572, 3596), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (3590, 3596), False, 'from tvm.ir.module import IRModule\n'), ((5530, 5554), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (5548, 5554), False, 'from tvm.ir.module import IRModule\n'), ((6221, 6245), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (6239, 6245), False, 'from tvm.ir.module import IRModule\n'), ((6653, 6677), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (6671, 6677), False, 'from tvm.ir.module import IRModule\n'), ((7103, 7128), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['ident'], {}), '(ident)\n', (7121, 7128), False, 'from tvm.ir.module import IRModule\n'), ((7519, 7543), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (7537, 7543), False, 'from tvm.ir.module import IRModule\n'), ((8145, 8169), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (8163, 8169), False, 'from tvm.ir.module import IRModule\n'), ((8725, 8749), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (8743, 8749), False, 'from tvm.ir.module import IRModule\n'), ((9343, 9367), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (9361, 9367), False, 'from tvm.ir.module import IRModule\n'), ((18657, 18681), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (18675, 18681), False, 'from tvm.ir.module import IRModule\n'), ((19298, 19322), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (19316, 19322), False, 'from tvm.ir.module import IRModule\n'), 
((21431, 21455), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (21449, 21455), False, 'from tvm.ir.module import IRModule\n'), ((1718, 1742), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (1736, 1742), False, 'from tvm.ir.module import IRModule\n'), ((1763, 1790), 'aot_test_utils.generate_ref_data', 'generate_ref_data', (['func', '{}'], {}), '(func, {})\n', (1780, 1790), False, 'from aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, generate_ref_data, convert_to_relay, compile_and_run, compile_models, parametrize_aot_options\n'), ((5965, 5986), 'tvm.relay.Tuple', 'relay.Tuple', (['[x2, x3]'], {}), '([x2, x3])\n', (5976, 5986), False, 'from tvm import relay, TVMError\n'), ((6519, 6533), 'tvm.relay.const', 'relay.const', (['(1)'], {}), '(1)\n', (6530, 6533), False, 'from tvm import relay, TVMError\n'), ((6535, 6549), 'tvm.relay.const', 'relay.const', (['(2)'], {}), '(2)\n', (6546, 6549), False, 'from tvm import relay, TVMError\n'), ((11796, 11818), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (11810, 11818), True, 'import numpy as np\n'), ((11871, 11893), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (11885, 11893), True, 'import numpy as np\n'), ((13702, 13724), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (13716, 13724), True, 'import numpy as np\n'), ((20496, 20520), 'tvm.ir.module.IRModule.from_expr', 'IRModule.from_expr', (['func'], {}), '(func)\n', (20514, 20520), False, 'from tvm.ir.module import IRModule\n')] |
# @Time: 2/21/2021
# @Author: lnblanke
# @Email: <EMAIL>
# @File: Prediction.py
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
import tensorflow as tf
from sklearn.decomposition import PCA
# ---------------------------------------------------------------------------
# Titanic survival prediction: preprocessing + a small Keras classifier.
#
# Pipeline: drop identifier-like columns, impute missing values, label-encode
# object columns, one-hot encode the categoricals, standardize, project with
# PCA, then train a dense network and write "submission.csv".
# ---------------------------------------------------------------------------
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")

# Columns with no usable predictive signal (identifiers / mostly missing).
drop_cols = ["Name", "PassengerId", "Cabin", "Ticket"]

train = train.drop(drop_cols, axis = 1)
test = test.drop(drop_cols, axis = 1)

# Impute with the most frequent value; fit on train only, apply to test.
imputer = SimpleImputer(missing_values = np.nan, strategy = "most_frequent")

train_data = pd.DataFrame(imputer.fit_transform(train.drop("Survived", axis = 1)))
test = pd.DataFrame(imputer.transform(test))

train_data.columns = train.drop("Survived", axis = 1).columns
test.columns = train.drop("Survived", axis = 1).columns

# Label-encode object (string) columns so the encoder below sees integers.
for col in train.select_dtypes("object"):
    train_data[col], _ = train_data[col].factorize()

for col in test.select_dtypes("object"):
    test[col], _ = test[col].factorize()

train_label = train.Survived

# BUG FIX: fit the one-hot encoder once on the training data and reuse it on
# the test set.  The previous code fit a second, independent transformer on
# the test set, which can yield a different column layout / category set.
# handle_unknown="ignore" keeps transform() safe for unseen test categories.
encoder = ColumnTransformer([("one_hot_encoder", OneHotEncoder(handle_unknown = "ignore"), ["Pclass", "Sex", "Embarked"])],
                            remainder = "passthrough")
train_data = pd.DataFrame(encoder.fit_transform(train_data))
test = pd.DataFrame(encoder.transform(test))

# BUG FIX: scale the test set with the statistics learned on the training
# set (transform), instead of re-fitting the scaler on test data.
ss = StandardScaler()
train_data = ss.fit_transform(train_data)
test = ss.transform(test)

pca = PCA(n_components = None)
train_pca = pd.DataFrame(pca.fit_transform(train_data))
test_pca = pd.DataFrame(pca.transform(test))

train_pca, valid_pca, train_label, valid_label = train_test_split(train_pca, train_label, train_size = .8)

# BUG FIX: a binary classifier trained with binary_crossentropy needs a
# sigmoid output layer; ReLU is unbounded and can emit exact zeros, which
# makes the cross-entropy loss degenerate.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation = "relu"),
    tf.keras.layers.Dense(5, activation = "relu"),
    tf.keras.layers.Dense(1, activation = "sigmoid")
])

if __name__ == '__main__':
    model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
    model.fit(train_pca, train_label, epochs = 1000, batch_size = 1)

    # Hold-out accuracy: threshold the sigmoid probability at 0.5.
    pred = model.predict(valid_pca)
    acc = 0

    for i in range(0, len(valid_label)):
        acc += ((pred[i] >= .5) == valid_label.values[i])

    print(acc / len(valid_label))

    pred = model.predict(test_pca)

    # BUG FIX: np.argmax(pred, axis=1) on the (n, 1) prediction array is
    # always 0, i.e. every passenger would be predicted "did not survive".
    # Threshold the probability instead, matching the validation step above.
    output = pd.DataFrame({"PassengerId": pd.read_csv("test.csv").PassengerId, "Survived": (pred >= .5).astype(int).ravel()})
    output.to_csv("submission.csv", index = False)
| [
"pandas.read_csv",
"sklearn.decomposition.PCA",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"numpy.argmax",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.layers.Dense",
"sklearn.impute.SimpleImputer"
] | [((395, 419), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (406, 419), True, 'import pandas as pd\n'), ((427, 450), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (438, 450), True, 'import pandas as pd\n'), ((597, 659), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""most_frequent"""'}), "(missing_values=np.nan, strategy='most_frequent')\n", (610, 659), False, 'from sklearn.impute import SimpleImputer\n'), ((1461, 1477), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1475, 1477), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((1557, 1579), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'None'}), '(n_components=None)\n', (1560, 1579), False, 'from sklearn.decomposition import PCA\n'), ((1734, 1790), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_pca', 'train_label'], {'train_size': '(0.8)'}), '(train_pca, train_label, train_size=0.8)\n', (1750, 1790), False, 'from sklearn.model_selection import train_test_split\n'), ((1827, 1870), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (1848, 1870), True, 'import tensorflow as tf\n'), ((1878, 1921), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {'activation': '"""relu"""'}), "(5, activation='relu')\n", (1899, 1921), True, 'import tensorflow as tf\n'), ((1929, 1972), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""relu"""'}), "(1, activation='relu')\n", (1950, 1972), True, 'import tensorflow as tf\n'), ((2480, 2503), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (2489, 2503), True, 'import numpy as np\n'), ((2431, 2454), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (2442, 2454), True, 'import pandas as 
pd\n'), ((1186, 1201), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1199, 1201), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((1353, 1368), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1366, 1368), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n')] |
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
# Feature-detect the two optional parquet engines.  The _HAVE_* flags drive
# the skipif marks on the engine fixtures below.
try:
    import pyarrow  # noqa

    _HAVE_PYARROW = True
except ImportError:
    _HAVE_PYARROW = False
try:
    import fastparquet  # noqa

    _HAVE_FASTPARQUET = True
except ImportError:
    _HAVE_FASTPARQUET = False

# Applied to every test in this module: some engine versions emit a
# RangeIndex deprecation warning that is not under test here.
pytestmark = pytest.mark.filterwarnings(
    "ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
    params=[
        pytest.param(
            "fastparquet",
            marks=pytest.mark.skipif(
                not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
            ),
        ),
        pytest.param(
            "pyarrow",
            marks=pytest.mark.skipif(
                not _HAVE_PYARROW, reason="pyarrow is not installed"
            ),
        ),
    ]
)
def engine(request):
    """Run the decorated test once per installed parquet engine name."""
    return request.param
@pytest.fixture
def pa():
    """Engine name "pyarrow"; skips the requesting test when unavailable."""
    if _HAVE_PYARROW:
        return "pyarrow"
    pytest.skip("pyarrow is not installed")
@pytest.fixture
def fp():
    """Engine name "fastparquet"; skips the requesting test when unavailable."""
    if _HAVE_FASTPARQUET:
        return "fastparquet"
    pytest.skip("fastparquet is not installed")
@pytest.fixture
def df_compat():
    """Minimal frame exercised by the engine-compatibility round trips."""
    data = {"A": [1, 2, 3], "B": "foo"}
    return pd.DataFrame(data)
@pytest.fixture
def df_cross_compat():
    """Frame with dtypes both engines can read back from each other's files.

    Deliberately omitted (known cross-engine incompatibilities in the
    original layout): 'u1' integers, tz-aware and ns-frequency datetimes.
    """
    columns = {
        "a": list("abc"),
        "b": [1, 2, 3],
        "d": np.arange(4.0, 7.0, dtype="float64"),
        "e": [True, False, True],
        "f": pd.date_range("20130101", periods=3),
    }
    return pd.DataFrame(columns)
@pytest.fixture
def df_full():
    """Frame covering the full set of dtypes the engines should round-trip."""
    columns = {
        "string": ["a", "b", "c"],
        "string_with_nan": ["a", np.nan, "c"],
        "string_with_none": ["a", None, "c"],
        "bytes": [b"foo", b"bar", b"baz"],
        "unicode": ["foo", "bar", "baz"],
        "int": [1, 2, 3],
        "uint": np.arange(3, 6).astype("u1"),
        "float": np.arange(4.0, 7.0, dtype="float64"),
        "float_with_nan": [2.0, np.nan, 3.0],
        "bool": [True, False, True],
        "datetime": pd.date_range("20130101", periods=3),
        "datetime_with_nat": [
            pd.Timestamp("20130101"),
            pd.NaT,
            pd.Timestamp("20130103"),
        ],
    }
    return pd.DataFrame(columns)
def check_round_trip(
    df,
    engine=None,
    path=None,
    write_kwargs=None,
    read_kwargs=None,
    expected=None,
    check_names=True,
    check_like=False,
    repeat=2,
):
    """Verify parquet serializer and deserializer produce the same results.

    Performs a pandas to disk and disk to pandas round trip,
    then compares the 2 resulting DataFrames to verify equality.

    Parameters
    ----------
    df: Dataframe
    engine: str, optional
        'pyarrow' or 'fastparquet'
    path: str, optional
        Target path; a temporary file is used when omitted.
    write_kwargs: dict of str:str, optional
        Forwarded to ``DataFrame.to_parquet`` (defaults to no compression).
    read_kwargs: dict of str:str, optional
        Forwarded to ``read_parquet``.
    expected: DataFrame, optional
        Expected deserialization result, otherwise will be equal to `df`
    check_names: bool, optional
        Whether index/column names are compared (passed through to
        ``assert_frame_equal``); the fastparquet tests disable this.
    check_like: bool, optional
        If True, ignore the order of index & columns.
    repeat: int, optional
        How many times to repeat the test
    """
    write_kwargs = write_kwargs or {"compression": None}
    read_kwargs = read_kwargs or {}

    if expected is None:
        expected = df

    if engine:
        write_kwargs["engine"] = engine
        read_kwargs["engine"] = engine

    def compare(repeat):
        # Write/read `repeat` times to catch state leaking between runs.
        for _ in range(repeat):
            df.to_parquet(path, **write_kwargs)
            # Engines may emit version-dependent warnings that are not
            # under test here; record=True suppresses them.
            with catch_warnings(record=True):
                actual = read_parquet(path, **read_kwargs)
            tm.assert_frame_equal(
                expected, actual, check_names=check_names, check_like=check_like
            )

    if path is None:
        with tm.ensure_clean() as path:
            compare(repeat)
    else:
        compare(repeat)
def test_invalid_engine(df_compat):
    # An engine name outside {"pyarrow", "fastparquet", "auto"} must raise.
    bad_engine = "foo"
    with pytest.raises(ValueError):
        check_round_trip(df_compat, bad_engine, "bar")
def test_options_py(df_compat, pa):
    # The round trip must honour the global engine option set to pyarrow.
    engine_option = ("io.parquet.engine", "pyarrow")
    with pd.option_context(*engine_option):
        check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
    # The round trip must honour the global engine option set to fastparquet.
    engine_option = ("io.parquet.engine", "fastparquet")
    with pd.option_context(*engine_option):
        check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
    # With the option left at "auto", either installed engine may serve.
    engine_option = ("io.parquet.engine", "auto")
    with pd.option_context(*engine_option):
        check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
    """get_engine resolves "auto" via the io.parquet.engine option."""
    # Explicit engine names resolve regardless of any option value.
    assert isinstance(get_engine("pyarrow"), PyArrowImpl)
    assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    # "auto" follows the option ("auto" itself prefers pyarrow); explicit
    # names remain unaffected by it.
    for option, auto_impl in [
        ("pyarrow", PyArrowImpl),
        ("fastparquet", FastParquetImpl),
        ("auto", PyArrowImpl),
    ]:
        with pd.option_context("io.parquet.engine", option):
            assert isinstance(get_engine("auto"), auto_impl)
            assert isinstance(get_engine("pyarrow"), PyArrowImpl)
            assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
    # Expect different error messages from get_engine(engine="auto")
    # if engines aren't installed vs. are installed but bad version
    from pandas.compat._optional import VERSIONS

    # Do we have engines installed, but a bad version of them?
    pa_min_ver = VERSIONS.get("pyarrow")
    fp_min_ver = VERSIONS.get("fastparquet")
    have_pa_bad_version = (
        False
        if not _HAVE_PYARROW
        else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
    )
    have_fp_bad_version = (
        False
        if not _HAVE_FASTPARQUET
        else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
    )
    # Do we have usable engines installed?
    have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
    have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version

    if not have_usable_pa and not have_usable_fp:
        # No usable engines found.
        # The "." in the match patterns below stand in for the quote
        # characters of the real message, which vary by Python version.
        if have_pa_bad_version:
            match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")
        else:
            match = "Missing optional dependency .pyarrow."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")

        if have_fp_bad_version:
            match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")
        else:
            match = "Missing optional dependency .fastparquet."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
    """Write with pyarrow, then read the same file back with fastparquet."""
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=pa, compression=None)

        # Full read, then a column-subset read.
        for columns in (None, ["a", "d"]):
            result = read_parquet(path, engine=fp, columns=columns)
            expected = df if columns is None else df[columns]
            tm.assert_frame_equal(result, expected)
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
    # cross-compat with differing reading/writing engines:
    # write with fastparquet, read back with pyarrow.
    if (
        LooseVersion(pyarrow.__version__) < "0.15"
        and LooseVersion(pyarrow.__version__) >= "0.13"
    ):
        pytest.xfail(
            "Reading fastparquet with pyarrow in 0.14 fails: "
            "https://issues.apache.org/jira/browse/ARROW-6492"
        )

    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=fp, compression=None)

        # Silence engine-version-dependent warnings not under test here.
        with catch_warnings(record=True):
            result = read_parquet(path, engine=pa)
            tm.assert_frame_equal(result, df)

            result = read_parquet(path, engine=pa, columns=["a", "d"])
            tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
    """Shared helpers for the engine-specific test classes below."""

    def check_error_on_write(self, df, engine, exc):
        """Assert that writing ``df`` with ``engine`` raises ``exc``."""
        with tm.ensure_clean() as path, pytest.raises(exc):
            to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
    """Engine-agnostic behaviour: both pyarrow and fastparquet must pass."""

    def test_error(self, engine):
        # Only DataFrames are writable; everything else raises ValueError.
        for obj in [
            pd.Series([1, 2, 3]),
            1,
            "foo",
            pd.Timestamp("20130101"),
            np.array([1, 2, 3]),
        ]:
            self.check_error_on_write(obj, engine, ValueError)

    def test_columns_dtypes(self, engine):
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        # unicode
        df.columns = ["foo", "bar"]
        check_round_trip(df, engine)

    def test_columns_dtypes_invalid(self, engine):
        # Parquet requires string column names; anything else must raise.
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        # numeric
        df.columns = [0, 1]
        self.check_error_on_write(df, engine, ValueError)

        # bytes
        df.columns = [b"foo", b"bar"]
        self.check_error_on_write(df, engine, ValueError)

        # python object
        df.columns = [
            datetime.datetime(2011, 1, 1, 0, 0),
            datetime.datetime(2011, 1, 1, 1, 1),
        ]
        self.check_error_on_write(df, engine, ValueError)

    @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
    def test_compression(self, engine, compression):
        # snappy/brotli are optional codecs; skip when not installed.
        if compression == "snappy":
            pytest.importorskip("snappy")
        elif compression == "brotli":
            pytest.importorskip("brotli")

        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine, write_kwargs={"compression": compression})

    def test_read_columns(self, engine):
        # GH18154
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        expected = pd.DataFrame({"string": list("abc")})
        check_round_trip(
            df, engine, expected=expected, read_kwargs={"columns": ["string"]}
        )

    def test_write_index(self, engine):
        # fastparquet does not preserve index names, hence check_names=False.
        check_names = engine != "fastparquet"

        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine)

        indexes = [
            [2, 3, 4],
            pd.date_range("20130101", periods=3),
            list("abc"),
            [1, 3, 4],
        ]
        # non-default index
        for index in indexes:
            df.index = index
            if isinstance(index, pd.DatetimeIndex):
                df.index = df.index._with_freq(None)  # freq doesnt round-trip
            check_round_trip(df, engine, check_names=check_names)

        # index with meta-data
        df.index = [0, 1, 2]
        df.index.name = "foo"
        check_round_trip(df, engine)

    def test_write_multiindex(self, pa):
        # Not supported in fastparquet as of 0.1.3 or older pyarrow version
        engine = pa

        df = pd.DataFrame({"A": [1, 2, 3]})
        index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df.index = index
        check_round_trip(df, engine)

    def test_write_column_multiindex(self, engine):
        # column multi-index is not representable in parquet -> must raise
        mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
        self.check_error_on_write(df, engine, ValueError)

    def test_multiindex_with_columns(self, pa):
        engine = pa
        dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
        df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
        index1 = pd.MultiIndex.from_product(
            [["Level1", "Level2"], dates], names=["level", "date"]
        )
        # same index but without level names
        index2 = index1.copy(names=None)
        for index in [index1, index2]:
            df.index = index

            check_round_trip(df, engine)
            check_round_trip(
                df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
            )

    def test_write_ignoring_index(self, engine):
        # ENH 20768
        # Ensure index=False omits the index from the written Parquet file.
        df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})

        write_kwargs = {"compression": None, "index": False}

        # Because we're dropping the index, we expect the loaded dataframe to
        # have the default integer index.
        expected = df.reset_index(drop=True)

        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)

        # Ignore custom index
        df = pd.DataFrame(
            {"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
        )

        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)

        # Ignore multi-indexes as well.
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        df = pd.DataFrame(
            {"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
        )

        expected = df.reset_index(drop=True)
        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
class TestParquetPyArrow(Base):
    """Behaviour specific to the pyarrow engine (extension/tz/partition support)."""

    def test_basic(self, pa, df_full):
        df = df_full

        # additional supported types for pyarrow
        dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        dti = dti._with_freq(None)  # freq doesnt round-trip
        df["datetime_tz"] = dti
        df["bool_with_none"] = [True, None, True]

        check_round_trip(df, pa)

    def test_basic_subset_columns(self, pa, df_full):
        # GH18628

        df = df_full
        # additional supported types for pyarrow
        df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")

        check_round_trip(
            df,
            pa,
            expected=df[["string", "int"]],
            read_kwargs={"columns": ["string", "int"]},
        )

    def test_duplicate_columns(self, pa):
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_error_on_write(df, pa, ValueError)

    def test_unsupported(self, pa):
        if LooseVersion(pyarrow.__version__) < LooseVersion("0.15.1.dev"):
            # period - will be supported using an extension type with pyarrow 1.0
            df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
            # pyarrow 0.11 raises ArrowTypeError
            # older pyarrows raise ArrowInvalid
            self.check_error_on_write(df, pa, Exception)

        # timedelta
        df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
        self.check_error_on_write(df, pa, NotImplementedError)

        # mixed python objects
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        # pyarrow 0.11 raises ArrowTypeError
        # older pyarrows raise ArrowInvalid
        self.check_error_on_write(df, pa, Exception)

    def test_categorical(self, pa):
        # supported in >= 0.7.0
        df = pd.DataFrame()
        df["a"] = pd.Categorical(list("abcdef"))

        # test for null, out-of-order values, and unobserved category
        df["b"] = pd.Categorical(
            ["bar", "foo", "foo", "bar", None, "bar"],
            dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
        )

        # test for ordered flag
        df["c"] = pd.Categorical(
            ["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
        )

        if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15.0"):
            check_round_trip(df, pa)
        else:
            # de-serialized as object for pyarrow < 0.15
            expected = df.astype(object)
            check_round_trip(df, pa, expected=expected)

    def test_s3_roundtrip(self, df_compat, s3_resource, pa):
        # GH #19134
        check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")

    @td.skip_if_no("s3fs")
    @pytest.mark.parametrize("partition_col", [["A"], []])
    def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
        from pandas.io.s3 import get_fs as get_s3_fs

        # GH #26388
        # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716
        # As per pyarrow partitioned columns become 'categorical' dtypes
        # and are added to back of dataframe on read

        expected_df = df_compat.copy()
        if partition_col:
            expected_df[partition_col] = expected_df[partition_col].astype("category")
        check_round_trip(
            df_compat,
            pa,
            expected=expected_df,
            path="s3://pandas-test/parquet_dir",
            write_kwargs={
                "partition_cols": partition_col,
                "compression": None,
                "filesystem": get_s3_fs(),
            },
            check_like=True,
            repeat=1,
        )

    def test_partition_cols_supported(self, pa, df_full):
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        with tm.ensure_clean_dir() as path:
            df.to_parquet(path, partition_cols=partition_cols, compression=None)
            import pyarrow.parquet as pq

            dataset = pq.ParquetDataset(path, validate_schema=False)
            assert len(dataset.partitions.partition_names) == 2
            assert dataset.partitions.partition_names == set(partition_cols)

    def test_partition_cols_string(self, pa, df_full):
        # GH #27117
        # a bare string partition column must be treated as a one-element list
        partition_cols = "bool"
        partition_cols_list = [partition_cols]
        df = df_full
        with tm.ensure_clean_dir() as path:
            df.to_parquet(path, partition_cols=partition_cols, compression=None)
            import pyarrow.parquet as pq

            dataset = pq.ParquetDataset(path, validate_schema=False)
            assert len(dataset.partitions.partition_names) == 1
            assert dataset.partitions.partition_names == set(partition_cols_list)

    def test_empty_dataframe(self, pa):
        # GH #27339
        df = pd.DataFrame()
        check_round_trip(df, pa)

    def test_write_with_schema(self, pa):
        import pyarrow

        # an explicit pyarrow schema overrides the inferred dtype (int -> bool)
        df = pd.DataFrame({"x": [0, 1]})
        schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
        out_df = df.astype(bool)
        check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)

    @td.skip_if_no("pyarrow", min_version="0.15.0")
    def test_additional_extension_arrays(self, pa):
        # test additional ExtensionArrays that are supported through the
        # __arrow_array__ protocol
        df = pd.DataFrame(
            {
                "a": pd.Series([1, 2, 3], dtype="Int64"),
                "b": pd.Series([1, 2, 3], dtype="UInt32"),
                "c": pd.Series(["a", None, "c"], dtype="string"),
            }
        )
        if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
            expected = df
        else:
            # de-serialized as plain int / object
            expected = df.assign(
                a=df.a.astype("int64"), b=df.b.astype("int64"), c=df.c.astype("object")
            )
        check_round_trip(df, pa, expected=expected)

        df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
        if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
            expected = df
        else:
            # if missing values in integer, currently de-serialized as float
            expected = df.assign(a=df.a.astype("float64"))
        check_round_trip(df, pa, expected=expected)

    @td.skip_if_no("pyarrow", min_version="0.16.0")
    def test_additional_extension_types(self, pa):
        # test additional ExtensionArrays that are supported through the
        # __arrow_array__ protocol + by defining a custom ExtensionType
        df = pd.DataFrame(
            {
                # Arrow does not yet support struct in writing to Parquet (ARROW-1644)
                # "c": pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (3, 4)]),
                "d": pd.period_range("2012-01-01", periods=3, freq="D"),
            }
        )
        check_round_trip(df, pa)

    @td.skip_if_no("pyarrow", min_version="0.14")
    def test_timestamp_nanoseconds(self, pa):
        # with version 2.0, pyarrow defaults to writing the nanoseconds, so
        # this should work without error
        df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
        check_round_trip(df, pa, write_kwargs={"version": "2.0"})
class TestParquetFastParquet(Base):
    """Round-trip and error tests for the fastparquet engine."""

    @td.skip_if_no("fastparquet", min_version="0.3.2")
    def test_basic(self, fp, df_full):
        frame = df_full
        tz_index = pd.date_range("20130101", periods=3, tz="US/Eastern")
        # freq does not round-trip, so drop it before comparing.
        tz_index = tz_index._with_freq(None)
        frame["datetime_tz"] = tz_index
        frame["timedelta"] = pd.timedelta_range("1 day", periods=3)
        check_round_trip(frame, fp)

    @pytest.mark.skip(reason="not supported")
    def test_duplicate_columns(self, fp):
        # fastparquet is not currently able to handle duplicate columns
        dupes = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_error_on_write(dupes, fp, ValueError)

    def test_bool_with_none(self, fp):
        frame = pd.DataFrame({"a": [True, None, False]})
        # an optional boolean column is stored as float16
        expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
        check_round_trip(frame, fp, expected=expected)

    def test_unsupported(self, fp):
        # period dtype is rejected
        frame = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
        self.check_error_on_write(frame, fp, ValueError)
        # mixed-type object columns are rejected as well
        frame = pd.DataFrame({"a": ["a", 1, 2.0]})
        self.check_error_on_write(frame, fp, ValueError)

    def test_categorical(self, fp):
        frame = pd.DataFrame({"a": pd.Categorical(list("abc"))})
        check_round_trip(frame, fp)

    def test_filter_row_groups(self, fp):
        frame = pd.DataFrame({"a": list(range(0, 3))})
        with tm.ensure_clean() as path:
            # one row per row group, so the filter keeps exactly one row
            frame.to_parquet(path, fp, compression=None, row_group_offsets=1)
            result = read_parquet(path, fp, filters=[("a", "==", 0)])
        assert len(result) == 1

    def test_s3_roundtrip(self, df_compat, s3_resource, fp):
        # GH #19134
        check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet")

    def test_partition_cols_supported(self, fp, df_full):
        # GH #23283
        cols = ["bool", "int"]
        frame = df_full
        with tm.ensure_clean_dir() as path:
            frame.to_parquet(
                path,
                engine="fastparquet",
                partition_cols=cols,
                compression=None,
            )
            assert os.path.exists(path)
            import fastparquet  # noqa: F811

            assert len(fastparquet.ParquetFile(path, False).cats) == 2

    def test_partition_cols_string(self, fp, df_full):
        # GH #27117
        col = "bool"
        frame = df_full
        with tm.ensure_clean_dir() as path:
            frame.to_parquet(
                path,
                engine="fastparquet",
                partition_cols=col,
                compression=None,
            )
            assert os.path.exists(path)
            import fastparquet  # noqa: F811

            assert len(fastparquet.ParquetFile(path, False).cats) == 1

    def test_partition_on_supported(self, fp, df_full):
        # GH #23283
        cols = ["bool", "int"]
        frame = df_full
        with tm.ensure_clean_dir() as path:
            frame.to_parquet(
                path,
                engine="fastparquet",
                compression=None,
                partition_on=cols,
            )
            assert os.path.exists(path)
            import fastparquet  # noqa: F811

            assert len(fastparquet.ParquetFile(path, False).cats) == 2

    def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
        # GH #23283: supplying both partitioning keywords must raise.
        cols = ["bool", "int"]
        with pytest.raises(ValueError):
            with tm.ensure_clean_dir() as path:
                df_full.to_parquet(
                    path,
                    engine="fastparquet",
                    compression=None,
                    partition_on=cols,
                    partition_cols=cols,
                )

    def test_empty_dataframe(self, fp):
        # GH #27339
        frame = pd.DataFrame()
        expected = frame.copy()
        expected.index.name = "index"
        check_round_trip(frame, fp, expected=expected)
| [
"pytest.mark.filterwarnings",
"pandas._testing.ensure_clean",
"pandas.option_context",
"pandas.compat._optional.VERSIONS.get",
"numpy.array",
"pandas.io.parquet.read_parquet",
"pandas.MultiIndex.from_tuples",
"pandas.date_range",
"pytest.xfail",
"numpy.arange",
"pandas.MultiIndex.from_product",
... | [((620, 707), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:RangeIndex.* is deprecated:DeprecationWarning"""'], {}), "(\n 'ignore:RangeIndex.* is deprecated:DeprecationWarning')\n", (646, 707), False, 'import pytest\n'), ((1491, 1533), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 3], 'B': 'foo'}"], {}), "({'A': [1, 2, 3], 'B': 'foo'})\n", (1503, 1533), True, 'import pandas as pd\n'), ((6364, 6387), 'pandas.compat._optional.VERSIONS.get', 'VERSIONS.get', (['"""pyarrow"""'], {}), "('pyarrow')\n", (6376, 6387), False, 'from pandas.compat._optional import VERSIONS\n'), ((6405, 6432), 'pandas.compat._optional.VERSIONS.get', 'VERSIONS.get', (['"""fastparquet"""'], {}), "('fastparquet')\n", (6417, 6432), False, 'from pandas.compat._optional import VERSIONS\n'), ((10334, 10408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""compression"""', "[None, 'gzip', 'snappy', 'brotli']"], {}), "('compression', [None, 'gzip', 'snappy', 'brotli'])\n", (10357, 10408), False, 'import pytest\n'), ((17069, 17090), 'pandas.util._test_decorators.skip_if_no', 'td.skip_if_no', (['"""s3fs"""'], {}), "('s3fs')\n", (17082, 17090), True, 'import pandas.util._test_decorators as td\n'), ((17096, 17149), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""partition_col"""', "[['A'], []]"], {}), "('partition_col', [['A'], []])\n", (17119, 17149), False, 'import pytest\n'), ((19568, 19614), 'pandas.util._test_decorators.skip_if_no', 'td.skip_if_no', (['"""pyarrow"""'], {'min_version': '"""0.15.0"""'}), "('pyarrow', min_version='0.15.0')\n", (19581, 19614), True, 'import pandas.util._test_decorators as td\n'), ((20756, 20802), 'pandas.util._test_decorators.skip_if_no', 'td.skip_if_no', (['"""pyarrow"""'], {'min_version': '"""0.16.0"""'}), "('pyarrow', min_version='0.16.0')\n", (20769, 20802), True, 'import pandas.util._test_decorators as td\n'), ((21349, 21393), 'pandas.util._test_decorators.skip_if_no', 'td.skip_if_no', (['"""pyarrow"""'], 
{'min_version': '"""0.14"""'}), "('pyarrow', min_version='0.14')\n", (21362, 21393), True, 'import pandas.util._test_decorators as td\n'), ((21751, 21800), 'pandas.util._test_decorators.skip_if_no', 'td.skip_if_no', (['"""fastparquet"""'], {'min_version': '"""0.3.2"""'}), "('fastparquet', min_version='0.3.2')\n", (21764, 21800), True, 'import pandas.util._test_decorators as td\n'), ((22127, 22167), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""not supported"""'}), "(reason='not supported')\n", (22143, 22167), False, 'import pytest\n'), ((1249, 1288), 'pytest.skip', 'pytest.skip', (['"""pyarrow is not installed"""'], {}), "('pyarrow is not installed')\n", (1260, 1288), False, 'import pytest\n'), ((1376, 1419), 'pytest.skip', 'pytest.skip', (['"""fastparquet is not installed"""'], {}), "('fastparquet is not installed')\n", (1387, 1419), False, 'import pytest\n'), ((4565, 4590), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4578, 4590), False, 'import pytest\n'), ((4715, 4764), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""pyarrow"""'], {}), "('io.parquet.engine', 'pyarrow')\n", (4732, 4764), True, 'import pandas as pd\n'), ((4875, 4928), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""fastparquet"""'], {}), "('io.parquet.engine', 'fastparquet')\n", (4892, 4928), True, 'import pandas as pd\n'), ((5045, 5091), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""auto"""'], {}), "('io.parquet.engine', 'auto')\n", (5062, 5091), True, 'import pandas as pd\n'), ((5190, 5211), 'pandas.io.parquet.get_engine', 'get_engine', (['"""pyarrow"""'], {}), "('pyarrow')\n", (5200, 5211), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5248, 5273), 'pandas.io.parquet.get_engine', 'get_engine', (['"""fastparquet"""'], {}), "('fastparquet')\n", (5258, 5273), False, 'from pandas.io.parquet 
import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5302, 5351), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""pyarrow"""'], {}), "('io.parquet.engine', 'pyarrow')\n", (5319, 5351), True, 'import pandas as pd\n'), ((5554, 5607), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""fastparquet"""'], {}), "('io.parquet.engine', 'fastparquet')\n", (5571, 5607), True, 'import pandas as pd\n'), ((5814, 5860), 'pandas.option_context', 'pd.option_context', (['"""io.parquet.engine"""', '"""auto"""'], {}), "('io.parquet.engine', 'auto')\n", (5831, 5860), True, 'import pandas as pd\n'), ((7912, 7929), 'pandas._testing.ensure_clean', 'tm.ensure_clean', ([], {}), '()\n', (7927, 7929), True, 'import pandas._testing as tm\n'), ((8014, 8043), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path'], {'engine': 'fp'}), '(path, engine=fp)\n', (8026, 8043), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((8052, 8085), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'df'], {}), '(result, df)\n', (8073, 8085), True, 'import pandas._testing as tm\n'), ((8104, 8153), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path'], {'engine': 'fp', 'columns': "['a', 'd']"}), "(path, engine=fp, columns=['a', 'd'])\n", (8116, 8153), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((8162, 8207), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', "df[['a', 'd']]"], {}), "(result, df[['a', 'd']])\n", (8183, 8207), True, 'import pandas._testing as tm\n'), ((8454, 8576), 'pytest.xfail', 'pytest.xfail', (['"""Reading fastparquet with pyarrow in 0.14 fails: https://issues.apache.org/jira/browse/ARROW-6492"""'], {}), "(\n 'Reading fastparquet with pyarrow in 0.14 fails: https://issues.apache.org/jira/browse/ARROW-6492'\n )\n", (8466, 
8576), False, 'import pytest\n'), ((8639, 8656), 'pandas._testing.ensure_clean', 'tm.ensure_clean', ([], {}), '()\n', (8654, 8656), True, 'import pandas._testing as tm\n'), ((10636, 10666), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 3]}"], {}), "({'A': [1, 2, 3]})\n", (10648, 10666), True, 'import pandas as pd\n'), ((11158, 11188), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 3]}"], {}), "({'A': [1, 2, 3]})\n", (11170, 11188), True, 'import pandas as pd\n'), ((11942, 11972), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 3]}"], {}), "({'A': [1, 2, 3]})\n", (11954, 11972), True, 'import pandas as pd\n'), ((11989, 12046), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('a', 1), ('a', 2), ('b', 1)]"], {}), "([('a', 1), ('a', 2), ('b', 1)])\n", (12014, 12046), True, 'import pandas as pd\n'), ((12212, 12269), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('a', 1), ('a', 2), ('b', 1)]"], {}), "([('a', 1), ('a', 2), ('b', 1)])\n", (12237, 12269), True, 'import pandas as pd\n'), ((12482, 12536), 'pandas.date_range', 'pd.date_range', (['"""01-Jan-2018"""', '"""01-Dec-2018"""'], {'freq': '"""MS"""'}), "('01-Jan-2018', '01-Dec-2018', freq='MS')\n", (12495, 12536), True, 'import pandas as pd\n'), ((12637, 12723), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Level1', 'Level2'], dates]"], {'names': "['level', 'date']"}), "([['Level1', 'Level2'], dates], names=['level',\n 'date'])\n", (12663, 12723), True, 'import pandas as pd\n'), ((13185, 13237), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': ['q', 'r', 's']}"], {}), "({'a': [1, 2, 3], 'b': ['q', 'r', 's']})\n", (13197, 13237), True, 'import pandas as pd\n'), ((13594, 13679), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': ['q', 'r', 's']}"], {'index': "['zyx', 'wvu', 'tsr']"}), "({'a': [1, 2, 3], 'b': ['q', 'r', 's']}, index=['zyx', 'wvu',\n 'tsr'])\n", (13606, 13679), True, 'import pandas as pd\n'), ((14398, 14456), 
'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)', 'tz': '"""Europe/Brussels"""'}), "('20130101', periods=3, tz='Europe/Brussels')\n", (14411, 14456), True, 'import pandas as pd\n'), ((14806, 14864), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)', 'tz': '"""Europe/Brussels"""'}), "('20130101', periods=3, tz='Europe/Brussels')\n", (14819, 14864), True, 'import pandas as pd\n'), ((15904, 15938), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': ['a', 1, 2.0]}"], {}), "({'a': ['a', 1, 2.0]})\n", (15916, 15938), True, 'import pandas as pd\n'), ((16164, 16178), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16176, 16178), True, 'import pandas as pd\n'), ((16511, 16603), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'c', 'a', 'c', 'b']"], {'categories': "['b', 'c', 'd']", 'ordered': '(True)'}), "(['a', 'b', 'c', 'a', 'c', 'b'], categories=['b', 'c', 'd'],\n ordered=True)\n", (16525, 16603), True, 'import pandas as pd\n'), ((19214, 19228), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19226, 19228), True, 'import pandas as pd\n'), ((19342, 19369), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [0, 1]}"], {}), "({'x': [0, 1]})\n", (19354, 19369), True, 'import pandas as pd\n'), ((21876, 21929), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)', 'tz': '"""US/Eastern"""'}), "('20130101', periods=3, tz='US/Eastern')\n", (21889, 21929), True, 'import pandas as pd\n'), ((22049, 22087), 'pandas.timedelta_range', 'pd.timedelta_range', (['"""1 day"""'], {'periods': '(3)'}), "('1 day', periods=3)\n", (22067, 22087), True, 'import pandas as pd\n'), ((22458, 22498), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [True, None, False]}"], {}), "({'a': [True, None, False]})\n", (22470, 22498), True, 'import pandas as pd\n'), ((22518, 22574), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1.0, np.nan, 0.0]}"], {'dtype': '"""float16"""'}), "({'a': [1.0, np.nan, 0.0]}, dtype='float16')\n", 
(22530, 22574), True, 'import pandas as pd\n'), ((22845, 22879), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': ['a', 1, 2.0]}"], {}), "({'a': ['a', 1, 2.0]})\n", (22857, 22879), True, 'import pandas as pd\n'), ((23159, 23174), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (23171, 23174), True, 'import pandas as pd\n'), ((25869, 25883), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (25881, 25883), True, 'import pandas as pd\n'), ((1740, 1776), 'numpy.arange', 'np.arange', (['(4.0)', '(7.0)'], {'dtype': '"""float64"""'}), "(4.0, 7.0, dtype='float64')\n", (1749, 1776), True, 'import numpy as np\n'), ((1833, 1869), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)'}), "('20130101', periods=3)\n", (1846, 1869), True, 'import pandas as pd\n'), ((2481, 2517), 'numpy.arange', 'np.arange', (['(4.0)', '(7.0)'], {'dtype': '"""float64"""'}), "(4.0, 7.0, dtype='float64')\n", (2490, 2517), True, 'import numpy as np\n'), ((2634, 2670), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)'}), "('20130101', periods=3)\n", (2647, 2670), True, 'import pandas as pd\n'), ((4276, 4368), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['expected', 'actual'], {'check_names': 'check_names', 'check_like': 'check_like'}), '(expected, actual, check_names=check_names, check_like\n =check_like)\n', (4297, 4368), True, 'import pandas._testing as tm\n'), ((4429, 4446), 'pandas._testing.ensure_clean', 'tm.ensure_clean', ([], {}), '()\n', (4444, 4446), True, 'import pandas._testing as tm\n'), ((5379, 5397), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (5389, 5397), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5438, 5459), 'pandas.io.parquet.get_engine', 'get_engine', (['"""pyarrow"""'], {}), "('pyarrow')\n", (5448, 5459), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, 
to_parquet\n'), ((5500, 5525), 'pandas.io.parquet.get_engine', 'get_engine', (['"""fastparquet"""'], {}), "('fastparquet')\n", (5510, 5525), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5635, 5653), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (5645, 5653), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5698, 5719), 'pandas.io.parquet.get_engine', 'get_engine', (['"""pyarrow"""'], {}), "('pyarrow')\n", (5708, 5719), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5760, 5785), 'pandas.io.parquet.get_engine', 'get_engine', (['"""fastparquet"""'], {}), "('fastparquet')\n", (5770, 5785), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5888, 5906), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (5898, 5906), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((5947, 5968), 'pandas.io.parquet.get_engine', 'get_engine', (['"""pyarrow"""'], {}), "('pyarrow')\n", (5957, 5968), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((6009, 6034), 'pandas.io.parquet.get_engine', 'get_engine', (['"""fastparquet"""'], {}), "('fastparquet')\n", (6019, 6034), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((6517, 6550), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (6529, 6550), False, 'from distutils.version import LooseVersion\n'), ((6553, 6577), 'distutils.version.LooseVersion', 'LooseVersion', (['pa_min_ver'], {}), '(pa_min_ver)\n', (6565, 6577), False, 'from distutils.version import LooseVersion\n'), ((6672, 6709), 
'distutils.version.LooseVersion', 'LooseVersion', (['fastparquet.__version__'], {}), '(fastparquet.__version__)\n', (6684, 6709), False, 'from distutils.version import LooseVersion\n'), ((6712, 6736), 'distutils.version.LooseVersion', 'LooseVersion', (['fp_min_ver'], {}), '(fp_min_ver)\n', (6724, 6736), False, 'from distutils.version import LooseVersion\n'), ((8340, 8373), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (8352, 8373), False, 'from distutils.version import LooseVersion\n'), ((8395, 8428), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (8407, 8428), False, 'from distutils.version import LooseVersion\n'), ((8737, 8764), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (8751, 8764), False, 'from warnings import catch_warnings\n'), ((8787, 8816), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path'], {'engine': 'pa'}), '(path, engine=pa)\n', (8799, 8816), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((8829, 8862), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'df'], {}), '(result, df)\n', (8850, 8862), True, 'import pandas._testing as tm\n'), ((8885, 8934), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path'], {'engine': 'pa', 'columns': "['a', 'd']"}), "(path, engine=pa, columns=['a', 'd'])\n", (8897, 8934), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((8947, 8992), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', "df[['a', 'd']]"], {}), "(result, df[['a', 'd']])\n", (8968, 8992), True, 'import pandas._testing as tm\n'), ((9134, 9151), 'pandas._testing.ensure_clean', 'tm.ensure_clean', ([], {}), '()\n', (9149, 9151), True, 'import pandas._testing as tm\n'), ((9353, 9373), 'pandas.Series', 
'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (9362, 9373), True, 'import pandas as pd\n'), ((9421, 9445), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130101"""'], {}), "('20130101')\n", (9433, 9445), True, 'import pandas as pd\n'), ((9459, 9478), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (9467, 9478), True, 'import numpy as np\n'), ((10174, 10209), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(1)', '(1)', '(0)', '(0)'], {}), '(2011, 1, 1, 0, 0)\n', (10191, 10209), False, 'import datetime\n'), ((10223, 10258), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(1)', '(1)', '(1)', '(1)'], {}), '(2011, 1, 1, 1, 1)\n', (10240, 10258), False, 'import datetime\n'), ((10511, 10540), 'pytest.importorskip', 'pytest.importorskip', (['"""snappy"""'], {}), "('snappy')\n", (10530, 10540), False, 'import pytest\n'), ((11282, 11318), 'pandas.date_range', 'pd.date_range', (['"""20130101"""'], {'periods': '(3)'}), "('20130101', periods=3)\n", (11295, 11318), True, 'import pandas as pd\n'), ((12296, 12317), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)'], {}), '(4, 3)\n', (12311, 12317), True, 'import numpy as np\n'), ((15319, 15352), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (15331, 15352), False, 'from distutils.version import LooseVersion\n'), ((15355, 15381), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.15.1.dev"""'], {}), "('0.15.1.dev')\n", (15367, 15381), False, 'from distutils.version import LooseVersion\n'), ((16634, 16667), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (16646, 16667), False, 'from distutils.version import LooseVersion\n'), ((16671, 16693), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.15.0"""'], {}), "('0.15.0')\n", (16683, 16693), False, 'from distutils.version import LooseVersion\n'), ((18218, 18239), 'pandas._testing.ensure_clean_dir', 
'tm.ensure_clean_dir', ([], {}), '()\n', (18237, 18239), True, 'import pandas._testing as tm\n'), ((18394, 18440), 'pyarrow.parquet.ParquetDataset', 'pq.ParquetDataset', (['path'], {'validate_schema': '(False)'}), '(path, validate_schema=False)\n', (18411, 18440), True, 'import pyarrow.parquet as pq\n'), ((18771, 18792), 'pandas._testing.ensure_clean_dir', 'tm.ensure_clean_dir', ([], {}), '()\n', (18790, 18792), True, 'import pandas._testing as tm\n'), ((18947, 18993), 'pyarrow.parquet.ParquetDataset', 'pq.ParquetDataset', (['path'], {'validate_schema': '(False)'}), '(path, validate_schema=False)\n', (18964, 18993), True, 'import pyarrow.parquet as pq\n'), ((20034, 20067), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (20046, 20067), False, 'from distutils.version import LooseVersion\n'), ((20071, 20093), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.16.0"""'], {}), "('0.16.0')\n", (20083, 20093), False, 'from distutils.version import LooseVersion\n'), ((20461, 20494), 'distutils.version.LooseVersion', 'LooseVersion', (['pyarrow.__version__'], {}), '(pyarrow.__version__)\n', (20473, 20494), False, 'from distutils.version import LooseVersion\n'), ((20498, 20520), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.16.0"""'], {}), "('0.16.0')\n", (20510, 20520), False, 'from distutils.version import LooseVersion\n'), ((23188, 23205), 'pandas._testing.ensure_clean', 'tm.ensure_clean', ([], {}), '()\n', (23203, 23205), True, 'import pandas._testing as tm\n'), ((23311, 23359), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path', 'fp'], {'filters': "[('a', '==', 0)]"}), "(path, fp, filters=[('a', '==', 0)])\n", (23323, 23359), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((23713, 23734), 'pandas._testing.ensure_clean_dir', 'tm.ensure_clean_dir', ([], {}), '()\n', (23732, 23734), True, 'import pandas._testing as tm\n'), 
((23945, 23965), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (23959, 23965), False, 'import os\n'), ((24283, 24304), 'pandas._testing.ensure_clean_dir', 'tm.ensure_clean_dir', ([], {}), '()\n', (24302, 24304), True, 'import pandas._testing as tm\n'), ((24515, 24535), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (24529, 24535), False, 'import os\n'), ((24863, 24884), 'pandas._testing.ensure_clean_dir', 'tm.ensure_clean_dir', ([], {}), '()\n', (24882, 24884), True, 'import pandas._testing as tm\n'), ((25093, 25113), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (25107, 25113), False, 'import os\n'), ((25465, 25490), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (25478, 25490), False, 'import pytest\n'), ((2723, 2747), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130101"""'], {}), "('20130101')\n", (2735, 2747), True, 'import pandas as pd\n'), ((2789, 2813), 'pandas.Timestamp', 'pd.Timestamp', (['"""20130103"""'], {}), "('20130103')\n", (2801, 2813), True, 'import pandas as pd\n'), ((4175, 4202), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (4189, 4202), False, 'from warnings import catch_warnings\n'), ((4229, 4262), 'pandas.io.parquet.read_parquet', 'read_parquet', (['path'], {}), '(path, **read_kwargs)\n', (4241, 4262), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((7135, 7174), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': 'match'}), '(ImportError, match=match)\n', (7148, 7174), False, 'import pytest\n'), ((7192, 7210), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (7202, 7210), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((7302, 7341), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': 'match'}), '(ImportError, match=match)\n', (7315, 7341), 
False, 'import pytest\n'), ((7359, 7377), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (7369, 7377), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((7516, 7555), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': 'match'}), '(ImportError, match=match)\n', (7529, 7555), False, 'import pytest\n'), ((7573, 7591), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (7583, 7591), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((7687, 7726), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': 'match'}), '(ImportError, match=match)\n', (7700, 7726), False, 'import pytest\n'), ((7744, 7762), 'pandas.io.parquet.get_engine', 'get_engine', (['"""auto"""'], {}), "('auto')\n", (7754, 7762), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((9178, 9196), 'pytest.raises', 'pytest.raises', (['exc'], {}), '(exc)\n', (9191, 9196), False, 'import pytest\n'), ((9214, 9260), 'pandas.io.parquet.to_parquet', 'to_parquet', (['df', 'path', 'engine'], {'compression': 'None'}), '(df, path, engine, compression=None)\n', (9224, 9260), False, 'from pandas.io.parquet import FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet\n'), ((10592, 10621), 'pytest.importorskip', 'pytest.importorskip', (['"""brotli"""'], {}), "('brotli')\n", (10611, 10621), False, 'import pytest\n'), ((15755, 15793), 'pandas.timedelta_range', 'pd.timedelta_range', (['"""1 day"""'], {'periods': '(3)'}), "('1 day', periods=3)\n", (15773, 15793), True, 'import pandas as pd\n'), ((16406, 16448), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', (["['foo', 'bar', 'baz']"], {}), "(['foo', 'bar', 'baz'])\n", (16425, 16448), True, 'import pandas as pd\n'), ((19837, 19872), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': '"""Int64"""'}), 
"([1, 2, 3], dtype='Int64')\n", (19846, 19872), True, 'import pandas as pd\n'), ((19895, 19931), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'dtype': '"""UInt32"""'}), "([1, 2, 3], dtype='UInt32')\n", (19904, 19931), True, 'import pandas as pd\n'), ((19954, 19997), 'pandas.Series', 'pd.Series', (["['a', None, 'c']"], {'dtype': '"""string"""'}), "(['a', None, 'c'], dtype='string')\n", (19963, 19997), True, 'import pandas as pd\n'), ((20406, 20447), 'pandas.Series', 'pd.Series', (['[1, 2, 3, None]'], {'dtype': '"""Int64"""'}), "([1, 2, 3, None], dtype='Int64')\n", (20415, 20447), True, 'import pandas as pd\n'), ((21234, 21284), 'pandas.period_range', 'pd.period_range', (['"""2012-01-01"""'], {'periods': '(3)', 'freq': '"""D"""'}), "('2012-01-01', periods=3, freq='D')\n", (21249, 21284), True, 'import pandas as pd\n'), ((21589, 21639), 'pandas.date_range', 'pd.date_range', (['"""2017-01-01"""'], {'freq': '"""1n"""', 'periods': '(10)'}), "('2017-01-01', freq='1n', periods=10)\n", (21602, 21639), True, 'import pandas as pd\n'), ((22714, 22758), 'pandas.period_range', 'pd.period_range', (['"""2013"""'], {'freq': '"""M"""', 'periods': '(3)'}), "('2013', freq='M', periods=3)\n", (22729, 22758), True, 'import pandas as pd\n'), ((24048, 24084), 'fastparquet.ParquetFile', 'fastparquet.ParquetFile', (['path', '(False)'], {}), '(path, False)\n', (24071, 24084), False, 'import fastparquet\n'), ((24618, 24654), 'fastparquet.ParquetFile', 'fastparquet.ParquetFile', (['path', '(False)'], {}), '(path, False)\n', (24641, 24654), False, 'import fastparquet\n'), ((25196, 25232), 'fastparquet.ParquetFile', 'fastparquet.ParquetFile', (['path', '(False)'], {}), '(path, False)\n', (25219, 25232), False, 'import fastparquet\n'), ((25509, 25530), 'pandas._testing.ensure_clean_dir', 'tm.ensure_clean_dir', ([], {}), '()\n', (25528, 25530), True, 'import pandas._testing as tm\n'), ((832, 917), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _HAVE_FASTPARQUET)'], {'reason': 
'"""fastparquet is not installed"""'}), "(not _HAVE_FASTPARQUET, reason='fastparquet is not installed'\n )\n", (850, 917), False, 'import pytest\n'), ((1018, 1090), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _HAVE_PYARROW)'], {'reason': '"""pyarrow is not installed"""'}), "(not _HAVE_PYARROW, reason='pyarrow is not installed')\n", (1036, 1090), False, 'import pytest\n'), ((2430, 2445), 'numpy.arange', 'np.arange', (['(3)', '(6)'], {}), '(3, 6)\n', (2439, 2445), True, 'import numpy as np\n'), ((15501, 15545), 'pandas.period_range', 'pd.period_range', (['"""2013"""'], {'freq': '"""M"""', 'periods': '(3)'}), "('2013', freq='M', periods=3)\n", (15516, 15545), True, 'import pandas as pd\n'), ((17975, 17986), 'pandas.io.s3.get_fs', 'get_s3_fs', ([], {}), '()\n', (17984, 17986), True, 'from pandas.io.s3 import get_fs as get_s3_fs\n'), ((19427, 19442), 'pyarrow.bool_', 'pyarrow.bool_', ([], {}), '()\n', (19440, 19442), False, 'import pyarrow\n'), ((15160, 15173), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (15169, 15173), True, 'import numpy as np\n'), ((22294, 22307), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (22303, 22307), True, 'import numpy as np\n')] |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2015).
See https://arxiv.org/abs/1505.05424 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl import flags
from bandits.core.bayesian_nn import BayesianNN
# Module-level absl flag values; FLAGS.logdir is read below when creating the
# tensorboard summary writer.
FLAGS = flags.FLAGS
def log_gaussian(x, mu, sigma, reduce_sum=True):
  """Computes the log-density of `x` under a Gaussian N(mu, sigma).

  Args:
    x: Tensor of observations.
    mu: Mean tensor.
    sigma: Standard-deviation tensor.
    reduce_sum: If True, returns the sum of the log-densities over all
      elements; otherwise returns the element-wise log-densities.

  Returns:
    A scalar (if reduce_sum) or a tensor of element-wise log-densities.
  """
  # log N(x; mu, sigma) = -0.5*log(2*pi) - log(sigma) - (x - mu)^2 / (2*sigma^2)
  normalizer = -0.5 * np.log(2 * np.pi) - tf.log(sigma)
  quadratic = tf.square(x - mu) / (2 * tf.square(sigma))
  log_pdf = normalizer - quadratic
  return tf.reduce_sum(log_pdf) if reduce_sum else log_pdf
def analytic_kl(mu_1, sigma_1, mu_2, sigma_2):
  """Closed-form KL(N(mu_1, sigma_1) || N(mu_2, sigma_2)), summed over dims.

  Both Gaussians are assumed to have diagonal covariance, so the divergence
  decomposes per dimension into a squared-mean-difference term plus a
  variance-ratio term.
  """
  var_1 = tf.square(sigma_1)
  var_2 = tf.square(sigma_2)
  # Contribution of the mean difference, scaled by the second variance.
  mean_term = tf.square(mu_1 - mu_2) / (2. * var_2)
  # Contribution of the variance mismatch (trace / log-det terms).
  trace_term = (var_1 / var_2 - 1. - tf.log(var_1) + tf.log(var_2)) / 2.
  return tf.reduce_sum(mean_term + trace_term)
class VariationalNeuralBanditModel(BayesianNN):
  """Implements an approximate Bayesian NN using Variational Inference."""

  def __init__(self, hparams, name="BBBNN"):
    """Saves hyper-parameters, sets up transforms, and builds the TF graph.

    Args:
      hparams: Hyper-parameter object (context_dim, num_actions, layer_sizes,
        init_scale, training-schedule settings, noise settings, etc.).
      name: Name of the network; used for scoping, summaries and logging.
    """
    self.name = name
    self.hparams = hparams

    self.n_in = self.hparams.context_dim
    self.n_out = self.hparams.num_actions
    self.layers = self.hparams.layer_sizes
    self.init_scale = self.hparams.init_scale
    self.f_num_points = None
    if "f_num_points" in hparams:
      self.f_num_points = self.hparams.f_num_points

    # Training-length schedule: train() ramps the number of steps linearly
    # from initial_training_steps to training_epochs over the first
    # cleared_times_trained trainings (see train()).
    self.cleared_times_trained = self.hparams.cleared_times_trained
    self.initial_training_steps = self.hparams.initial_training_steps
    self.training_schedule = np.linspace(self.initial_training_steps,
                                         self.hparams.training_epochs,
                                         self.cleared_times_trained)
    self.verbose = getattr(self.hparams, "verbose", True)

    # Variational parameters (means and stds) per layer, keyed by layer id.
    self.weights_m = {}
    self.weights_std = {}
    self.biases_m = {}
    self.biases_std = {}

    self.times_trained = 0

    # sigma_transform maps an unconstrained variable to a positive std dev;
    # inverse_sigma_transform is its inverse.
    if self.hparams.use_sigma_exp_transform:
      self.sigma_transform = tf.exp
      self.inverse_sigma_transform = np.log
    else:
      self.sigma_transform = tf.nn.softplus
      # Inverse of softplus: softplus(y + log(1 - exp(-y))) == y.
      self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y))

    # Whether to use the local reparameterization trick to compute the loss.
    # See details in https://arxiv.org/abs/1506.02557
    self.use_local_reparameterization = True

    self.build_graph()

  def build_mu_variable(self, shape):
    """Returns a mean variable initialized as N(0, 0.05)."""
    return tf.Variable(tf.random_normal(shape, 0.0, 0.05))

  def build_sigma_variable(self, shape, init=-5.):
    """Returns a sigma variable initialized as N(init, 0.05)."""
    # Initialize sigma to be very small initially to encourage MAP opt first
    return tf.Variable(tf.random_normal(shape, init, 0.05))

  def build_layer(self, input_x, input_x_local, shape,
                  layer_id, activation_fn=tf.nn.relu):
    """Builds a variational layer, and computes KL term.

    Args:
      input_x: Input to the variational layer.
      input_x_local: Input when the local reparameterization trick was applied.
      shape: [number_inputs, number_outputs] for the layer.
      layer_id: Number of layer in the architecture.
      activation_fn: Activation function to apply.

    Returns:
      output_h: Output of the variational layer.
      output_h_local: Output when local reparameterization trick was applied.
      neg_kl: Negative KL term for the layer.
    """
    w_mu = self.build_mu_variable(shape)
    w_sigma = self.sigma_transform(self.build_sigma_variable(shape))

    # Standard reparameterization: w = mu + sigma * eps, eps ~ N(0, I).
    w_noise = tf.random_normal(shape)
    w = w_mu + w_sigma * w_noise

    b_mu = self.build_mu_variable([1, shape[1]])
    b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]]))
    b = b_mu

    # Store means and stds
    self.weights_m[layer_id] = w_mu
    self.weights_std[layer_id] = w_sigma
    self.biases_m[layer_id] = b_mu
    self.biases_std[layer_id] = b_sigma

    # Create outputs
    output_h = activation_fn(tf.matmul(input_x, w) + b)

    if self.use_local_reparameterization:
      # Use analytic KL divergence wrt the prior N(0, sqrt(2/fan_in)).
      neg_kl = -analytic_kl(w_mu, w_sigma,
                            0., tf.to_float(np.sqrt(2./shape[0])))
    else:
      # Create empirical KL loss terms (single-sample Monte Carlo estimate).
      log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0])))
      log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma))
      neg_kl = log_p - log_q

    # Apply local reparameterization trick: sample activations pre nonlinearity
    m_h = tf.matmul(input_x_local, w_mu) + b
    v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma))
    # 1e-6 guards the sqrt against a zero variance.
    output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h))
    output_h_local = activation_fn(output_h_local)

    return output_h, output_h_local, neg_kl

  def build_action_noise(self):
    """Defines a model for additive noise per action, and its KL term."""

    # Define mean and std variables (log-normal dist) for each action.
    noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
                      + self.inverse_sigma_transform(self.hparams.noise_sigma))
    noise_sigma_sigma = self.sigma_transform(
        self.build_sigma_variable([1, self.n_out]))

    pre_noise_sigma = (noise_sigma_mu
                       + tf.random_normal([1, self.n_out]) * noise_sigma_sigma)
    self.noise_sigma = self.sigma_transform(pre_noise_sigma)

    # Compute KL for additive noise sigma terms.
    if getattr(self.hparams, "infer_noise_sigma", False):
      neg_kl_term = log_gaussian(
          pre_noise_sigma,
          self.inverse_sigma_transform(self.hparams.noise_sigma),
          self.hparams.prior_sigma
      )
      neg_kl_term -= log_gaussian(pre_noise_sigma,
                                  noise_sigma_mu,
                                  noise_sigma_sigma)
    else:
      # Fixed noise level: no variational parameters, so no KL contribution.
      neg_kl_term = 0.

    return neg_kl_term

  def build_model(self, activation_fn=tf.nn.relu):
    """Defines the actual NN model with fully connected layers.

    The loss is computed for partial feedback settings (bandits), so only
    the observed outcome is backpropagated (see weighted loss).
    Selects the optimizer and, finally, it also initializes the graph.

    Args:
      activation_fn: the activation function used in the nn layers.
    """

    if self.verbose:
      print("Initializing model {}.".format(self.name))
    neg_kl_term, l_number = 0, 0
    use_local_reparameterization = self.use_local_reparameterization

    # Compute model additive noise for each action with log-normal distribution
    neg_kl_term += self.build_action_noise()

    # Build network.
    input_x = self.x
    input_local = self.x
    n_in = self.n_in
    for l_number, n_nodes in enumerate(self.layers):
      if n_nodes > 0:
        h, h_local, neg_kl = self.build_layer(input_x, input_local,
                                              [n_in, n_nodes], l_number)
        neg_kl_term += neg_kl
        input_x, input_local = h, h_local
        n_in = n_nodes

    # Create last linear layer (identity activation).
    h, h_local, neg_kl = self.build_layer(input_x, input_local,
                                          [n_in, self.n_out],
                                          l_number + 1,
                                          activation_fn=lambda x: x)
    neg_kl_term += neg_kl
    self.y_pred = h
    self.y_pred_local = h_local

    # Compute log likelihood (with learned or fixed noise level)
    if getattr(self.hparams, "infer_noise_sigma", False):
      log_likelihood = log_gaussian(
          self.y, self.y_pred_local, self.noise_sigma, reduce_sum=False)
    else:
      y_hat = self.y_pred_local if use_local_reparameterization else self.y_pred
      log_likelihood = log_gaussian(
          self.y, y_hat, self.hparams.noise_sigma, reduce_sum=False)

    # Only take into account observed outcomes (bandits setting)
    batch_size = tf.to_float(tf.shape(self.x)[0])
    weighted_log_likelihood = tf.reduce_sum(
        log_likelihood * self.weights) / batch_size

    # The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL
    elbo = weighted_log_likelihood + (neg_kl_term / self.n)

    self.loss = -elbo
    self.global_step = tf.train.get_or_create_global_step()
    self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize(
        self.loss, global_step=self.global_step)

    # Create tensorboard metrics
    self.create_summaries()
    self.summary_writer = tf.summary.FileWriter(
        "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph)

  def build_graph(self):
    """Defines graph, session, placeholders, and model.

    Placeholders are: n (size of the dataset), x and y (context and observed
    reward for each action), and weights (one-hot encoding of selected action
    for each context, i.e., only possibly non-zero element in each y).
    """

    self.graph = tf.Graph()
    with self.graph.as_default():

      self.sess = tf.Session()

      self.n = tf.placeholder(shape=[], dtype=tf.float32)
      self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32)
      self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)
      self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)

      self.build_model()
      self.sess.run(tf.global_variables_initializer())

  def create_summaries(self):
    """Defines summaries including mean loss, and global step."""

    with self.graph.as_default():
      with tf.name_scope(self.name + "_summaries"):
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("global_step", self.global_step)
        self.summary_op = tf.summary.merge_all()

  def assign_lr(self):
    """Resets the learning rate in dynamic schedules for subsequent trainings.

    In bandits settings, we do expand our dataset over time. Then, we need to
    re-train the network with the new data. The algorithms that do not keep
    the step constant, can reset it at the start of each *training* process.
    """

    decay_steps = 1
    if self.hparams.activate_decay:
      current_gs = self.sess.run(self.global_step)
      with self.graph.as_default():
        # Decay restarts from the current global step on every call.
        self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
                                                self.global_step - current_gs,
                                                decay_steps,
                                                self.hparams.lr_decay_rate)

  def train(self, data, num_steps):
    """Trains the BNN for num_steps, using the data in 'data'.

    Args:
      data: ContextualDataset object that provides the data.
      num_steps: Number of minibatches to train the network for.

    Returns:
      losses: Loss history during training.
    """

    # During the scheduled warm-up phase, override num_steps with the
    # linearly-growing value from training_schedule.
    if self.times_trained < self.cleared_times_trained:
      num_steps = int(self.training_schedule[self.times_trained])
    self.times_trained += 1

    losses = []

    with self.graph.as_default():

      if self.verbose:
        print("Training {} for {} steps...".format(self.name, num_steps))

      for step in range(num_steps):
        x, y, weights = data.get_batch_with_weights(self.hparams.batch_size)
        _, summary, global_step, loss = self.sess.run(
            [self.train_op, self.summary_op, self.global_step, self.loss],
            feed_dict={
                self.x: x,
                self.y: y,
                self.weights: weights,
                self.n: data.num_points(self.f_num_points),
            })

        losses.append(loss)

        # Periodically log and write tensorboard summaries.
        if step % self.hparams.freq_summary == 0:
          if self.hparams.show_training:
            print("{} | step: {}, loss: {}".format(
                self.name, global_step, loss))
          self.summary_writer.add_summary(summary, global_step)

    return losses
| [
"tensorflow.shape",
"numpy.sqrt",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.train.inverse_time_decay",
"tensorflow.log",
"tensorflow.Graph",
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.exp",
"numpy.linspace",
"tensorflow.matmul",
"tensorflow.s... | [((1513, 1531), 'tensorflow.square', 'tf.square', (['sigma_1'], {}), '(sigma_1)\n', (1522, 1531), True, 'import tensorflow as tf\n'), ((1548, 1566), 'tensorflow.square', 'tf.square', (['sigma_2'], {}), '(sigma_2)\n', (1557, 1566), True, 'import tensorflow as tf\n'), ((1712, 1734), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(t1 + t2)'], {}), '(t1 + t2)\n', (1725, 1734), True, 'import tensorflow as tf\n'), ((1325, 1343), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res'], {}), '(res)\n', (1338, 1343), True, 'import tensorflow as tf\n'), ((1577, 1599), 'tensorflow.square', 'tf.square', (['(mu_1 - mu_2)'], {}), '(mu_1 - mu_2)\n', (1586, 1599), True, 'import tensorflow as tf\n'), ((2432, 2535), 'numpy.linspace', 'np.linspace', (['self.initial_training_steps', 'self.hparams.training_epochs', 'self.cleared_times_trained'], {}), '(self.initial_training_steps, self.hparams.training_epochs, self\n .cleared_times_trained)\n', (2443, 2535), True, 'import numpy as np\n'), ((4510, 4533), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {}), '(shape)\n', (4526, 4533), True, 'import tensorflow as tf\n'), ((9290, 9326), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (9324, 9326), True, 'import tensorflow as tf\n'), ((9989, 9999), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9997, 9999), True, 'import tensorflow as tf\n'), ((1225, 1238), 'tensorflow.log', 'tf.log', (['sigma'], {}), '(sigma)\n', (1231, 1238), True, 'import tensorflow as tf\n'), ((1241, 1258), 'tensorflow.square', 'tf.square', (['(x - mu)'], {}), '(x - mu)\n', (1250, 1258), True, 'import tensorflow as tf\n'), ((1679, 1697), 'tensorflow.log', 'tf.log', (['sigma_2_sq'], {}), '(sigma_2_sq)\n', (1685, 1697), True, 'import tensorflow as tf\n'), ((3404, 3438), 'tensorflow.random_normal', 'tf.random_normal', (['shape', '(0.0)', '(0.05)'], {}), '(shape, 0.0, 0.05)\n', (3420, 3438), True, 'import tensorflow as tf\n'), ((3662, 3697), 
'tensorflow.random_normal', 'tf.random_normal', (['shape', 'init', '(0.05)'], {}), '(shape, init, 0.05)\n', (3678, 3697), True, 'import tensorflow as tf\n'), ((5513, 5543), 'tensorflow.matmul', 'tf.matmul', (['input_x_local', 'w_mu'], {}), '(input_x_local, w_mu)\n', (5522, 5543), True, 'import tensorflow as tf\n'), ((5569, 5593), 'tensorflow.square', 'tf.square', (['input_x_local'], {}), '(input_x_local)\n', (5578, 5593), True, 'import tensorflow as tf\n'), ((5595, 5613), 'tensorflow.square', 'tf.square', (['w_sigma'], {}), '(w_sigma)\n', (5604, 5613), True, 'import tensorflow as tf\n'), ((9028, 9072), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_likelihood * self.weights)'], {}), '(log_likelihood * self.weights)\n', (9041, 9072), True, 'import tensorflow as tf\n'), ((10056, 10068), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10066, 10068), True, 'import tensorflow as tf\n'), ((10087, 10129), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (10101, 10129), True, 'import tensorflow as tf\n'), ((10148, 10205), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.n_in]', 'dtype': 'tf.float32'}), '(shape=[None, self.n_in], dtype=tf.float32)\n', (10162, 10205), True, 'import tensorflow as tf\n'), ((10222, 10280), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.n_out]', 'dtype': 'tf.float32'}), '(shape=[None, self.n_out], dtype=tf.float32)\n', (10236, 10280), True, 'import tensorflow as tf\n'), ((10303, 10361), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.n_out]', 'dtype': 'tf.float32'}), '(shape=[None, self.n_out], dtype=tf.float32)\n', (10317, 10361), True, 'import tensorflow as tf\n'), ((1205, 1222), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1211, 1222), True, 'import numpy as np\n'), ((1276, 1292), 'tensorflow.square', 'tf.square', (['sigma'], {}), '(sigma)\n', (1285, 1292), True, 
'import tensorflow as tf\n'), ((1658, 1676), 'tensorflow.log', 'tf.log', (['sigma_1_sq'], {}), '(sigma_1_sq)\n', (1664, 1676), True, 'import tensorflow as tf\n'), ((4952, 4973), 'tensorflow.matmul', 'tf.matmul', (['input_x', 'w'], {}), '(input_x, w)\n', (4961, 4973), True, 'import tensorflow as tf\n'), ((5338, 5360), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['w_mu'], {}), '(w_mu)\n', (5354, 5360), True, 'import tensorflow as tf\n'), ((5362, 5387), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['w_sigma'], {}), '(w_sigma)\n', (5378, 5387), True, 'import tensorflow as tf\n'), ((5643, 5663), 'tensorflow.sqrt', 'tf.sqrt', (['(v_h + 1e-06)'], {}), '(v_h + 1e-06)\n', (5650, 5663), True, 'import tensorflow as tf\n'), ((6291, 6324), 'tensorflow.random_normal', 'tf.random_normal', (['[1, self.n_out]'], {}), '([1, self.n_out])\n', (6307, 6324), True, 'import tensorflow as tf\n'), ((8976, 8992), 'tensorflow.shape', 'tf.shape', (['self.x'], {}), '(self.x)\n', (8984, 8992), True, 'import tensorflow as tf\n'), ((9348, 9395), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.hparams.initial_lr'], {}), '(self.hparams.initial_lr)\n', (9370, 9395), True, 'import tensorflow as tf\n'), ((10411, 10444), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10442, 10444), True, 'import tensorflow as tf\n'), ((10595, 10634), 'tensorflow.name_scope', 'tf.name_scope', (["(self.name + '_summaries')"], {}), "(self.name + '_summaries')\n", (10608, 10634), True, 'import tensorflow as tf\n'), ((10645, 10681), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (10662, 10681), True, 'import tensorflow as tf\n'), ((10691, 10741), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""global_step"""', 'self.global_step'], {}), "('global_step', self.global_step)\n", (10708, 10741), True, 'import tensorflow as tf\n'), ((10769, 10791), 'tensorflow.summary.merge_all', 
'tf.summary.merge_all', ([], {}), '()\n', (10789, 10791), True, 'import tensorflow as tf\n'), ((11311, 11439), 'tensorflow.train.inverse_time_decay', 'tf.train.inverse_time_decay', (['self.hparams.initial_lr', '(self.global_step - current_gs)', 'decay_steps', 'self.hparams.lr_decay_rate'], {}), '(self.hparams.initial_lr, self.global_step -\n current_gs, decay_steps, self.hparams.lr_decay_rate)\n', (11338, 11439), True, 'import tensorflow as tf\n'), ((5284, 5307), 'numpy.sqrt', 'np.sqrt', (['(2.0 / shape[0])'], {}), '(2.0 / shape[0])\n', (5291, 5307), True, 'import numpy as np\n'), ((5682, 5695), 'tensorflow.shape', 'tf.shape', (['v_h'], {}), '(v_h)\n', (5690, 5695), True, 'import tensorflow as tf\n'), ((5163, 5186), 'numpy.sqrt', 'np.sqrt', (['(2.0 / shape[0])'], {}), '(2.0 / shape[0])\n', (5170, 5186), True, 'import numpy as np\n'), ((3058, 3068), 'numpy.exp', 'np.exp', (['(-y)'], {}), '(-y)\n', (3064, 3068), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for data utility functions."""
import numpy as np
import tensorflow as tf
from poem.core import data_utils
class DataUtilsTest(tf.test.TestCase):
  """Unit tests for the tensor-manipulation helpers in poem.core.data_utils."""

  def test_flatten_last_dims(self):
    """The trailing `num_last_dims` axes are merged into one axis."""
    # Shape = [2, 3, 4].
    x = tf.constant([[[1, 2, 3, 4], [11, 12, 13, 14], [21, 22, 23, 24]],
                     [[31, 32, 33, 34], [41, 42, 43, 44], [51, 52, 53, 54]]])
    # Expected shape = [2, 12].
    flattened_x = data_utils.flatten_last_dims(x, num_last_dims=2)
    self.assertAllEqual(flattened_x,
                        [[1, 2, 3, 4, 11, 12, 13, 14, 21, 22, 23, 24],
                         [31, 32, 33, 34, 41, 42, 43, 44, 51, 52, 53, 54]])

  def test_flatten_first_dims(self):
    """All axes before the last `num_last_dims_to_keep` collapse into one."""
    # Shape = [1, 2, 3, 4, 1].
    x = tf.constant([[[[[1], [2], [3], [4]], [[11], [12], [13], [14]],
                     [[21], [22], [23], [24]]],
                    [[[31], [32], [33], [34]], [[41], [42], [43], [44]],
                     [[51], [52], [53], [54]]]]])
    # Expected shape = [6, 4, 1].
    flattened_x = data_utils.flatten_first_dims(x, num_last_dims_to_keep=2)
    self.assertAllEqual(flattened_x,
                        [[[1], [2], [3], [4]], [[11], [12], [13], [14]],
                         [[21], [22], [23], [24]], [[31], [32], [33], [34]],
                         [[41], [42], [43], [44]], [[51], [52], [53], [54]]])

  def test_tile_first_dims(self):
    """Leading axes are replicated by the given multiples."""
    # Shape = [1, 2, 1]; tiled to [2, 4, 1].
    x = tf.constant([[[1], [2]]])
    tiled_x = data_utils.tile_first_dims(x, first_dim_multiples=[2, 2])
    self.assertAllEqual(tiled_x, [[[1], [2], [1], [2]], [[1], [2], [1], [2]]])

  def test_tile_last_dims(self):
    """Trailing axes are replicated by the given multiples."""
    # Shape = [2, 1, 2, 1]; tiled to [2, 1, 4, 2].
    x = tf.constant([[[[1], [2]]], [[[3], [4]]]])
    tiled_x = data_utils.tile_last_dims(x, last_dim_multiples=[2, 2])
    self.assertAllEqual(tiled_x, [[[[1, 1], [2, 2], [1, 1], [2, 2]]],
                                  [[[3, 3], [4, 4], [3, 3], [4, 4]]]])

  def test_recursively_expand_dims(self):
    """Size-1 axes are inserted one at a time, in the order given."""
    # Shape = [2, 3].
    x = tf.constant([[1, 2, 3], [4, 5, 6]])
    # Shape = [2, 1, 3, 1]
    expanded_x = data_utils.recursively_expand_dims(x, axes=[-1, 1])
    self.assertAllEqual(expanded_x, [[[[1], [2], [3]]], [[[4], [5], [6]]]])

  def test_reshape_by_last_dims(self):
    """The trailing axes are reshaped to `last_dim_shape`."""
    # Shape = [2, 4, 1].
    x = tf.constant([[[1], [2], [3], [4]], [[5], [6], [7], [8]]])
    # Shape = [2, 2, 2]
    reshaped_x = data_utils.reshape_by_last_dims(x, last_dim_shape=[2, 2])
    self.assertAllEqual(reshaped_x, [[[1, 2], [3, 4]], [[5, 6], [7, 8]]])

  def test_reduce_mean(self):
    """Weighted mean only averages entries with non-zero weight."""
    # Shape = [2, 3, 2].
    tensor = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                          [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]])
    # Shape = [2, 3, 1].
    weights = tf.constant([[[1.0], [0.0], [1.0]], [[0.0], [1.0], [0.0]]])
    # Shape = [2, 1, 2].
    means = data_utils.reduce_weighted_mean(
        tensor, weights, axis=-2, keepdims=True)
    self.assertAllClose(means, [[[3.0, 4.0]], [[9.0, 10.0]]])

  def test_sample_gaussians(self):
    """Seeded sampling is deterministic: exact draws are hard-coded."""
    means = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    stddevs = tf.constant([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
    samples = data_utils.sample_gaussians(
        means, stddevs, num_samples=10, seed=1)
    self.assertAllClose(
        samples,
        [[[0.9188682, 2.2969198, 3.0195987], [0.75572956, 2.0198498, 3.1773672],
          [1.0592823, 1.5754141, 2.783131], [0.99437296, 2.1287088, 2.9207027],
          [1.1856633, 2.1135683, 2.8851492], [0.85146564, 2.2523541, 2.9924083],
          [0.973537, 2.3065627, 2.4771068], [0.95621073, 1.886798, 3.0962007],
          [1.1132832, 1.5443486, 3.1448436], [0.8687291, 2.0713701, 2.480915]],
         [[3.983933, 5.449831, 5.1716466], [4.592585, 4.8772526, 5.5604115],
          [3.9216413, 5.035854, 6.3797884], [3.3715236, 5.6646905, 5.2959795],
          [4.012618, 5.2385263, 6.262165], [3.8732765, 4.774625, 4.9163604],
          [4.0499597, 4.6146727, 5.552255], [3.8872187, 4.020592, 5.7974334],
          [4.4120793, 5.756701, 6.1350946], [3.8857353, 5.134413, 7.0477266]]])

  def test_compute_lower_percentile_means(self):
    """Means over the lower-percentile elements: 3.0 == mean(1..5)."""
    # Shape = [2, 3, 3].
    x = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
                     [[11.0, 12.0, 13.0], [14.0, 15.0, 16.0],
                      [17.0, 18.0, 19.0]]])
    lower_half = data_utils.compute_lower_percentile_means(x, axis=[-2, -1])
    self.assertAllClose(lower_half, [3.0, 13.0])

  def test_mix_pair_batch_evenly(self):
    """With default ratios, quarters mix as: lhs, rhs, (lhs, rhs), (rhs, lhs)."""
    lhs_pairs = tf.constant([
        [[1.0], [2.0]],
        [[1.1], [2.1]],
        [[3.0], [4.0]],
        [[3.1], [4.1]],
        [[5.0], [6.0]],
        [[5.1], [6.1]],
        [[7.0], [8.0]],
        [[7.1], [8.1]],
    ])
    rhs_pairs = tf.constant([
        [[11.0], [12.0]],
        [[11.1], [12.1]],
        [[13.0], [14.0]],
        [[13.1], [14.1]],
        [[15.0], [16.0]],
        [[15.1], [16.1]],
        [[17.0], [18.0]],
        [[17.1], [18.1]],
    ])
    mixed_batch = data_utils.mix_pair_batch(lhs_pairs, rhs_pairs, axis=1)
    self.assertAllEqual(
        mixed_batch,
        np.array([
            [[1.0], [2.0]],
            [[1.1], [2.1]],
            [[13.0], [14.0]],
            [[13.1], [14.1]],
            [[5.0], [16.0]],
            [[5.1], [16.1]],
            [[17.0], [8.0]],
            [[17.1], [8.1]],
        ],
                 dtype=np.float32))

  def test_mix_pair_batch_porportionally(self):
    """Sub-batch sizes follow the (1, 4, 1, 2) ratios over the batch of 8."""
    lhs_pairs = tf.constant([
        [[1.0], [2.0]],
        [[1.1], [2.1]],
        [[3.0], [4.0]],
        [[3.1], [4.1]],
        [[5.0], [6.0]],
        [[5.1], [6.1]],
        [[7.0], [8.0]],
        [[7.1], [8.1]],
    ])
    rhs_pairs = tf.constant([
        [[11.0], [12.0]],
        [[11.1], [12.1]],
        [[13.0], [14.0]],
        [[13.1], [14.1]],
        [[15.0], [16.0]],
        [[15.1], [16.1]],
        [[17.0], [18.0]],
        [[17.1], [18.1]],
    ])
    mixed_batch = data_utils.mix_pair_batch(
        lhs_pairs, rhs_pairs, axis=1, sub_batch_ratios=(1.0, 4.0, 1.0, 2.0))
    self.assertAllEqual(
        mixed_batch,
        np.array([
            [[1.0], [2.0]],
            [[11.1], [12.1]],
            [[13.0], [14.0]],
            [[13.1], [14.1]],
            [[15.0], [16.0]],
            [[5.1], [16.1]],
            [[17.0], [8.0]],
            [[17.1], [8.1]],
        ],
                 dtype=np.float32))

  def test_shuffle_batches(self):
    """All tensors must be shuffled with the same permutation."""
    # Shape = [3, 2].
    tensor_1 = tf.constant([[1, 2], [3, 4], [5, 6]])
    tensor_2 = tf.constant([[11, 12], [13, 14], [15, 16]])
    tensor_3 = tf.constant([[21, 22], [23, 24], [25, 26]])
    shuffled_tensor_1, shuffled_tensor_2, shuffled_tensor_3 = (
        data_utils.shuffle_batches([tensor_1, tensor_2, tensor_3]))
    # The constant offsets of 10 and 20 between the tensors survive only if
    # the same permutation was applied to each one.
    tensor_diff_21 = shuffled_tensor_2 - shuffled_tensor_1
    tensor_diff_31 = shuffled_tensor_3 - shuffled_tensor_1
    self.assertAllEqual(tensor_diff_21, [[10, 10], [10, 10], [10, 10]])
    self.assertAllEqual(tensor_diff_31, [[20, 20], [20, 20], [20, 20]])

  def test_update_sub_tensor(self):
    """Only rows 0, 2 and 4 of axis -2 are updated; the rest pass through."""
    # Shape = [3, 5, 2].
    x = tf.constant([
        [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]],
        [[10.0, 11.0], [12.0, 13.0], [14.0, 15.0], [16.0, 17.0], [18.0, 19.0]],
        [[20.0, 21.0], [22.0, 23.0], [24.0, 25.0], [26.0, 27.0], [28.0, 29.0]],
    ])

    def update_func(sub_tensor):
      # Adds a distinct delta to each selected element.
      # Shape = [3, 3, 2].
      delta = tf.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
                           [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],
                           [[1.3, 1.4], [1.5, 1.6], [1.7, 1.8]]])
      return sub_tensor + delta

    updated_x = data_utils.update_sub_tensor(
        x, indices=[0, 2, 4], axis=-2, update_func=update_func)
    self.assertAllClose(updated_x, [
        [[0.1, 1.2], [2.0, 3.0], [4.3, 5.4], [6.0, 7.0], [8.5, 9.6]],
        [[10.7, 11.8], [12.0, 13.0], [14.9, 16.0], [16.0, 17.0], [19.1, 20.2]],
        [[21.3, 22.4], [22.0, 23.0], [25.5, 26.6], [26.0, 27.0], [29.7, 30.8]],
    ])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"poem.core.data_utils.compute_lower_percentile_means",
"poem.core.data_utils.reduce_weighted_mean",
"poem.core.data_utils.reshape_by_last_dims",
"tensorflow.test.main",
"poem.core.data_utils.tile_last_dims",
"poem.core.data_utils.mix_pair_batch",
"poem.core.data_utils.shuffle_batches",
"numpy.array",
... | [((8549, 8563), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (8561, 8563), True, 'import tensorflow as tf\n'), ((837, 963), 'tensorflow.constant', 'tf.constant', (['[[[1, 2, 3, 4], [11, 12, 13, 14], [21, 22, 23, 24]], [[31, 32, 33, 34], [41,\n 42, 43, 44], [51, 52, 53, 54]]]'], {}), '([[[1, 2, 3, 4], [11, 12, 13, 14], [21, 22, 23, 24]], [[31, 32, \n 33, 34], [41, 42, 43, 44], [51, 52, 53, 54]]])\n', (848, 963), True, 'import tensorflow as tf\n'), ((998, 1046), 'poem.core.data_utils.flatten_last_dims', 'data_utils.flatten_last_dims', (['x'], {'num_last_dims': '(2)'}), '(x, num_last_dims=2)\n', (1026, 1046), False, 'from poem.core import data_utils\n'), ((1308, 1488), 'tensorflow.constant', 'tf.constant', (['[[[[[1], [2], [3], [4]], [[11], [12], [13], [14]], [[21], [22], [23], [24]]\n ], [[[31], [32], [33], [34]], [[41], [42], [43], [44]], [[51], [52], [\n 53], [54]]]]]'], {}), '([[[[[1], [2], [3], [4]], [[11], [12], [13], [14]], [[21], [22],\n [23], [24]]], [[[31], [32], [33], [34]], [[41], [42], [43], [44]], [[51\n ], [52], [53], [54]]]]])\n', (1319, 1488), True, 'import tensorflow as tf\n'), ((1566, 1623), 'poem.core.data_utils.flatten_first_dims', 'data_utils.flatten_first_dims', (['x'], {'num_last_dims_to_keep': '(2)'}), '(x, num_last_dims_to_keep=2)\n', (1595, 1623), False, 'from poem.core import data_utils\n'), ((1957, 1982), 'tensorflow.constant', 'tf.constant', (['[[[1], [2]]]'], {}), '([[[1], [2]]])\n', (1968, 1982), True, 'import tensorflow as tf\n'), ((1997, 2054), 'poem.core.data_utils.tile_first_dims', 'data_utils.tile_first_dims', (['x'], {'first_dim_multiples': '[2, 2]'}), '(x, first_dim_multiples=[2, 2])\n', (2023, 2054), False, 'from poem.core import data_utils\n'), ((2204, 2245), 'tensorflow.constant', 'tf.constant', (['[[[[1], [2]]], [[[3], [4]]]]'], {}), '([[[[1], [2]]], [[[3], [4]]]])\n', (2215, 2245), True, 'import tensorflow as tf\n'), ((2260, 2315), 'poem.core.data_utils.tile_last_dims', 'data_utils.tile_last_dims', (['x'], 
{'last_dim_multiples': '[2, 2]'}), '(x, last_dim_multiples=[2, 2])\n', (2285, 2315), False, 'from poem.core import data_utils\n'), ((2530, 2565), 'tensorflow.constant', 'tf.constant', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2541, 2565), True, 'import tensorflow as tf\n'), ((2610, 2661), 'poem.core.data_utils.recursively_expand_dims', 'data_utils.recursively_expand_dims', (['x'], {'axes': '[-1, 1]'}), '(x, axes=[-1, 1])\n', (2644, 2661), False, 'from poem.core import data_utils\n'), ((2811, 2868), 'tensorflow.constant', 'tf.constant', (['[[[1], [2], [3], [4]], [[5], [6], [7], [8]]]'], {}), '([[[1], [2], [3], [4]], [[5], [6], [7], [8]]])\n', (2822, 2868), True, 'import tensorflow as tf\n'), ((2910, 2967), 'poem.core.data_utils.reshape_by_last_dims', 'data_utils.reshape_by_last_dims', (['x'], {'last_dim_shape': '[2, 2]'}), '(x, last_dim_shape=[2, 2])\n', (2941, 2967), False, 'from poem.core import data_utils\n'), ((3111, 3207), 'tensorflow.constant', 'tf.constant', (['[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]]'], {}), '([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[7.0, 8.0], [9.0, 10.0],\n [11.0, 12.0]]])\n', (3122, 3207), True, 'import tensorflow as tf\n'), ((3269, 3328), 'tensorflow.constant', 'tf.constant', (['[[[1.0], [0.0], [1.0]], [[0.0], [1.0], [0.0]]]'], {}), '([[[1.0], [0.0], [1.0]], [[0.0], [1.0], [0.0]]])\n', (3280, 3328), True, 'import tensorflow as tf\n'), ((3366, 3438), 'poem.core.data_utils.reduce_weighted_mean', 'data_utils.reduce_weighted_mean', (['tensor', 'weights'], {'axis': '(-2)', 'keepdims': '(True)'}), '(tensor, weights, axis=-2, keepdims=True)\n', (3397, 3438), False, 'from poem.core import data_utils\n'), ((3559, 3606), 'tensorflow.constant', 'tf.constant', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n', (3570, 3606), True, 'import tensorflow as tf\n'), ((3621, 3668), 'tensorflow.constant', 'tf.constant', (['[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]'], 
{}), '([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\n', (3632, 3668), True, 'import tensorflow as tf\n'), ((3683, 3750), 'poem.core.data_utils.sample_gaussians', 'data_utils.sample_gaussians', (['means', 'stddevs'], {'num_samples': '(10)', 'seed': '(1)'}), '(means, stddevs, num_samples=10, seed=1)\n', (3710, 3750), False, 'from poem.core import data_utils\n'), ((4677, 4810), 'tensorflow.constant', 'tf.constant', (['[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[11.0, 12.0, 13.0],\n [14.0, 15.0, 16.0], [17.0, 18.0, 19.0]]]'], {}), '([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[11.0, \n 12.0, 13.0], [14.0, 15.0, 16.0], [17.0, 18.0, 19.0]]])\n', (4688, 4810), True, 'import tensorflow as tf\n'), ((4866, 4925), 'poem.core.data_utils.compute_lower_percentile_means', 'data_utils.compute_lower_percentile_means', (['x'], {'axis': '[-2, -1]'}), '(x, axis=[-2, -1])\n', (4907, 4925), False, 'from poem.core import data_utils\n'), ((5032, 5177), 'tensorflow.constant', 'tf.constant', (['[[[1.0], [2.0]], [[1.1], [2.1]], [[3.0], [4.0]], [[3.1], [4.1]], [[5.0], [\n 6.0]], [[5.1], [6.1]], [[7.0], [8.0]], [[7.1], [8.1]]]'], {}), '([[[1.0], [2.0]], [[1.1], [2.1]], [[3.0], [4.0]], [[3.1], [4.1]],\n [[5.0], [6.0]], [[5.1], [6.1]], [[7.0], [8.0]], [[7.1], [8.1]]])\n', (5043, 5177), True, 'import tensorflow as tf\n'), ((5261, 5426), 'tensorflow.constant', 'tf.constant', (['[[[11.0], [12.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1], [14.1]], [[\n 15.0], [16.0]], [[15.1], [16.1]], [[17.0], [18.0]], [[17.1], [18.1]]]'], {}), '([[[11.0], [12.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1],\n [14.1]], [[15.0], [16.0]], [[15.1], [16.1]], [[17.0], [18.0]], [[17.1],\n [18.1]]])\n', (5272, 5426), True, 'import tensorflow as tf\n'), ((5508, 5563), 'poem.core.data_utils.mix_pair_batch', 'data_utils.mix_pair_batch', (['lhs_pairs', 'rhs_pairs'], {'axis': '(1)'}), '(lhs_pairs, rhs_pairs, axis=1)\n', (5533, 5563), False, 'from poem.core import data_utils\n'), ((5973, 6118), 
'tensorflow.constant', 'tf.constant', (['[[[1.0], [2.0]], [[1.1], [2.1]], [[3.0], [4.0]], [[3.1], [4.1]], [[5.0], [\n 6.0]], [[5.1], [6.1]], [[7.0], [8.0]], [[7.1], [8.1]]]'], {}), '([[[1.0], [2.0]], [[1.1], [2.1]], [[3.0], [4.0]], [[3.1], [4.1]],\n [[5.0], [6.0]], [[5.1], [6.1]], [[7.0], [8.0]], [[7.1], [8.1]]])\n', (5984, 6118), True, 'import tensorflow as tf\n'), ((6202, 6367), 'tensorflow.constant', 'tf.constant', (['[[[11.0], [12.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1], [14.1]], [[\n 15.0], [16.0]], [[15.1], [16.1]], [[17.0], [18.0]], [[17.1], [18.1]]]'], {}), '([[[11.0], [12.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1],\n [14.1]], [[15.0], [16.0]], [[15.1], [16.1]], [[17.0], [18.0]], [[17.1],\n [18.1]]])\n', (6213, 6367), True, 'import tensorflow as tf\n'), ((6449, 6548), 'poem.core.data_utils.mix_pair_batch', 'data_utils.mix_pair_batch', (['lhs_pairs', 'rhs_pairs'], {'axis': '(1)', 'sub_batch_ratios': '(1.0, 4.0, 1.0, 2.0)'}), '(lhs_pairs, rhs_pairs, axis=1, sub_batch_ratios=(\n 1.0, 4.0, 1.0, 2.0))\n', (6474, 6548), False, 'from poem.core import data_utils\n'), ((6972, 7009), 'tensorflow.constant', 'tf.constant', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (6983, 7009), True, 'import tensorflow as tf\n'), ((7025, 7068), 'tensorflow.constant', 'tf.constant', (['[[11, 12], [13, 14], [15, 16]]'], {}), '([[11, 12], [13, 14], [15, 16]])\n', (7036, 7068), True, 'import tensorflow as tf\n'), ((7084, 7127), 'tensorflow.constant', 'tf.constant', (['[[21, 22], [23, 24], [25, 26]]'], {}), '([[21, 22], [23, 24], [25, 26]])\n', (7095, 7127), True, 'import tensorflow as tf\n'), ((7200, 7258), 'poem.core.data_utils.shuffle_batches', 'data_utils.shuffle_batches', (['[tensor_1, tensor_2, tensor_3]'], {}), '([tensor_1, tensor_2, tensor_3])\n', (7226, 7258), False, 'from poem.core import data_utils\n'), ((7592, 7819), 'tensorflow.constant', 'tf.constant', (['[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]], [[10.0, 11.0\n ], 
[12.0, 13.0], [14.0, 15.0], [16.0, 17.0], [18.0, 19.0]], [[20.0, \n 21.0], [22.0, 23.0], [24.0, 25.0], [26.0, 27.0], [28.0, 29.0]]]'], {}), '([[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]],\n [[10.0, 11.0], [12.0, 13.0], [14.0, 15.0], [16.0, 17.0], [18.0, 19.0]],\n [[20.0, 21.0], [22.0, 23.0], [24.0, 25.0], [26.0, 27.0], [28.0, 29.0]]])\n', (7603, 7819), True, 'import tensorflow as tf\n'), ((8149, 8238), 'poem.core.data_utils.update_sub_tensor', 'data_utils.update_sub_tensor', (['x'], {'indices': '[0, 2, 4]', 'axis': '(-2)', 'update_func': 'update_func'}), '(x, indices=[0, 2, 4], axis=-2, update_func=\n update_func)\n', (8177, 8238), False, 'from poem.core import data_utils\n'), ((5618, 5791), 'numpy.array', 'np.array', (['[[[1.0], [2.0]], [[1.1], [2.1]], [[13.0], [14.0]], [[13.1], [14.1]], [[5.0],\n [16.0]], [[5.1], [16.1]], [[17.0], [8.0]], [[17.1], [8.1]]]'], {'dtype': 'np.float32'}), '([[[1.0], [2.0]], [[1.1], [2.1]], [[13.0], [14.0]], [[13.1], [14.1]\n ], [[5.0], [16.0]], [[5.1], [16.1]], [[17.0], [8.0]], [[17.1], [8.1]]],\n dtype=np.float32)\n', (5626, 5791), True, 'import numpy as np\n'), ((6607, 6784), 'numpy.array', 'np.array', (['[[[1.0], [2.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1], [14.1]], [[\n 15.0], [16.0]], [[5.1], [16.1]], [[17.0], [8.0]], [[17.1], [8.1]]]'], {'dtype': 'np.float32'}), '([[[1.0], [2.0]], [[11.1], [12.1]], [[13.0], [14.0]], [[13.1], [\n 14.1]], [[15.0], [16.0]], [[5.1], [16.1]], [[17.0], [8.0]], [[17.1], [\n 8.1]]], dtype=np.float32)\n', (6615, 6784), True, 'import numpy as np\n'), ((7918, 8049), 'tensorflow.constant', 'tf.constant', (['[[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],\n [[1.3, 1.4], [1.5, 1.6], [1.7, 1.8]]]'], {}), '([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], [[0.7, 0.8], [0.9, 1.0],\n [1.1, 1.2]], [[1.3, 1.4], [1.5, 1.6], [1.7, 1.8]]])\n', (7929, 8049), True, 'import tensorflow as tf\n')] |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2020 the AAS WorldWide Telescope project
# Licensed under the MIT License.
"""
Building up WWT imagery data sets.
This gets a little complex since the generation of a tiled image involves
several tasks that may or may not be implemented in several, swappable ways:
generating the tiled pixel data; positioning the image on the sky; filling in
metadata; and so on. We try to provide a framework that allows the
implementations of different tasks to be swapped out without getting too airy
and abstract.
"""
from __future__ import absolute_import, division, print_function
__all__ = """
Builder
""".split()
from wwt_data_formats.enums import DataSetType, ProjectionType
from wwt_data_formats.imageset import ImageSet
from wwt_data_formats.layers import ImageSetLayer, LayerContainerReader
from wwt_data_formats.place import Place
from .image import ImageLoader
class Builder(object):
    """
    State for some kind of imagery data set that's being assembled.
    """
    pio = None
    "A PyramidIO object representing the backing storage of the tiles and other image data."
    imgset = None
    """
    The WWT ImageSet data describing the image data and their positioning on the sky.
    Data URLs in this ImageSet should be populated as relative URLs.
    """
    place = None
    "The WWT Place data describing a default view of the image data."
    def __init__(self, pio):
        self.pio = pio
        self.imgset = ImageSet()
        self.imgset.name = "Toasty"
        # The tile file format and URL scheme both come from the pyramid I/O
        # backend, so the imageset stays consistent with the on-disk layout.
        self.imgset.file_type = "." + pio.get_default_format()
        self.imgset.url = pio.get_path_scheme() + self.imgset.file_type
        self.place = Place()
        self.place.foreground_image_set = self.imgset
        self.place.name = "Toasty"
    def _check_no_wcs_yet(self):
        """
        The astrometric fields of ImageSet change their meaning depending on
        whether the image in question is tiled or not. Therefore, you'll get
        bogus results if change the tiling status *after* setting the
        astrometric information. This method should be called by other methods
        that control tiling in order to catch the issue if the user does things
        backwards.
        """
        # center_x == center_y == 0 is the freshly-constructed default; any
        # other value means positional WCS has already been applied.
        if self.imgset.center_x != 0 or self.imgset.center_y != 0:
            raise Exception(
                "order-of-operations error: you must apply WCS after applying tiling settings"
            )
    def prepare_study_tiling(self, image):
        """
        Set up to tile the specified image as a WWT "study".
        Parameters
        ----------
        image : `toasty.image.Image`
            The image that will be tiled
        Returns
        -------
        tiling : `toasty.study.StudyTiling`
            The prepared tiling information
        Notes
        -----
        After calling this method, you should set up the WCS for the tiled
        imagery, using :meth:`default_tiled_study_astrometry` as a backstop if
        no real information is available. Then use :meth:`execute_study_tiling`
        to actually perform the tiling process.
        """
        from .study import StudyTiling
        tiling = StudyTiling(image.width, image.height)
        tiling.apply_to_imageset(self.imgset)
        return tiling
    def execute_study_tiling(self, image, tiling, **kwargs):
        """
        Tile the specified image as a WWT "study".
        Parameters
        ----------
        image : `toasty.image.Image`
            The image that will be tiled
        tiling : `toasty.study.StudyTiling`
            The prepared tiling information
        **kwargs
            Arguments relayed to :meth:`toasty.study.StudyTiling.tile_image`,
            such as ``cli_progress``.
        Returns
        -------
        *self*
        """
        tiling.tile_image(image, self.pio, **kwargs)
        return self
    def tile_base_as_study(self, image, **kwargs):
        """
        Tile an image assuming that it is in the appropriate format for WWT's
        "study" framework, namely that it uses a tangential (gnomonic)
        projection on the sky.
        Use of this method is somewhat discouraged since it both analyzes and
        performs the tiling all at once, which means that you can only correctly
        set (and validate) the WCS information *after* doing all the work of
        tiling. (Which in turn is because the proper way to apply WCS
        information to an imageset depends on the tiling parameters.) It is
        generally better to use :meth:`prepare_study_tiling` and
        :meth:`execute_study_tiling`, applying the WCS metadata in between, so
        that WCS errors can be caught and reported before doing the I/O.
        """
        from .study import tile_study_image
        self._check_no_wcs_yet()
        tiling = tile_study_image(image, self.pio, **kwargs)
        tiling.apply_to_imageset(self.imgset)
        return self
    def default_tiled_study_astrometry(self):
        """Apply backstop astrometric settings for a tiled study imageset."""
        self._check_no_wcs_yet()
        self.imgset.data_set_type = DataSetType.SKY
        self.imgset.base_degrees_per_tile = 1.0
        self.imgset.projection = ProjectionType.TAN
        self.place.zoom_level = 1.0
        return self
    def load_from_wwtl(self, cli_settings, wwtl_path, cli_progress=False):
        """Initialize this builder from a single-layer WWTL layer file.

        Reads the WWTL at ``wwtl_path``, validates that it contains exactly
        one untiled ("SkyImage") imageset layer, tiles the embedded image as
        a study, and adopts the layer's imageset metadata. Returns the loaded
        `toasty.image.Image`.
        """
        from contextlib import closing
        from io import BytesIO
        # Load WWTL and see if it matches expectations
        with closing(LayerContainerReader.from_file(wwtl_path)) as lc:
            if len(lc.layers) != 1:
                raise Exception("WWTL file must contain exactly one layer")
            layer = lc.layers[0]
            if not isinstance(layer, ImageSetLayer):
                raise Exception("WWTL file must contain an imageset layer")
            imgset = layer.image_set
            if imgset.projection != ProjectionType.SKY_IMAGE:
                raise Exception(
                    'WWTL imageset layer must have "SkyImage" projection type'
                )
            # Looks OK. Read and parse the image.
            loader = ImageLoader.create_from_args(cli_settings)
            img_data = lc.read_layer_file(layer, layer.extension)
            img = loader.load_stream(BytesIO(img_data))
        # (Re-)initialize with the imageset info extracted from the WWTL.
        self.imgset = imgset
        self.place.foreground_image_set = self.imgset
        self.imgset.file_type = "." + self.pio.get_default_format()
        self.imgset.url = self.pio.get_path_scheme() + self.imgset.file_type
        self.place.name = self.imgset.name
        # Transmogrify untiled image info to tiled image info. We reuse the
        # existing imageset as much as possible, but update the parameters that
        # change in the tiling process.
        wcs_keywords = self.imgset.wcs_headers_from_position(height=img.height)
        self.imgset.center_x = (
            self.imgset.center_y
        ) = 0  # hack to satisfy _check_no_wcs_yet()
        self.tile_base_as_study(img, cli_progress=cli_progress)
        self.imgset.set_position_from_wcs(
            wcs_keywords, img.width, img.height, place=self.place
        )
        return img
    def toast_base(self, sampler, depth, is_planet=False, is_pano=False, **kwargs):
        """Sample a TOAST tile pyramid to ``depth`` levels using ``sampler``.

        ``is_planet``/``is_pano`` select the imageset's data-set type; an
        explicit ``coordsys`` keyword argument overrides the default
        coordinate system implied by ``is_planet``.
        """
        from .toast import sample_layer, ToastCoordinateSystem
        self._check_no_wcs_yet()
        coordsys = (
            ToastCoordinateSystem.PLANETARY
            if is_planet
            else ToastCoordinateSystem.ASTRONOMICAL
        )
        # A caller-supplied coordsys kwarg takes precedence over the
        # is_planet-derived default.
        coordsys = kwargs.pop("coordsys", coordsys)
        sample_layer(self.pio, sampler, depth, coordsys=coordsys, **kwargs)
        if is_planet:
            self.imgset.data_set_type = DataSetType.PLANET
        elif is_pano:
            self.imgset.data_set_type = DataSetType.PANORAMA
        else:
            self.imgset.data_set_type = DataSetType.SKY
        self.imgset.base_degrees_per_tile = 180
        self.imgset.projection = ProjectionType.TOAST
        self.imgset.tile_levels = depth
        self.place.zoom_level = 360
        return self
    def cascade(self, **kwargs):
        """Cascade the deepest tile layer up to level 0 by averaging.

        For FITS pyramids, also reads data min/max and percentile cuts from
        the level-0 tile's header/data into the imageset.
        """
        from .merge import averaging_merger, cascade_images
        cascade_images(self.pio, self.imgset.tile_levels, averaging_merger, **kwargs)
        if "fits" in self.imgset.file_type:
            from .pyramid import Pos
            from astropy.io import fits
            import numpy as np
            with fits.open(
                self.pio.tile_path(
                    pos=Pos(n=0, x=0, y=0), format="fits", makedirs=False
                )
            ) as top_tile:
                self.imgset.data_min = top_tile[0].header["DATAMIN"]
                self.imgset.data_max = top_tile[0].header["DATAMAX"]
                (
                    self.imgset.pixel_cut_low,
                    self.imgset.pixel_cut_high,
                ) = np.nanpercentile(top_tile[0].data, [0.5, 99.5])
        return self
    def make_thumbnail_from_other(self, thumbnail_image):
        """Create and store ``thumb.jpg`` from the given image."""
        thumb = thumbnail_image.make_thumbnail_bitmap()
        with self.pio.open_metadata_for_write("thumb.jpg") as f:
            thumb.save(f, format="JPEG")
        self.imgset.thumbnail_url = "thumb.jpg"
        return self
    def make_placeholder_thumbnail(self):
        """Create and store an all-black 96x45 placeholder ``thumb.jpg``."""
        import numpy as np
        from .image import Image
        arr = np.zeros((45, 96, 3), dtype=np.uint8)
        img = Image.from_array(arr)
        with self.pio.open_metadata_for_write("thumb.jpg") as f:
            img.aspil().save(f, format="JPEG")
        self.imgset.thumbnail_url = "thumb.jpg"
        return self
    def apply_wcs_info(self, wcs, width, height):
        """Apply an astropy WCS to the imageset and the default Place view."""
        self.imgset.set_position_from_wcs(
            wcs.to_header(),
            width,
            height,
            place=self.place,
        )
        return self
    def apply_avm_info(self, avm, width, height):
        """Apply AVM (pyavm) astrometry and credit metadata to this builder."""
        # So. The AVM standard discusses how parity should be expressed and how
        # it should be translated into WCS data, but in practice things are a
        # bit wonky: the AVM data that we've seen in the wild basically express
        # FITS-like (positive parity) WCS, while the actual associated image
        # data have a JPEG-like (negative parity) data layout. WCS can express
        # either parity so it would arguably be more correct for the generated
        # WCS to have negative parity. Based on the current state of knowledge,
        # I think the best option for now is to always flip the parity of the
        # WCS that pyavm hands us. We might need to change the heuristic or
        # allow the user to change the behavior.
        wcs = avm.to_wcs(target_shape=(width, height))
        from .image import _flip_wcs_parity
        wcs = _flip_wcs_parity(wcs, height)
        self.apply_wcs_info(wcs, width, height)
        # Copy over whichever descriptive AVM fields are present.
        if avm.Title:
            self.imgset.name = avm.Title
        if avm.Description:
            self.imgset.description = avm.Description
        if avm.Credit:
            self.imgset.credits = avm.Credit
        if avm.ReferenceURL:
            self.imgset.credits_url = avm.ReferenceURL
        return self
    def set_name(self, name):
        """Set the same name on both the imageset and the Place."""
        self.imgset.name = name
        self.place.name = name
        return self
    def create_wtml_folder(self):
        """
        Create a one-item :class:`wwt_data_formats.folder.Folder` object
        capturing this image.
        """
        from wwt_data_formats.folder import Folder
        self.place.name = self.imgset.name
        self.place.data_set_type = self.imgset.data_set_type
        self.place.thumbnail = self.imgset.thumbnail_url
        folder = Folder()
        folder.name = self.imgset.name
        # For all-sky/all-planet datasets, don't associate the imageset with a
        # particular Place. Otherwise, loading up the imageset causes the view
        # to zoom to a particular RA/Dec or lat/lon, likely 0,0. We might want
        # to make this manually configurable but this heuristic should Do The
        # Right Thing most times.
        if self.imgset.projection == ProjectionType.TOAST:
            folder.children = [self.imgset]
        else:
            folder.children = [self.place]
        return folder
    def write_index_rel_wtml(self):
        """Write this builder's folder as ``index_rel.wtml`` in the pyramid."""
        from wwt_data_formats import write_xml_doc
        folder = self.create_wtml_folder()
        with self.pio.open_metadata_for_write("index_rel.wtml") as f:
            write_xml_doc(folder.to_xml(), dest_stream=f, dest_wants_bytes=True)
        return self
| [
"numpy.nanpercentile",
"wwt_data_formats.layers.LayerContainerReader.from_file",
"io.BytesIO",
"wwt_data_formats.place.Place",
"numpy.zeros",
"wwt_data_formats.folder.Folder",
"wwt_data_formats.imageset.ImageSet"
] | [((1488, 1498), 'wwt_data_formats.imageset.ImageSet', 'ImageSet', ([], {}), '()\n', (1496, 1498), False, 'from wwt_data_formats.imageset import ImageSet\n'), ((1692, 1699), 'wwt_data_formats.place.Place', 'Place', ([], {}), '()\n', (1697, 1699), False, 'from wwt_data_formats.place import Place\n'), ((9371, 9408), 'numpy.zeros', 'np.zeros', (['(45, 96, 3)'], {'dtype': 'np.uint8'}), '((45, 96, 3), dtype=np.uint8)\n', (9379, 9408), True, 'import numpy as np\n'), ((11680, 11688), 'wwt_data_formats.folder.Folder', 'Folder', ([], {}), '()\n', (11686, 11688), False, 'from wwt_data_formats.folder import Folder\n'), ((5468, 5509), 'wwt_data_formats.layers.LayerContainerReader.from_file', 'LayerContainerReader.from_file', (['wwtl_path'], {}), '(wwtl_path)\n', (5498, 5509), False, 'from wwt_data_formats.layers import ImageSetLayer, LayerContainerReader\n'), ((6241, 6258), 'io.BytesIO', 'BytesIO', (['img_data'], {}), '(img_data)\n', (6248, 6258), False, 'from io import BytesIO\n'), ((8894, 8941), 'numpy.nanpercentile', 'np.nanpercentile', (['top_tile[0].data', '[0.5, 99.5]'], {}), '(top_tile[0].data, [0.5, 99.5])\n', (8910, 8941), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
###########################################################
# The example shows how to get mapping data #
# The peak ratio at 1315 cm^-1 and 1380 cm^-1 are plotted #
# Details see Small 14, 1804006 (2018). #
###########################################################
import numpy as np
from renishawWiRE import WDFReader
from _path import curdir, imgdir
# matplotlib is optional: when it is unavailable, run the analysis without
# producing any figures (main() checks the `plot` flag).
try:
    import matplotlib.pyplot as plt
    plot = True
except ImportError:
    plot = False
def peak_in_range(spectra, wn, range, method="max", **params):
    """Reduce each spectrum over a wavenumber window.

    Selects the slice of ``spectra`` (last axis) whose wavenumbers ``wn``
    fall inside the closed interval ``range`` and collapses that axis with
    the named numpy reducer (``"max"``, ``"min"``, or ``"mean"``).
    Extra keyword arguments are passed through to the reducer.
    """
    low, high = range
    in_window = np.where((wn >= low) & (wn <= high))[0]
    windowed = spectra[:, :, in_window]
    reducer = getattr(np, method)
    return reducer(windowed, axis=2, **params)
def main():
    """Load a Raman mapping .wdf file and plot the 1315/1380 cm^-1 peak ratio."""
    filename = curdir / "spectra_files" / "mapping.wdf"
    reader = WDFReader(filename)
    # measurement_type == 3 identifies a mapping (grid) measurement
    assert reader.measurement_type == 3
    wn = reader.xdata
    spectra = reader.spectra
    print(wn.shape, spectra.shape)
    x = reader.xpos
    y = reader.ypos
    w, h = reader.map_shape
    print("The size of mapping is {0:d} * {1:d}".
          format(w, h))
    # w and h are the measure in xy coordinates
    # Level the spectra
    spectra = spectra - np.min(spectra, axis=2, keepdims=True)
    # Peak A near 1315 cm^-1, peak B near 1380 cm^-1 (see Small 14, 1804006)
    peaks_a = peak_in_range(spectra, wn, [1295, 1340])
    peaks_b = peak_in_range(spectra, wn, [1350, 1400])
    ratio = peaks_a / peaks_b
    ratio_fl = ratio.flatten()
    if plot is True:
        plt.figure(figsize=(10, 5))
        # Left plot histogram of Peak A/B ratio
        plt.subplot(121)
        plt.hist(ratio_fl, bins=50, range=(0.1, 2))
        plt.xlabel("Ratio peak A / peak B")
        plt.ylabel("Counts")
        # Right plot histogram of Peak A/B mapping
        plt.subplot(122)
        plt.imshow(ratio, interpolation="bicubic",
                   extent=[0, x.max() - x.min(),
                           y.max() - y.min(), 0],
                   vmin=0.5, vmax=1.5)
        plt.xlabel("Mapping x [μm]")
        plt.ylabel("Mapping y [μm]")
        cb = plt.colorbar()
        cb.ax.set_title("Ratio")
        plt.tight_layout()
        # Show briefly, save to the image directory, then close the figure.
        plt.show(block=False)
        plt.pause(3)
        plt.savefig(imgdir / "mapping.png", dpi=100)
        plt.close()
    else:
        pass
    return
# Run the example only when executed as a script.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"renishawWiRE.WDFReader",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"matplotli... | [((913, 932), 'renishawWiRE.WDFReader', 'WDFReader', (['filename'], {}), '(filename)\n', (922, 932), False, 'from renishawWiRE import WDFReader\n'), ((681, 726), 'numpy.where', 'np.where', (['((wn >= range[0]) & (wn <= range[1]))'], {}), '((wn >= range[0]) & (wn <= range[1]))\n', (689, 726), True, 'import numpy as np\n'), ((1297, 1335), 'numpy.min', 'np.min', (['spectra'], {'axis': '(2)', 'keepdims': '(True)'}), '(spectra, axis=2, keepdims=True)\n', (1303, 1335), True, 'import numpy as np\n'), ((1538, 1565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1548, 1565), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1634, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1691), 'matplotlib.pyplot.hist', 'plt.hist', (['ratio_fl'], {'bins': '(50)', 'range': '(0.1, 2)'}), '(ratio_fl, bins=50, range=(0.1, 2))\n', (1656, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1735), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ratio peak A / peak B"""'], {}), "('Ratio peak A / peak B')\n", (1710, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (1754, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1841), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1836, 1841), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2067), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mapping x [μm]"""'], {}), "('Mapping x [μm]')\n", (2049, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2104), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mapping y [μm]"""'], {}), "('Mapping y [μm]')\n", (2086, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2118, 2132), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2130, 2132), True, 'import matplotlib.pyplot as plt\n'), 
((2174, 2192), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2190, 2192), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2222), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2209, 2222), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2243), 'matplotlib.pyplot.pause', 'plt.pause', (['(3)'], {}), '(3)\n', (2240, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2296), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(imgdir / 'mapping.png')"], {'dpi': '(100)'}), "(imgdir / 'mapping.png', dpi=100)\n", (2263, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2305, 2316), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2314, 2316), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
from floris.simulation import Floris
from floris.simulation import TurbineMap
from .flow_data import FlowData
from ..utilities import Vec3
import copy
class FlorisInterface():
    """
    The interface between a FLORIS instance and the wfc tools
    """

    def __init__(self, input_file):
        """Instantiate a FLORIS model from a JSON input file path."""
        self.input_file = input_file
        self.floris = Floris(input_file=input_file)

    def calculate_wake(self, yaw_angles=None):
        """
        Wrapper to the floris flow field calculate_wake method
        Args:
            yaw_angles (np.array, optional): Turbine yaw angles.
                Defaults to None.
        """
        if yaw_angles is not None:
            self.floris.farm.set_yaw_angles(yaw_angles)
        self.floris.farm.flow_field.calculate_wake()

    # Extension by WF-2.0
    def marl_fi_calculate_wake(self, turb_i, init_paras=False, final_paras=False, yaw_angles=None):
        """Multi-agent RL variant of calculate_wake for a single turbine index."""
        if yaw_angles is not None:
            self.floris.farm.set_yaw_angles(yaw_angles)
        self.floris.farm.flow_field.marl_calculate_wake(turb_i, init_paras, final_paras)

    def reinitialize_flow_field(self,
                                wind_speed=None,
                                wind_direction=None,
                                wind_shear=None,
                                wind_veer=None,
                                turbulence_intensity=None,
                                air_density=None,
                                wake=None,
                                layout_array=None,
                                with_resolution=None):
        """
        Wrapper to
        :py:meth:`floris.simlulation.flow_field.reinitialize_flow_field`.
        All input values are used to update the flow_field instance.
        Args:
            wind_speed (float, optional): background wind speed.
                Defaults to None.
            wind_direction (float, optional): background wind direction.
                Defaults to None.
            wind_shear (float, optional): shear exponent.
                Defaults to None.
            wind_veer (float, optional): direction change over rotor.
                Defaults to None.
            turbulence_intensity (float, optional): background turbulence
                intensity. Defaults to None.
            air_density (float, optional): ambient air density.
                Defaults to None.
            wake (str, optional): wake model type. Defaults to None.
            layout_array (np.array, optional): array of x- and
                y-locations of wind turbines. Defaults to None.
            with_resolution (float, optional): resolution of output
                flow_field. Defaults to None.
        """
        # Build turbine map (convenience layer for user). Each position gets
        # an independent deep copy of the first turbine as a template.
        if layout_array is not None:
            turbine_map = TurbineMap(
                layout_array[0], layout_array[1], \
                [copy.deepcopy(self.floris.farm.turbines[0]) \
                 for ii in range(len(layout_array[0]))])
        else:
            turbine_map = None
        self.floris.farm.flow_field.reinitialize_flow_field(
            wind_speed=wind_speed,
            wind_direction=wind_direction,
            wind_shear=wind_shear,
            wind_veer=wind_veer,
            turbulence_intensity=turbulence_intensity,
            air_density=air_density,
            wake=wake,
            turbine_map=turbine_map,
            with_resolution=with_resolution)

    def _flow_data_from_flow_field(self, flow_field):
        """Flatten a calculated flow field into a FlowData object.

        Shared by :meth:`get_hub_height_flow_data` and :meth:`get_flow_data`
        (previously duplicated in both).
        Args:
            flow_field: a FLORIS flow field whose wake has been calculated.
        Returns:
            :py:class:`floris.tools.flow_data.FlowData`
        """
        order = "f"
        x = flow_field.x.flatten(order=order)
        y = flow_field.y.flatten(order=order)
        z = flow_field.z.flatten(order=order)
        u = flow_field.u.flatten(order=order)
        v = flow_field.v.flatten(order=order)
        w = flow_field.w.flatten(order=order)
        # Determine spacing, dimensions and origin from the unique grid
        # coordinates (assumes a regular grid).
        unique_x = np.sort(np.unique(x))
        unique_y = np.sort(np.unique(y))
        unique_z = np.sort(np.unique(z))
        spacing = Vec3(unique_x[1] - unique_x[0], unique_y[1] - unique_y[0],
                       unique_z[1] - unique_z[0])
        dimensions = Vec3(len(unique_x), len(unique_y), len(unique_z))
        origin = Vec3(0.0, 0.0, 0.0)
        return FlowData(x,
                        y,
                        z,
                        u,
                        v,
                        w,
                        spacing=spacing,
                        dimensions=dimensions,
                        origin=origin)

    # Special case function for quick visualization of hub height
    def get_hub_height_flow_data(self,
                                 x_resolution=100,
                                 y_resolution=100,
                                 x_bounds=None,
                                 y_bounds=None):
        """
        Shortcut method to visualize flow field at hub height.
        Args:
            x_resolution (float, optional): output array resolution.
                Defaults to 100.
            y_resolution (float, optional): output array resolution.
                Defaults to 100.
            x_bounds (tuple, optional): limits of output array.
                Defaults to None.
            y_bounds (tuple, optional): limits of output array.
                Defaults to None.
        Returns:
            :py:class:`floris.tools.flow_data.FlowData`: FlowData object at hub
            height.
        Raises:
            ValueError: if the wake model requires its own grid resolution.
        """
        if self.floris.farm.flow_field.wake.velocity_model.requires_resolution:
            # BUGFIX: the original code raised a bare string, which is itself
            # a TypeError in Python 3 (exceptions must derive from
            # BaseException). Raise a proper exception instead.
            raise ValueError(
                'Not allowed for wake model %s ' %
                self.floris.farm.flow_field.wake.velocity_model.model_string)
        # Get a copy for the flow field so don't change underlying grid points
        flow_field = copy.deepcopy(self.floris.farm.flow_field)
        # If x and y bounds are not provided, use rules of thumb
        if x_bounds is None:
            coords = self.floris.farm.flow_field.turbine_map.coords
            max_diameter = self.floris.farm.flow_field.max_diameter
            x = [coord.x1 for coord in coords]
            x_bounds = (min(x) - 2 * max_diameter, max(x) + 10 * max_diameter)
        if y_bounds is None:
            coords = self.floris.farm.flow_field.turbine_map.coords
            max_diameter = self.floris.farm.flow_field.max_diameter
            y = [coord.x2 for coord in coords]
            y_bounds = (min(y) - 2 * max_diameter, max(y) + 2 * max_diameter)
        # Z_bounds is always hub-height
        hub_height = self.floris.farm.flow_field.turbine_map.turbines[
            0].hub_height
        bounds_to_set = (x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1],
                         hub_height - 5., hub_height + 5.)
        # Set new bounds
        flow_field.set_bounds(bounds_to_set=bounds_to_set)
        # Change the resolution; only 3 z-planes around hub height are needed
        flow_field.reinitialize_flow_field(
            with_resolution=Vec3(x_resolution, y_resolution, 3))
        # Calculate the wakes
        flow_field.calculate_wake()
        return self._flow_data_from_flow_field(flow_field)

    def get_flow_data(self, resolution=None, grid_spacing=10):
        """
        Generate FlowData object corresponding to the floris instance.
        #TODO disambiguate between resolution and grid spacing.
        Args:
            resolution (float, optional): resolution of output data.
                Only used for wake models that require spatial
                resolution (e.g. curl). Defaults to None.
            grid_spacing (int, optional): resolution of output data.
                Defaults to 10.
        Returns:
            :py:class:`floris.tools.flow_data.FlowData`: FlowData object
        """
        if resolution is None:
            if not self.floris.farm.flow_field.wake.velocity_model.requires_resolution:
                print('Assuming grid with spacing %d' % grid_spacing)
                xmin, xmax, ymin, ymax, zmin, zmax = self.floris.farm.flow_field.domain_bounds
                resolution = Vec3(1 + (xmax - xmin) / grid_spacing,
                                  1 + (ymax - ymin) / grid_spacing,
                                  1 + (zmax - zmin) / grid_spacing)
                self.grid_spacing_if_need = grid_spacing
            else:
                print('Assuming model resolution')
                resolution = self.floris.farm.flow_field.wake.velocity_model.model_grid_resolution
        # Get a copy for the flow field so don't change underlying grid points
        flow_field = copy.deepcopy(self.floris.farm.flow_field)
        if flow_field.wake.velocity_model.requires_resolution and \
            flow_field.wake.velocity_model.model_grid_resolution != resolution:
            print(
                "WARNING: The current wake velocity model contains a required grid resolution;"
            )
            print(
                "    The Resolution given to FlorisInterface.get_flow_field is ignored."
            )
            resolution = flow_field.wake.velocity_model.model_grid_resolution
        flow_field.reinitialize_flow_field(with_resolution=resolution)
        # (A stray debug `print(resolution)` was removed here.)
        flow_field.calculate_wake()
        return self._flow_data_from_flow_field(flow_field)

    def get_yaw_angles(self):
        """
        Report yaw angles of wind turbines from instance of floris.
        Returns:
            yaw_angles (np.array): wind turbine yaw angles.
        """
        yaw_angles = [
            turbine.yaw_angle
            for turbine in self.floris.farm.turbine_map.turbines
        ]
        return yaw_angles

    def get_farm_power(self):
        """
        Report wind plant power from instance of floris.
        Returns:
            plant_power (float): sum of wind turbine powers.
        """
        turb_powers = [turbine.power for turbine in self.floris.farm.turbines]
        return np.sum(turb_powers)

    def get_turbine_power(self):
        """
        Report power from each wind turbine from instance of floris.
        Returns:
            turb_powers (np.array): power produced by each wind turbine.
        """
        turb_powers = [
            turbine.power
            for turbine in self.floris.farm.flow_field.turbine_map.turbines
        ]
        return turb_powers

    def get_turbine_ct(self):
        """
        Report thrust coefficient from each wind turbine from instance of floris.
        Returns:
            turb_ct_array (np.array): thrust coefficient for each wind turbine.
        """
        turb_ct_array = [
            turbine.Ct
            for turbine in self.floris.farm.flow_field.turbine_map.turbines
        ]
        return turb_ct_array

    # calculate the power under different yaw angles
    def get_power_for_yaw_angle_opt(self, yaw_angles):
        """
        Assign yaw angles to turbines, calculate wake, report power
        Args:
            yaw_angles (np.array): yaw to apply to each turbine
        Returns:
            power (float): negated wind plant power in kW (negated so that
                a minimizer maximizes power).
        """
        self.calculate_wake(yaw_angles=yaw_angles)
        # self.floris.farm.set_yaw_angles(yaw_angles, calculate_wake=True)
        power = -1 * np.sum(
            [turbine.power for turbine in self.floris.farm.turbines])
        return power / (10**3)

    @property
    def layout_x(self):
        """
        Wind turbine coordinate information.
        Returns:
            layout_x (np.array): Wind turbine x-coordinate (east-west).
        """
        coords = self.floris.farm.flow_field.turbine_map.coords
        layout_x = np.zeros(len(coords))
        for i, coord in enumerate(coords):
            layout_x[i] = coord.x1prime
        return layout_x

    @property
    def layout_y(self):
        """
        Wind turbine coordinate information.
        Returns:
            layout_y (np.array): Wind turbine y-coordinate (north-south).
        """
        coords = self.floris.farm.flow_field.turbine_map.coords
        layout_y = np.zeros(len(coords))
        for i, coord in enumerate(coords):
            layout_y[i] = coord.x2prime
        return layout_y
"numpy.sum",
"numpy.unique",
"floris.simulation.Floris",
"copy.deepcopy"
] | [((928, 957), 'floris.simulation.Floris', 'Floris', ([], {'input_file': 'input_file'}), '(input_file=input_file)\n', (934, 957), False, 'from floris.simulation import Floris\n'), ((5289, 5331), 'copy.deepcopy', 'copy.deepcopy', (['self.floris.farm.flow_field'], {}), '(self.floris.farm.flow_field)\n', (5302, 5331), False, 'import copy\n'), ((8984, 9026), 'copy.deepcopy', 'copy.deepcopy', (['self.floris.farm.flow_field'], {}), '(self.floris.farm.flow_field)\n', (8997, 9026), False, 'import copy\n'), ((11275, 11294), 'numpy.sum', 'np.sum', (['turb_powers'], {}), '(turb_powers)\n', (11281, 11294), True, 'import numpy as np\n'), ((6924, 6936), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (6933, 6936), True, 'import numpy as np\n'), ((6965, 6977), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6974, 6977), True, 'import numpy as np\n'), ((7006, 7018), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (7015, 7018), True, 'import numpy as np\n'), ((10015, 10027), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (10024, 10027), True, 'import numpy as np\n'), ((10056, 10068), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (10065, 10068), True, 'import numpy as np\n'), ((10097, 10109), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (10106, 10109), True, 'import numpy as np\n'), ((12594, 12658), 'numpy.sum', 'np.sum', (['[turbine.power for turbine in self.floris.farm.turbines]'], {}), '([turbine.power for turbine in self.floris.farm.turbines])\n', (12600, 12658), True, 'import numpy as np\n'), ((3478, 3521), 'copy.deepcopy', 'copy.deepcopy', (['self.floris.farm.turbines[0]'], {}), '(self.floris.farm.turbines[0])\n', (3491, 3521), False, 'import copy\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase, parameterized.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
@parameterized.parameters(dtypes.int32, dtypes.int64)
def testSimpleGather(self, indices_dtype):
data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
indices = [3, 4]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices, dtype=indices_dtype)
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([3, 7])
np_val = self._buildParams(expected_result, dtype)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
@parameterized.parameters(dtypes.int32, dtypes.int64)
def test2DArray(self, indices_dtype):
data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
indices = [[3], [4]]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices, dtype=indices_dtype)
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([[3], [15]])
np_val = self._buildParams(expected_result, dtype)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testHigherRank(self):
data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.batch_gather(params, indices_tf)
gather_val = self.evaluate(gather_t)
expected_result = np.array([[[2, 0], [7, 5]], [[10, 8], [11, 15]]])
np_val = self._buildParams(expected_result, dtype)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.cached_session():
indices_tf = constant_op.constant([1])
self.assertAllEqual(
[[b"qwer", b"uiop"]],
self.evaluate(array_ops.batch_gather(params, indices_tf)))
@test_util.run_deprecated_v1
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32, shape=[None, None])
gather_t = array_ops.batch_gather(params, indices)
self.assertEqual([1, None], gather_t.get_shape().as_list())
  def testBadIndicesCPU(self):
    """An out-of-range index raises an op error on CPU."""
    with self.session(use_gpu=False):
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0\] = 7 is not in \[0, 2\)"):
        self.evaluate(array_ops.batch_gather(params, [7]))
def testEmptySlices(self):
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
self.assertAllEqual(
self.evaluate(array_ops.batch_gather(params, indices)),
np.zeros((2, 0, 0)))
if __name__ == "__main__":
  # Run all test cases in this file under the TensorFlow test runner.
  test.main()
| [
"tensorflow.python.ops.array_ops.placeholder",
"absl.testing.parameterized.parameters",
"tensorflow.python.framework.constant_op.constant",
"numpy.array",
"numpy.zeros",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.batch_gather"
] | [((1569, 1621), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['dtypes.int32', 'dtypes.int64'], {}), '(dtypes.int32, dtypes.int64)\n', (1593, 1621), False, 'from absl.testing import parameterized\n'), ((2313, 2365), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['dtypes.int32', 'dtypes.int64'], {}), '(dtypes.int32, dtypes.int64)\n', (2337, 2365), False, 'from absl.testing import parameterized\n'), ((5047, 5058), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (5056, 5058), False, 'from tensorflow.python.platform import test\n'), ((1678, 1728), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13]'], {}), '([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])\n', (1686, 1728), True, 'import numpy as np\n'), ((2417, 2471), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]]'], {}), '([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])\n', (2425, 2471), True, 'import numpy as np\n'), ((3101, 3163), 'numpy.array', 'np.array', (['[[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]]'], {}), '([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])\n', (3109, 3163), True, 'import numpy as np\n'), ((3824, 3874), 'numpy.array', 'np.array', (["[[b'asdf', b'zxcv'], [b'qwer', b'uiop']]"], {}), "([[b'asdf', b'zxcv'], [b'qwer', b'uiop']])\n", (3832, 3874), True, 'import numpy as np\n'), ((4157, 4190), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[0, 1, 2]]'], {}), '([[0, 1, 2]])\n', (4177, 4190), False, 'from tensorflow.python.framework import constant_op\n'), ((4205, 4260), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int32'], {'shape': '[None, None]'}), '(dtypes.int32, shape=[None, None])\n', (4226, 4260), False, 'from tensorflow.python.ops import array_ops\n'), ((4276, 4315), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices'], {}), '(params, indices)\n', (4298, 
4315), False, 'from tensorflow.python.ops import array_ops\n'), ((3926, 3951), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1]'], {}), '([1])\n', (3946, 3951), False, 'from tensorflow.python.framework import constant_op\n'), ((1887, 1918), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['params_np'], {}), '(params_np)\n', (1907, 1918), False, 'from tensorflow.python.framework import constant_op\n'), ((1940, 1990), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['indices'], {'dtype': 'indices_dtype'}), '(indices, dtype=indices_dtype)\n', (1960, 1990), False, 'from tensorflow.python.framework import constant_op\n'), ((2010, 2052), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices_tf'], {}), '(params, indices_tf)\n', (2032, 2052), False, 'from tensorflow.python.ops import array_ops\n'), ((2079, 2095), 'numpy.array', 'np.array', (['[3, 7]'], {}), '([3, 7])\n', (2087, 2095), True, 'import numpy as np\n'), ((2634, 2665), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['params_np'], {}), '(params_np)\n', (2654, 2665), False, 'from tensorflow.python.framework import constant_op\n'), ((2687, 2737), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['indices'], {'dtype': 'indices_dtype'}), '(indices, dtype=indices_dtype)\n', (2707, 2737), False, 'from tensorflow.python.framework import constant_op\n'), ((2757, 2799), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices_tf'], {}), '(params, indices_tf)\n', (2779, 2799), False, 'from tensorflow.python.ops import array_ops\n'), ((2826, 2847), 'numpy.array', 'np.array', (['[[3], [15]]'], {}), '([[3], [15]])\n', (2834, 2847), True, 'import numpy as np\n'), ((3352, 3383), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['params_np'], {}), '(params_np)\n', (3372, 
3383), False, 'from tensorflow.python.framework import constant_op\n'), ((3405, 3434), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['indices'], {}), '(indices)\n', (3425, 3434), False, 'from tensorflow.python.framework import constant_op\n'), ((3454, 3496), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices_tf'], {}), '(params, indices_tf)\n', (3476, 3496), False, 'from tensorflow.python.ops import array_ops\n'), ((3568, 3617), 'numpy.array', 'np.array', (['[[[2, 0], [7, 5]], [[10, 8], [11, 15]]]'], {}), '([[[2, 0], [7, 5]], [[10, 8], [11, 15]]])\n', (3576, 3617), True, 'import numpy as np\n'), ((4035, 4077), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices_tf'], {}), '(params, indices_tf)\n', (4057, 4077), False, 'from tensorflow.python.ops import array_ops\n'), ((4587, 4622), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', '[7]'], {}), '(params, [7])\n', (4609, 4622), False, 'from tensorflow.python.ops import array_ops\n'), ((4783, 4830), 'numpy.zeros', 'np.zeros', (['(7, 0, 0)'], {'dtype': 'dtype.as_numpy_dtype'}), '((7, 0, 0), dtype=dtype.as_numpy_dtype)\n', (4791, 4830), True, 'import numpy as np\n'), ((4851, 4880), 'numpy.array', 'np.array', (['[3, 4]'], {'dtype': 'itype'}), '([3, 4], dtype=itype)\n', (4859, 4880), True, 'import numpy as np\n'), ((4996, 5015), 'numpy.zeros', 'np.zeros', (['(2, 0, 0)'], {}), '((2, 0, 0))\n', (5004, 5015), True, 'import numpy as np\n'), ((4940, 4979), 'tensorflow.python.ops.array_ops.batch_gather', 'array_ops.batch_gather', (['params', 'indices'], {}), '(params, indices)\n', (4962, 4979), False, 'from tensorflow.python.ops import array_ops\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os
import pydicom as pyd
from glob import glob
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
import bisect
import random
import math
from Augmentor import *
def import_dicom_data(path):
    """Load paired DICOM image/label volumes from ``path``.

    Expects ``path`` to contain ``images/`` and ``labels/`` sub-directories of
    ``.dcm`` files.  Only the first half of each volume's frames is kept
    (presumably the second half duplicates a view — TODO confirm with data).

    Args:
        path: root directory of the dataset.

    Returns:
        (data, annot): two lists of numpy pixel arrays, one entry per scan.
    """
    data_path = path + '/images/'
    annot_path = path + '/labels/'
    data_list = glob(data_path + '*.dcm')
    annot_list = glob(annot_path + '*.dcm')
    N = len(data_list)
    data = []
    annot = []
    annot_frames = np.zeros(N)
    print('Data Image Resolutions')
    for i in range(N):
        x = pyd.read_file(data_list[i]).pixel_array
        # BUG FIX: floor division — a float slice index (len(x) / 2)
        # raises TypeError on Python 3.
        x = x[:len(x) // 2]
        y = pyd.read_file(annot_list[i]).pixel_array
        y = y[:len(y) // 2]
        # Count frames containing at least one positive label pixel.
        n_frame = 0
        for j in range(y.shape[0]):
            if np.any(y[j] == 1):
                n_frame += 1
        annot_frames[i] = n_frame
        print(x.shape, n_frame)
        data.append(x)
        annot.append(y)
    return data, annot
def zeropad(data, annot, h_max, w_max):
    """Center-pad images and their annotations with zeros to (h_max, w_max).

    Accepts either a list of 2-D images with varying resolutions (useful at
    test time) or a 3-D numpy stack of same-resolution images.

    Returns:
        (data_pad, annot_pad): zero-padded numpy arrays.
    """

    def _pad_widths(height, width):
        # Split the margin as evenly as possible; the extra pixel goes to
        # the bottom/right side.
        top = (h_max - height) // 2
        left = (w_max - width) // 2
        return (top, h_max - height - top), (left, w_max - width - left)

    if isinstance(data, list):
        count = len(data)
        data_pad = np.zeros((count, h_max, w_max))
        annot_pad = np.zeros((count, h_max, w_max))
        for idx in range(count):
            widths = _pad_widths(data[idx].shape[0], data[idx].shape[1])
            data_pad[idx] = np.pad(data[idx], widths, 'constant',
                                   constant_values=((0, 0), (0, 0)))
            annot_pad[idx] = np.pad(annot[idx], widths, 'constant',
                                    constant_values=((0, 0), (0, 0)))
    else:
        # A stack of same-resolution images: pad the spatial axes only.
        widths = ((0, 0),) + _pad_widths(data.shape[1], data.shape[2])
        data_pad = np.pad(data, widths, 'constant',
                          constant_values=((0, 0), (0, 0), (0, 0)))
        annot_pad = np.pad(annot, widths, 'constant',
                           constant_values=((0, 0), (0, 0), (0, 0)))
    return data_pad, annot_pad
def data_augment(imgs, lb):
    """Apply random rotation (±10°, p=0.7) to an image batch and its labels."""
    pipeline = Pipeline()
    pipeline.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
    out_imgs = np.zeros(imgs.shape)
    out_lbs = np.zeros(imgs.shape)
    for idx in range(imgs.shape[0]):
        # Augment image and ground truth with the same random transform.
        augmented = pipeline.sample_with_array(imgs[idx], ground_truth=lb[idx], mode='L')
        out_imgs[idx] = np.asarray(augmented[0])
        out_lbs[idx] = np.asarray(augmented[1])
    return out_imgs, out_lbs
def get_weighted_batch(imgs, labels, batch_size, data_aug, high_skew=False):
    """Infinite generator of batches sampled proportionally to label area.

    Frames whose masks contain a larger fraction of positive (== 1) pixels
    are drawn more often; with ``high_skew`` the weights are squared.  Each
    batch is cropped to the shared non-zero bounding box (snapped outward to
    multiples of 16 — presumably to match the network's downsampling factor)
    and optionally augmented.

    Args:
        imgs: array of frames, shape (N, H, W) — assumed; TODO confirm.
        labels: array of binary masks, same shape as ``imgs``.
        batch_size: number of frames per yielded batch.
        data_aug: if truthy, run data_augment() on each batch.
        high_skew: square the sampling weights to favour annotated frames.

    Yields:
        (batch_imgs, batch_labels), each with a trailing channel axis of 1.
    """
    while 1:
        # Per-frame sampling weight: fraction of pixels labelled 1.
        thy_re = [np.count_nonzero(labels[i] == 1) * 1.0 / np.prod(labels[i].shape) for i in range(imgs.shape[0])]
        if high_skew==True:
            thy_re = [el**2 for el in thy_re]
        # Cumulative weights for inverse-CDF sampling via bisect.
        cumul = [thy_re[0]]
        for item in thy_re[1:]: cumul.append(cumul[-1] + item)
        total_prob = sum(thy_re)
        ar_inds = [bisect.bisect_right(cumul, random.uniform(0, total_prob)) for i in range(batch_size)]
        lb, batch_imgs = labels[ar_inds], imgs[ar_inds]
        # Shrink [l:r] x [t:b] to the rows/columns non-zero in any batch frame.
        l, r, t, b = 0, batch_imgs.shape[1], 0, batch_imgs.shape[2]
        for i in range(batch_imgs.shape[1]):
            if np.all(batch_imgs[:, i, :] == 0):
                l = i + 1
            else:
                break
        for i in range(batch_imgs.shape[1] - 1, -1, -1):
            if np.all(batch_imgs[:, i, :] == 0):
                r = i
            else:
                break
        for i in range(batch_imgs.shape[2]):
            if np.all(batch_imgs[:, :, i] == 0):
                t = i + 1
            else:
                break
        for i in range(batch_imgs.shape[2] - 1, -1, -1):
            if np.all(batch_imgs[:, :, i] == 0):
                b = i
            else:
                break
        # Round the crop outward to multiples of 16.
        l, r, t, b = (l // 16) * 16, math.ceil(r * 1.0 / 16) * 16, (t // 16) * 16, math.ceil(b * 1.0 / 16) * 16
        l, r, t, b = int(l), int(r), int(t), int(b)
        batch_imgs, lb = batch_imgs[:, l:r, t:b], lb[:, l:r, t:b]
        if (data_aug):
            batch_imgs, lb = data_augment(batch_imgs, lb)
        yield np.expand_dims(batch_imgs, axis=3),np.expand_dims(lb, axis=3)
def get_weighted_batch_window_2d(imgs, labels, batch_size, data_aug, n_window=0, high_skew=False):
    """Infinite generator of frame-window batches sampled by label area.

    Like get_weighted_batch(), but when ``n_window`` > 0 each sampled index i
    contributes its neighbouring frames imgs[i-1:i+2] stacked along a trailing
    window axis, while the label is taken from the centre frame only.

    NOTE(review): the neighbour slice always spans 3 frames (i-1 .. i+1), so
    the window assignment is only shape-consistent when n_window == 3 —
    confirm with callers.  Also note data_aug is currently a no-op here (the
    augmentation call is commented out).

    Args:
        imgs: array of frames, shape (N, H, W) — assumed; TODO confirm.
        labels: array of binary masks, same shape as ``imgs``.
        batch_size: number of window samples per yielded batch.
        data_aug: augmentation flag (currently ignored).
        n_window: frames per window; 0 disables windowing.
        high_skew: square the sampling weights to favour annotated frames.

    Yields:
        (batch_imgs, batch_labels); batch_imgs has a trailing window axis and
        batch_labels a trailing channel axis of 1.
    """
    # a=0
    # if a==0:
    # print('datagen')
    while 1:
        # Per-frame sampling weight: fraction of pixels labelled 1.
        thy_re = [np.count_nonzero(labels[i] == 1) * 1.0 / np.prod(labels[i].shape) for i in range(imgs.shape[0])]
        if high_skew==True:
            thy_re = [el**2 for el in thy_re]
        # Cumulative weights for inverse-CDF sampling via bisect.
        cumul = [thy_re[0]]
        for item in thy_re[1:]: cumul.append(cumul[-1] + item)
        total_prob = sum(thy_re)
        ar_inds = [bisect.bisect_right(cumul, random.uniform(0, total_prob)) for i in range(batch_size)]
        if n_window==0:
            batch_imgs = imgs[ar_inds]
        # Get n_window frames per index.
        else:
            batch_imgs = np.zeros((batch_size*n_window,imgs.shape[1],imgs.shape[2]))
            for i in range(batch_size):
                # Clamp to keep the i-1 .. i+1 neighbourhood in bounds.
                if ar_inds[i]==0:
                    ar_inds[i] = 1
                elif ar_inds[i] == len(imgs)-1:
                    ar_inds[i] -= 1
                batch_imgs[n_window*i:n_window*(i+1)] = imgs[ar_inds[i]-1:ar_inds[i]+2]
        lb = labels[ar_inds]
        # Shrink [l:r] x [t:b] to the rows/columns non-zero in any batch frame.
        l, r, t, b = 0, batch_imgs.shape[1], 0, batch_imgs.shape[2]
        for i in range(batch_imgs.shape[1]):
            if np.all(batch_imgs[:, i, :] == 0):
                l = i + 1
            else:
                break
        for i in range(batch_imgs.shape[1] - 1, -1, -1):
            if np.all(batch_imgs[:, i, :] == 0):
                r = i
            else:
                break
        for i in range(batch_imgs.shape[2]):
            if np.all(batch_imgs[:, :, i] == 0):
                t = i + 1
            else:
                break
        for i in range(batch_imgs.shape[2] - 1, -1, -1):
            if np.all(batch_imgs[:, :, i] == 0):
                b = i
            else:
                break
        # Round the crop outward to multiples of 16.
        l, r, t, b = (l // 16) * 16, math.ceil(r * 1.0 / 16) * 16, (t // 16) * 16, math.ceil(b * 1.0 / 16) * 16
        l, r, t, b = int(l), int(r), int(t), int(b)
        batch_imgs, lb = batch_imgs[:, l:r, t:b], lb[:, l:r, t:b]
        # batch_imgs_3d = np.zeros((batch_size,imgs.shape[1], imgs.shape[2], n_window))
        # k=0
        # for i in range(batch_size):
        # for j in range(n_window):
        # batch_imgs_3d[i,:,:,j] = batch_imgs[k,:,:]
        # k += 1
        # Regroup consecutive window frames into a trailing window axis.
        batch_imgs = np.array([np.rollaxis(batch_imgs[n_window*i:n_window*(i+1)],0,3) for i in range(batch_size)])
        if (data_aug):
            batch_imgs, lb = batch_imgs, lb#data_augment(batch_imgs, lb)
        # print('batch = ',batch_imgs.shape, lb.shape)
        yield batch_imgs,np.expand_dims(lb, axis=3)
def get_max_dimensions(data_list):
    # NOTE(review): returns a hard-coded (height, width) padding target instead
    # of scanning data_list for the true maxima; every frame is assumed to fit
    # within 320x448 — TODO confirm against the dataset.
    return 320, 448
def create_generators(datadir=None, batch_size=64, augmentation_args=None,
                      model='unet', zero_padding=[0, 0], data_skew=False,
                      validation_index=None, window=0):
    """Create training and validation batch generators for segmentation models.

    Args:
        datadir: directory containing ``images/`` and ``labels/`` DICOM folders.
        batch_size: number of frames per generated batch (weighted samplers only).
        augmentation_args: dict of keras ImageDataGenerator keyword values, or
            None to disable augmentation.
        model: one of 'unet', 'dilated-unet', 'dilated-densenet', 'window-unet'.
        zero_padding: kept for interface compatibility; currently unused.
        data_skew: if True, sample frames weighted by labelled-pixel fraction.
        validation_index: iterable of scan indices held out for validation.
        window: frames per sample window for the 'window-unet' model.

    Returns:
        (train_generator, val_generator) tuple.

    Raises:
        ValueError: if datadir/validation_index is missing or the model name
            is unknown.
    """
    if datadir is None:
        raise ValueError("Data directory not specified")
    data_list, annot_list = import_dicom_data(datadir)
    print(len(data_list))
    # Pad every scan to one common resolution so scans can be concatenated.
    h_max, w_max = get_max_dimensions(data_list)
    for i in range(len(data_list)):
        data_list[i], annot_list[i] = zeropad(data_list[i], annot_list[i], h_max, w_max)
    if validation_index is None:
        raise ValueError("Please specify validation indices")
    # Split scans into train/validation sets and flatten to frame stacks.
    validation_index = set(validation_index)
    trn_imgs, trn_labels, val_imgs, val_labels = [], [], [], []
    for i in range(len(data_list)):
        if i in validation_index:
            val_imgs.append(data_list[i])
            val_labels.append(annot_list[i])
        else:
            trn_imgs.append(data_list[i])
            trn_labels.append(annot_list[i])
    val_imgs = np.concatenate(val_imgs, axis=0)
    val_labels = np.concatenate(val_labels, axis=0)
    trn_imgs = np.concatenate(trn_imgs, axis=0)
    trn_labels = np.concatenate(trn_labels, axis=0)
    print(val_imgs.shape, val_labels.shape, trn_imgs.shape, trn_labels.shape)
    # BUG FIX: renamed the local flag (was `data_augment`) so it no longer
    # shadows the module-level data_augment() function.
    if augmentation_args is not None:
        do_augment = True
        datagen = ImageDataGenerator(
            rotation_range=augmentation_args['rotation_range'],
            width_shift_range=augmentation_args['width_shift_range'],
            height_shift_range=augmentation_args['height_shift_range'],
            shear_range=augmentation_args['shear_range'],
            zoom_range=augmentation_args['zoom_range'],
            fill_mode=augmentation_args['fill_mode'])
    else:
        do_augment = False
        # BUG FIX: fill_mode must be a mode name (not a float) and
        # horizontal_flip is a boolean; all ranges default to 0 anyway.
        datagen = ImageDataGenerator(fill_mode='nearest', horizontal_flip=False)
    if model in ('unet', 'dilated-unet', 'dilated-densenet'):
        if data_skew:
            train_generator = get_weighted_batch(trn_imgs, trn_labels, batch_size, do_augment)
            val_generator = get_weighted_batch(val_imgs, val_labels, batch_size, do_augment, high_skew=True)
        else:
            train_generator = datagen.flow(x=np.expand_dims(trn_imgs, axis=3),
                                          y=np.expand_dims(trn_labels, axis=3), batch_size=16)
            val_generator = datagen.flow(x=np.expand_dims(val_imgs, axis=3),
                                        y=np.expand_dims(val_labels, axis=3), batch_size=16)
    elif model == 'window-unet':
        train_generator = get_weighted_batch_window_2d(trn_imgs, trn_labels, batch_size, do_augment, window)
        val_generator = get_weighted_batch_window_2d(val_imgs, val_labels, batch_size, do_augment, window, high_skew=True)
    else:
        # BUG FIX: an unknown model previously fell through to the return
        # statement and raised NameError on the undefined generators.
        raise ValueError("Unknown model: %s" % model)
    return train_generator, val_generator
| [
"numpy.prod",
"numpy.all",
"random.uniform",
"math.ceil",
"numpy.where",
"numpy.asarray",
"numpy.rollaxis",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.count_nonzero",
"numpy.zeros",
"pydicom.read_file",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.pad",
"glob.glob"
] | [((380, 405), 'glob.glob', 'glob', (["(data_path + '*.dcm')"], {}), "(data_path + '*.dcm')\n", (384, 405), False, 'from glob import glob\n'), ((423, 449), 'glob.glob', 'glob', (["(annot_path + '*.dcm')"], {}), "(annot_path + '*.dcm')\n", (427, 449), False, 'from glob import glob\n'), ((521, 532), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (529, 532), True, 'import numpy as np\n'), ((1222, 1249), 'numpy.zeros', 'np.zeros', (['(n, h_max, w_max)'], {}), '((n, h_max, w_max))\n', (1230, 1249), True, 'import numpy as np\n'), ((1270, 1297), 'numpy.zeros', 'np.zeros', (['(n, h_max, w_max)'], {}), '((n, h_max, w_max))\n', (1278, 1297), True, 'import numpy as np\n'), ((2244, 2360), 'numpy.pad', 'np.pad', (['data', '((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2))', '"""constant"""'], {'constant_values': '((0, 0), (0, 0), (0, 0))'}), "(data, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',\n constant_values=((0, 0), (0, 0), (0, 0)))\n", (2250, 2360), True, 'import numpy as np\n'), ((2403, 2520), 'numpy.pad', 'np.pad', (['annot', '((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2))', '"""constant"""'], {'constant_values': '((0, 0), (0, 0), (0, 0))'}), "(annot, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',\n constant_values=((0, 0), (0, 0), (0, 0)))\n", (2409, 2520), True, 'import numpy as np\n'), ((2723, 2743), 'numpy.zeros', 'np.zeros', (['imgs.shape'], {}), '(imgs.shape)\n', (2731, 2743), True, 'import numpy as np\n'), ((2745, 2765), 'numpy.zeros', 'np.zeros', (['imgs.shape'], {}), '(imgs.shape)\n', (2753, 2765), True, 'import numpy as np\n'), ((8549, 8581), 'numpy.concatenate', 'np.concatenate', (['val_imgs'], {'axis': '(0)'}), '(val_imgs, axis=0)\n', (8563, 8581), True, 'import numpy as np\n'), ((8602, 8636), 'numpy.concatenate', 'np.concatenate', (['val_labels'], {'axis': '(0)'}), '(val_labels, axis=0)\n', (8616, 8636), True, 'import numpy as np\n'), ((8655, 8687), 'numpy.concatenate', 'np.concatenate', (['trn_imgs'], {'axis': '(0)'}), '(trn_imgs, 
axis=0)\n', (8669, 8687), True, 'import numpy as np\n'), ((8708, 8742), 'numpy.concatenate', 'np.concatenate', (['trn_labels'], {'axis': '(0)'}), '(trn_labels, axis=0)\n', (8722, 8742), True, 'import numpy as np\n'), ((8940, 9278), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': "augmentation_args['rotation_range']", 'width_shift_range': "augmentation_args['width_shift_range']", 'height_shift_range': "augmentation_args['height_shift_range']", 'shear_range': "augmentation_args['shear_range']", 'zoom_range': "augmentation_args['zoom_range']", 'fill_mode': "augmentation_args['fill_mode']"}), "(rotation_range=augmentation_args['rotation_range'],\n width_shift_range=augmentation_args['width_shift_range'],\n height_shift_range=augmentation_args['height_shift_range'], shear_range\n =augmentation_args['shear_range'], zoom_range=augmentation_args[\n 'zoom_range'], fill_mode=augmentation_args['fill_mode'])\n", (8958, 9278), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((9389, 9551), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(0.0)', 'width_shift_range': '(0.0)', 'height_shift_range': '(0.0)', 'shear_range': '(0.0)', 'zoom_range': '(0.0)', 'horizontal_flip': '(0.0)', 'fill_mode': '(0.0)'}), '(rotation_range=0.0, width_shift_range=0.0,\n height_shift_range=0.0, shear_range=0.0, zoom_range=0.0,\n horizontal_flip=0.0, fill_mode=0.0)\n', (9407, 9551), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((606, 633), 'pydicom.read_file', 'pyd.read_file', (['data_list[i]'], {}), '(data_list[i])\n', (619, 633), True, 'import pydicom as pyd\n'), ((685, 713), 'pydicom.read_file', 'pyd.read_file', (['annot_list[i]'], {}), '(annot_list[i])\n', (698, 713), True, 'import pydicom as pyd\n'), ((1621, 1724), 'numpy.pad', 'np.pad', (['data[i]', '((pad_l1, pad_l2), (pad_h1, pad_h2))', '"""constant"""'], {'constant_values': '((0, 0), (0, 0))'}), "(data[i], 
((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',\n constant_values=((0, 0), (0, 0)))\n", (1627, 1724), True, 'import numpy as np\n'), ((1781, 1885), 'numpy.pad', 'np.pad', (['annot[i]', '((pad_l1, pad_l2), (pad_h1, pad_h2))', '"""constant"""'], {'constant_values': '((0, 0), (0, 0))'}), "(annot[i], ((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',\n constant_values=((0, 0), (0, 0)))\n", (1787, 1885), True, 'import numpy as np\n'), ((2917, 2942), 'numpy.asarray', 'np.asarray', (['pil_images[0]'], {}), '(pil_images[0])\n', (2927, 2942), True, 'import numpy as np\n'), ((2944, 2969), 'numpy.asarray', 'np.asarray', (['pil_images[1]'], {}), '(pil_images[1])\n', (2954, 2969), True, 'import numpy as np\n'), ((3695, 3727), 'numpy.all', 'np.all', (['(batch_imgs[:, i, :] == 0)'], {}), '(batch_imgs[:, i, :] == 0)\n', (3701, 3727), True, 'import numpy as np\n'), ((3867, 3899), 'numpy.all', 'np.all', (['(batch_imgs[:, i, :] == 0)'], {}), '(batch_imgs[:, i, :] == 0)\n', (3873, 3899), True, 'import numpy as np\n'), ((4023, 4055), 'numpy.all', 'np.all', (['(batch_imgs[:, :, i] == 0)'], {}), '(batch_imgs[:, :, i] == 0)\n', (4029, 4055), True, 'import numpy as np\n'), ((4195, 4227), 'numpy.all', 'np.all', (['(batch_imgs[:, :, i] == 0)'], {}), '(batch_imgs[:, :, i] == 0)\n', (4201, 4227), True, 'import numpy as np\n'), ((5402, 5465), 'numpy.zeros', 'np.zeros', (['(batch_size * n_window, imgs.shape[1], imgs.shape[2])'], {}), '((batch_size * n_window, imgs.shape[1], imgs.shape[2]))\n', (5410, 5465), True, 'import numpy as np\n'), ((5900, 5932), 'numpy.all', 'np.all', (['(batch_imgs[:, i, :] == 0)'], {}), '(batch_imgs[:, i, :] == 0)\n', (5906, 5932), True, 'import numpy as np\n'), ((6072, 6104), 'numpy.all', 'np.all', (['(batch_imgs[:, i, :] == 0)'], {}), '(batch_imgs[:, i, :] == 0)\n', (6078, 6104), True, 'import numpy as np\n'), ((6228, 6260), 'numpy.all', 'np.all', (['(batch_imgs[:, :, i] == 0)'], {}), '(batch_imgs[:, :, i] == 0)\n', (6234, 6260), True, 'import numpy as np\n'), 
((6400, 6432), 'numpy.all', 'np.all', (['(batch_imgs[:, :, i] == 0)'], {}), '(batch_imgs[:, :, i] == 0)\n', (6406, 6432), True, 'import numpy as np\n'), ((3151, 3175), 'numpy.prod', 'np.prod', (['labels[i].shape'], {}), '(labels[i].shape)\n', (3158, 3175), True, 'import numpy as np\n'), ((3452, 3481), 'random.uniform', 'random.uniform', (['(0)', 'total_prob'], {}), '(0, total_prob)\n', (3466, 3481), False, 'import random\n'), ((4328, 4351), 'math.ceil', 'math.ceil', (['(r * 1.0 / 16)'], {}), '(r * 1.0 / 16)\n', (4337, 4351), False, 'import math\n'), ((4374, 4397), 'math.ceil', 'math.ceil', (['(b * 1.0 / 16)'], {}), '(b * 1.0 / 16)\n', (4383, 4397), False, 'import math\n'), ((4617, 4651), 'numpy.expand_dims', 'np.expand_dims', (['batch_imgs'], {'axis': '(3)'}), '(batch_imgs, axis=3)\n', (4631, 4651), True, 'import numpy as np\n'), ((4652, 4678), 'numpy.expand_dims', 'np.expand_dims', (['lb'], {'axis': '(3)'}), '(lb, axis=3)\n', (4666, 4678), True, 'import numpy as np\n'), ((4899, 4923), 'numpy.prod', 'np.prod', (['labels[i].shape'], {}), '(labels[i].shape)\n', (4906, 4923), True, 'import numpy as np\n'), ((5200, 5229), 'random.uniform', 'random.uniform', (['(0)', 'total_prob'], {}), '(0, total_prob)\n', (5214, 5229), False, 'import random\n'), ((6533, 6556), 'math.ceil', 'math.ceil', (['(r * 1.0 / 16)'], {}), '(r * 1.0 / 16)\n', (6542, 6556), False, 'import math\n'), ((6579, 6602), 'math.ceil', 'math.ceil', (['(b * 1.0 / 16)'], {}), '(b * 1.0 / 16)\n', (6588, 6602), False, 'import math\n'), ((7023, 7085), 'numpy.rollaxis', 'np.rollaxis', (['batch_imgs[n_window * i:n_window * (i + 1)]', '(0)', '(3)'], {}), '(batch_imgs[n_window * i:n_window * (i + 1)], 0, 3)\n', (7034, 7085), True, 'import numpy as np\n'), ((7284, 7310), 'numpy.expand_dims', 'np.expand_dims', (['lb'], {'axis': '(3)'}), '(lb, axis=3)\n', (7298, 7310), True, 'import numpy as np\n'), ((3110, 3142), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels[i] == 1)'], {}), '(labels[i] == 1)\n', (3126, 3142), 
True, 'import numpy as np\n'), ((4858, 4890), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels[i] == 1)'], {}), '(labels[i] == 1)\n', (4874, 4890), True, 'import numpy as np\n'), ((10021, 10053), 'numpy.expand_dims', 'np.expand_dims', (['trn_imgs'], {'axis': '(3)'}), '(trn_imgs, axis=3)\n', (10035, 10053), True, 'import numpy as np\n'), ((10057, 10091), 'numpy.expand_dims', 'np.expand_dims', (['trn_labels'], {'axis': '(3)'}), '(trn_labels, axis=3)\n', (10071, 10091), True, 'import numpy as np\n'), ((10151, 10183), 'numpy.expand_dims', 'np.expand_dims', (['val_imgs'], {'axis': '(3)'}), '(val_imgs, axis=3)\n', (10165, 10183), True, 'import numpy as np\n'), ((10187, 10221), 'numpy.expand_dims', 'np.expand_dims', (['val_labels'], {'axis': '(3)'}), '(val_labels, axis=3)\n', (10201, 10221), True, 'import numpy as np\n'), ((824, 843), 'numpy.where', 'np.where', (['(y[j] == 1)'], {}), '(y[j] == 1)\n', (832, 843), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pyre-ignore-all-errors[2,3,16,33,6,23]
# NOTE: most Any type in this file should be torch._C.Value - which was not yet annotated.
# pyre also doesn't work well with many Optional in this file
import typing
from collections import Counter, OrderedDict
from typing import Any, Callable, List, Optional
from numpy import prod
# Type alias for a flop/activation handle: maps an op's jit (inputs, outputs)
# value lists to a Counter keyed by op name.
Handle = Callable[[List[Any], List[Any]], typing.Counter[str]]
def generic_activation_jit(
    op_name: str,
) -> Callable[[List[Any], List[Any]], typing.Counter[str]]:
    """
    Build an activation-counting handle for the given operation.

    The returned callable reports the number of activations as the element
    count of the op's first output, derived from its jit output shape.

    Args:
        op_name (str): The name of the operation.

    Returns:
        Callable: An activation handle for the given operation.
    """

    def _count_activations(outputs: List[Any]) -> int:
        # Activations == number of elements in the first output tensor.
        return prod(get_shape(outputs[0]))

    return lambda inputs, outputs: Counter({op_name: _count_activations(outputs)})
def get_shape(val: Any) -> Optional[List[int]]:
    """
    Extract the tensor shape from a jit value object.

    Args:
        val (torch._C.Value): jit value object.

    Returns:
        list(int): the tensor sizes, or None when the shape is not fully known.
    """
    if not val.isCompleteTensor():
        return None
    return val.type().sizes()
"""
Below are flop counters for various ops. Every counter has the following signature:
Args:
inputs (list(torch._C.Value)): The inputs of the op in the form of a list of jit object.
outputs (list(torch._C.Value)): The outputs of the op in the form of a list of jit object.
Returns:
Counter: A Counter dictionary that records the number of flops for each operation.
"""
def addmm_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for fully connected layers (nn.Linear via aten::addmm).
    """
    # inputs has length 3; elements 1 and 2 carry the two matrix operands.
    mat_shapes = [get_shape(v) for v in inputs[1:3]]
    # mat_shapes[0]: [batch size, input feature dimension]
    # mat_shapes[1]: [batch size, output feature dimension]
    assert len(mat_shapes[0]) == 2, mat_shapes[0]
    assert len(mat_shapes[1]) == 2, mat_shapes[1]
    batch, in_dim = mat_shapes[0]
    out_dim = mat_shapes[1][1]
    # One multiply per (sample, input feature, output feature) triple.
    return Counter({"addmm": batch * in_dim * out_dim})
def linear_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for the aten::linear operator.
    """
    # Unlike aten::addmm, the relevant operands are inputs[0] and inputs[1].
    in_shape = get_shape(inputs[0])  # [dim0, dim1, ..., input_feature_dim]
    w_shape = get_shape(inputs[1])   # [output_feature_dim, input_feature_dim]
    assert in_shape[-1] == w_shape[-1]
    # One multiply per (input element, output feature).
    return Counter({"linear": prod(in_shape) * w_shape[0]})
def bmm_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for the bmm (batched matrix multiply) operation.
    """
    # Exactly two tensor operands are expected.
    assert len(inputs) == 2, len(inputs)
    shape_a = get_shape(inputs[0])  # [n, c, t]
    shape_b = get_shape(inputs[1])  # only the last dimension (d) is used
    n, c, t = shape_a
    d = shape_b[-1]
    return Counter({"bmm": n * c * t * d})
def conv_flop_count(
    x_shape: List[int], w_shape: List[int], out_shape: List[int]
) -> typing.Counter[str]:
    """
    Count flops for convolution.  Only multiplications are counted;
    additions and the bias term are ignored.

    Args:
        x_shape (list(int)): The input shape before convolution.
        w_shape (list(int)): The filter shape.
        out_shape (list(int)): The output shape after convolution.

    Returns:
        Counter: A Counter dictionary that records the number of flops for
        each operation.
    """
    batch = x_shape[0]
    c_in = w_shape[1]
    c_out = out_shape[1]
    # One multiply per (output spatial element, output channel,
    # input channel, kernel element).
    flop = batch * prod(out_shape[2:]) * c_out * c_in * prod(w_shape[2:])
    return Counter({"conv": flop})
def conv_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for convolution.
    """
    # Convolution takes 12 jit inputs (13 since PyTorch #40737 added
    # user_enabled_tf32): tensor, filter, bias, stride, padding, dilation,
    # transposed, out_pad, groups, plus cudnn/tf32 flags.
    assert len(inputs) in (12, 13), len(inputs)
    x, w = inputs[0], inputs[1]
    return conv_flop_count(get_shape(x), get_shape(w), get_shape(outputs[0]))
def einsum_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for the einsum operation. We currently support
    two einsum operations: "nct,ncp->ntp" and "ntg,ncg->nct".
    """
    # Inputs of einsum should be a list of length 2.
    # Inputs[0] stores the equation used for einsum.
    # Inputs[1] stores the list of input shapes.
    assert len(inputs) == 2, len(inputs)
    equation = inputs[0].toIValue()
    # Get rid of white space in the equation string.
    equation = equation.replace(" ", "")
    # Re-map equation so that same equation with different alphabet
    # representations will look the same.
    letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
    # First distinct letter maps to 'a' (ord 97), the second to 'b', etc.
    mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
    equation = equation.translate(mapping)
    input_shapes_jit = inputs[1].node().inputs()
    input_shapes = [get_shape(v) for v in input_shapes_jit]
    # Canonical form of "nct,ncp->ntp": batched matmul over the shared axis.
    if equation == "abc,abd->acd":
        n, c, t = input_shapes[0]
        p = input_shapes[-1][-1]
        flop = n * c * t * p
        flop_counter = Counter({"einsum": flop})
        return flop_counter
    # Canonical form of "ntg,ncg->nct".
    elif equation == "abc,adc->adb":
        n, t, g = input_shapes[0]
        c = input_shapes[-1][1]
        flop = n * t * g * c
        flop_counter = Counter({"einsum": flop})
        return flop_counter
    else:
        raise NotImplementedError("Unsupported einsum operation.")
def matmul_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """
    Count flops for matmul.
    """
    shapes = [get_shape(v) for v in inputs]
    assert len(shapes) == 2, shapes
    # The inner dimensions must agree: [..., m, k] x [..., k, n].
    assert shapes[0][-1] == shapes[1][-2], shapes
    flop = prod(shapes[0]) * shapes[-1][-1]
    return Counter({"matmul": flop})
def norm_flop_counter(name: str, affine_arg_index: int) -> Handle:
    """
    Build a flop-counting handle for normalization layers.

    Args:
        name: name to return in the counter
        affine_arg_index: index of the affine argument in inputs
    """

    def _norm_flops(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
        """Count flops for one norm-layer invocation."""
        # Inputs[0] carries the shape of the normalized tensor.
        input_shape = get_shape(inputs[0])
        # A present affine (weight/bias) argument adds one op per element.
        has_affine = get_shape(inputs[affine_arg_index]) is not None
        assert 2 <= len(input_shape) <= 5, input_shape
        # 4-5 ops per element is just a rough estimate.
        return Counter({name: prod(input_shape) * (5 if has_affine else 4)})

    return _norm_flops
def elementwise_flop_counter(
    name: str, input_scale: float = 1, output_scale: float = 0
) -> Handle:
    """
    Build a handle that counts flops as
    input_tensor.numel() * input_scale + output_tensor.numel() * output_scale.

    Args:
        name: name to return in the counter
        input_scale: scale of the input tensor (first argument)
        output_scale: scale of the output tensor (first element in outputs)
    """

    def _elementwise_flops(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
        total = 0
        # Skip the shape lookup entirely when a scale is zero.
        if input_scale != 0:
            total += input_scale * prod(get_shape(inputs[0]))
        if output_scale != 0:
            total += output_scale * prod(get_shape(outputs[0]))
        return Counter({name: total})

    return _elementwise_flops
| [
"collections.Counter",
"numpy.prod"
] | [((2811, 2835), 'collections.Counter', 'Counter', (["{'addmm': flop}"], {}), "({'addmm': flop})\n", (2818, 2835), False, 'from collections import Counter, OrderedDict\n'), ((3424, 3450), 'collections.Counter', 'Counter', (["{'linear': flops}"], {}), "({'linear': flops})\n", (3431, 3450), False, 'from collections import Counter, OrderedDict\n'), ((3897, 3919), 'collections.Counter', 'Counter', (["{'bmm': flop}"], {}), "({'bmm': flop})\n", (3904, 3919), False, 'from collections import Counter, OrderedDict\n'), ((4588, 4607), 'numpy.prod', 'prod', (['out_shape[2:]'], {}), '(out_shape[2:])\n', (4592, 4607), False, 'from numpy import prod\n'), ((4626, 4643), 'numpy.prod', 'prod', (['w_shape[2:]'], {}), '(w_shape[2:])\n', (4630, 4643), False, 'from numpy import prod\n'), ((4731, 4754), 'collections.Counter', 'Counter', (["{'conv': flop}"], {}), "({'conv': flop})\n", (4738, 4754), False, 'from collections import Counter, OrderedDict\n'), ((7412, 7437), 'collections.Counter', 'Counter', (["{'matmul': flop}"], {}), "({'matmul': flop})\n", (7419, 7437), False, 'from collections import Counter, OrderedDict\n'), ((1333, 1348), 'numpy.prod', 'prod', (['out_shape'], {}), '(out_shape)\n', (1337, 1348), False, 'from numpy import prod\n'), ((3362, 3383), 'numpy.prod', 'prod', (['input_shapes[0]'], {}), '(input_shapes[0])\n', (3366, 3383), False, 'from numpy import prod\n'), ((6607, 6632), 'collections.Counter', 'Counter', (["{'einsum': flop}"], {}), "({'einsum': flop})\n", (6614, 6632), False, 'from collections import Counter, OrderedDict\n'), ((7348, 7369), 'numpy.prod', 'prod', (['input_shapes[0]'], {}), '(input_shapes[0])\n', (7352, 7369), False, 'from numpy import prod\n'), ((8145, 8166), 'collections.Counter', 'Counter', (['{name: flop}'], {}), '({name: flop})\n', (8152, 8166), False, 'from collections import Counter, OrderedDict\n'), ((8966, 8986), 'collections.Counter', 'Counter', (['{name: ret}'], {}), '({name: ret})\n', (8973, 8986), False, 'from collections import 
Counter, OrderedDict\n'), ((6817, 6842), 'collections.Counter', 'Counter', (["{'einsum': flop}"], {}), "({'einsum': flop})\n", (6824, 6842), False, 'from collections import Counter, OrderedDict\n'), ((8085, 8102), 'numpy.prod', 'prod', (['input_shape'], {}), '(input_shape)\n', (8089, 8102), False, 'from numpy import prod\n'), ((8821, 8832), 'numpy.prod', 'prod', (['shape'], {}), '(shape)\n', (8825, 8832), False, 'from numpy import prod\n'), ((8939, 8950), 'numpy.prod', 'prod', (['shape'], {}), '(shape)\n', (8943, 8950), False, 'from numpy import prod\n')] |
import os
import uuid
import base64
import logging
import urllib.request
import time
import numpy as np
import yaml
import cv2
import paddle.fluid as fluid
from yolo_infer import offset_to_lengths
from yolo_infer import coco17_category_info, bbox2out
from yolo_infer import Preprocess
def temp_directory():
    """Return the absolute path of the local ``./data`` directory."""
    data_dir = os.path.join('.', 'data')
    return os.path.abspath(data_dir)
# Location of the pretrained YOLOv3-DarkNet model under the local data dir.
COCO_MODEL_PATH = os.path.join(temp_directory(), "yolov3_darknet")
# Model configuration (architecture / preprocessing) shipped next to it.
YOLO_CONFIG_PATH = os.path.join(COCO_MODEL_PATH, "yolo.yml")
# Scratch directory for decoded uploads and downloaded images.
LOCAL_TMP_PATH = "./tmp/"
class BoundingBox:
    """Axis-aligned detection box with a confidence score and class label.

    Coordinates follow image convention: (x1, y1) is the top-left corner,
    (x2, y2) the bottom-right, in pixels.
    """

    def __init__(self, x1, y1, x2, y2, score, label=None):
        self.x1 = x1
        self.x2 = x2
        self.y1 = y1
        self.y2 = y2
        self.score = score  # detection confidence
        self.label = label  # human-readable class name, if known

    def __repr__(self):
        # Debug-friendly representation; added for logging/inspection.
        return (f"{type(self).__name__}(x1={self.x1}, y1={self.y1}, "
                f"x2={self.x2}, y2={self.y2}, score={self.score}, "
                f"label={self.label!r})")
def cv2base64(image):
    """Encode a cv2/numpy BGR image as a base64 JPEG string.

    The original round-tripped through a temp file on disk, which it never
    deleted and whose directory might not exist; encoding in memory with
    cv2.imencode avoids both problems and produces the same JPEG payload.

    Raises:
        Exception: re-raises any encoding failure after logging it.
    """
    try:
        ok, buf = cv2.imencode(".jpg", image)
        if not ok:
            raise ValueError("cv2.imencode failed to encode image as JPEG")
        base64_data = base64.b64encode(buf.tobytes())
        return base64_data.decode("utf-8")
    except Exception as e:
        err_msg = "Convert cv2 object to base64 failed: "
        logging.error(err_msg, e, exc_info=True)
        raise e
class YOLO_v3:
    """YOLOv3 object detector backed by a PaddlePaddle inference model.

    Loads the pretrained darknet model from COCO_MODEL_PATH at construction
    time, runs one inference on a dummy image, and exposes execute() /
    bulk_execute() that return base64-encoded crops of detected objects.
    """
    def __init__(self):
        self.model_init = False
        # Reads device/memory env settings and creates self.place / self.executor.
        self.user_config = self.get_operator_config()
        self.model_path = COCO_MODEL_PATH
        self.config_path = YOLO_CONFIG_PATH
        with open(self.config_path) as f:
            self.conf = yaml.safe_load(f)
        self.infer_prog, self.feed_var_names, self.fetch_targets = fluid.io.load_inference_model(
            dirname=self.model_path,
            executor=self.executor,
            model_filename='__model__',
            params_filename='__params__')
        self.clsid2catid, self.catid2name = coco17_category_info(False)
        # One run on a dummy image (presumably a warm-up — TODO confirm).
        self.execute(np.zeros((300, 300, 3), dtype='float32'))
    def get_operator_config(self):
        """Choose CPU or GPU execution from env vars and build the executor.

        Reads "device_id" (e.g. "/gpu:0", default "/cpu:0") and
        "gpu_mem_limit"; returns a dict with the applied GPU memory limit
        (empty on CPU).
        """
        try:
            config = {}
            self.device_str = os.environ.get("device_id", "/cpu:0")
            if "gpu" not in self.device_str.lower():
                self.place = fluid.CPUPlace()
            else:
                gpu_device_id = int(self.device_str.split(':')[-1])
                gpu_mem_limit = float(os.environ.get("gpu_mem_limit", 0.3))
                os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = str(
                    gpu_mem_limit)
                config["gpu_memory_limit"] = gpu_mem_limit
                self.place = fluid.CUDAPlace(gpu_device_id)
            self.executor = fluid.Executor(self.place)
            return config
        except Exception as e:
            logging.error("unexpected error happen during read config",
                          exc_info=True)
            raise e
    def get_bboxes(self, bbox_results, threshold=0.5):
        """Convert raw detection dicts into BoundingBox lists, one per image,
        keeping only detections with score above `threshold`."""
        bboxes = [[]]
        for item in bbox_results:
            box, score, cls = item["bbox"], item["score"], item["category_id"]
            idx = item["image_id"]
            if score > threshold:
                # Only single-image batches are supported here.
                assert idx == 0, "get_bboxes function now must input image = 1"
                # box is (x, y, w, h); convert to corner coordinates.
                bboxes[idx].append(BoundingBox(x1=box[0], y1=box[1],
                                              x2=box[0] + box[2],
                                              y2=box[1] + box[3],
                                              score=score,
                                              label=self.catid2name[int(cls)]))
        return bboxes
    @staticmethod
    def get_obj_image(images, bboxes):
        """Crop each bounding box out of its frame and base64-encode the crops."""
        obj_images = []
        for i, frame_bboxes in enumerate(bboxes):
            frame_object = []
            for j, bbox in enumerate(frame_bboxes):
                tmp_obj = images[i][int(bbox.y1):int(
                    bbox.y2), int(bbox.x1):int(bbox.x2)]
                frame_object.append(cv2base64(tmp_obj))
            obj_images.append(frame_object)
        return obj_images
    def execute(self, image):
        """Detect objects in one image; return base64 crops of the detections."""
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img_data = Preprocess(image,
                              self.conf['arch'],
                              self.conf['Preprocess'])
        data_dict = {k: v for k, v in zip(self.feed_var_names, img_data)}
        outs = self.executor.run(self.infer_prog,
                                 feed=data_dict,
                                 fetch_list=self.fetch_targets,
                                 return_numpy=False)
        # Last fetch target holds the detections as a LoD tensor.
        out = outs[-1]
        lod = out.lod()
        lengths = offset_to_lengths(lod)
        np_data = np.array(out)
        res = {'bbox': (np_data, lengths), 'im_id': np.array([[0]])}
        bbox_results = bbox2out([res], self.clsid2catid, False)
        bboxes = self.get_bboxes(bbox_results, 0.5)
        # Second conversion swaps the channels back before cropping.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        objs = self.get_obj_image([image], bboxes)
        return objs[0]
    def bulk_execute(self, images):
        """Run execute() over a list of images, preserving order."""
        objs = []
        for image in images:
            objs.append(self.execute(image))
        return objs
    # Static metadata describing this operator to the serving pipeline.
    @property
    def name(self):
        return "paddle_yolo"
    @property
    def type(self):
        return "processor"
    @property
    def input(self):
        return "image"
    @property
    def output(self):
        return "images"
    @property
    def dimension(self):
        return "-1"
    @property
    def metric_type(self):
        return "-1"
def save_tmp_file(name, file_data=None, url=None):
    """Persist an image to LOCAL_TMP_PATH and return its file path.

    Exactly one source is expected:
        file_data: a base64 string, optionally with a "data:image/...;base64,"
            prefix (the prefix overrides the default 'jpg' extension).
        url: a remote location to download from.

    Raises:
        ValueError: if the data URI declares an encoding other than base64.
        Exception: if the download from `url` fails.
    """
    start = time.time()
    extension = 'jpg'
    # Make sure the scratch directory exists before writing into it.
    os.makedirs(LOCAL_TMP_PATH, exist_ok=True)
    file_path = os.path.join(LOCAL_TMP_PATH, name + '.' + extension)
    if file_data:
        img_data = file_data.split(",")
        if len(img_data) == 2:
            # Data URI form: "data:image/<ext>;base64,<payload>".
            posting = img_data[0]
            data_type = posting.split("/")[1]
            extension = data_type.split(";")[0]
            encode_method = data_type.split(";")[1]
            if encode_method != "base64":
                logging.error("Encode method not base64")
                # The original used a bare `raise` with no active exception,
                # which produced an unrelated RuntimeError; raise a
                # meaningful error instead.
                raise ValueError("Encode method not base64")
            imgstring = img_data[1]
        else:
            imgstring = img_data[0]
        # The extension may have changed after parsing the data URI.
        file_path = os.path.join(LOCAL_TMP_PATH, name + '.' + extension)
        with open(file_path, "wb") as f:
            f.write(base64.b64decode(imgstring))
    if url:
        try:
            urllib.request.urlretrieve(url, file_path)
        except Exception as e:
            logging.error("Download file from url error : %s", str(e), exc_info=True)
            raise
    end = time.time()
    logging.info(' save_tmp_file cost: {:.3f}s'.format(end - start))
    return file_path
def run(detector, images, urls):
    """Run `detector` on a batch of images given as base64 data or URLs.

    Args:
        detector: object exposing execute(image) -> detection results.
        images: iterable of base64-encoded image strings (takes precedence).
        urls: iterable of image URLs, used only when `images` is falsy.

    Returns:
        list of per-image detection results. Any failure aborts the batch
        with a logged error and returns the results gathered so far.
    """
    result_images = []
    start = time.time()
    try:
        # Both input forms funnel through the same fetch-then-detect path;
        # the original duplicated this loop body per branch.
        if images:
            sources = [("file_data", img) for img in images]
        else:
            sources = [("url", url) for url in urls]
        for kind, value in sources:
            file_name = "{}-{}".format("processor", uuid.uuid4().hex)
            image_path = save_tmp_file(file_name, **{kind: value})
            if image_path:
                image = cv2.imread(image_path)
                result_images.append(detector.execute(image))
    except Exception as e:
        logging.error("something error: %s", str(e), exc_info=True)
    end = time.time()
    logging.info('%s cost: {:.3f}s, get %d results'.format(end - start),
                 "yolov3 detector", len(result_images))
    return result_images
| [
"yolo_infer.Preprocess",
"yolo_infer.coco17_category_info",
"yolo_infer.offset_to_lengths",
"yolo_infer.bbox2out",
"numpy.array",
"paddle.fluid.Executor",
"logging.error",
"paddle.fluid.CPUPlace",
"uuid.uuid4",
"uuid.uuid1",
"cv2.cvtColor",
"time.time",
"cv2.imread",
"cv2.imwrite",
"padd... | [((451, 492), 'os.path.join', 'os.path.join', (['COCO_MODEL_PATH', '"""yolo.yml"""'], {}), "(COCO_MODEL_PATH, 'yolo.yml')\n", (463, 492), False, 'import os\n'), ((5471, 5482), 'time.time', 'time.time', ([], {}), '()\n', (5480, 5482), False, 'import time\n'), ((5521, 5573), 'os.path.join', 'os.path.join', (['LOCAL_TMP_PATH', "(name + '.' + extension)"], {}), "(LOCAL_TMP_PATH, name + '.' + extension)\n", (5533, 5573), False, 'import os\n'), ((6579, 6590), 'time.time', 'time.time', ([], {}), '()\n', (6588, 6590), False, 'import time\n'), ((6752, 6763), 'time.time', 'time.time', ([], {}), '()\n', (6761, 6763), False, 'import time\n'), ((7560, 7571), 'time.time', 'time.time', ([], {}), '()\n', (7569, 7571), False, 'import time\n'), ((336, 361), 'os.path.join', 'os.path.join', (['"""."""', '"""data"""'], {}), "('.', 'data')\n", (348, 361), False, 'import os\n'), ((856, 889), 'cv2.imwrite', 'cv2.imwrite', (['tmp_file_name', 'image'], {}), '(tmp_file_name, image)\n', (867, 889), False, 'import cv2\n'), ((1584, 1725), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'self.model_path', 'executor': 'self.executor', 'model_filename': '"""__model__"""', 'params_filename': '"""__params__"""'}), "(dirname=self.model_path, executor=self.\n executor, model_filename='__model__', params_filename='__params__')\n", (1613, 1725), True, 'import paddle.fluid as fluid\n'), ((1814, 1841), 'yolo_infer.coco17_category_info', 'coco17_category_info', (['(False)'], {}), '(False)\n', (1834, 1841), False, 'from yolo_infer import coco17_category_info, bbox2out\n'), ((3983, 4021), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3995, 4021), False, 'import cv2\n'), ((4042, 4103), 'yolo_infer.Preprocess', 'Preprocess', (['image', "self.conf['arch']", "self.conf['Preprocess']"], {}), "(image, self.conf['arch'], self.conf['Preprocess'])\n", (4052, 4103), False, 'from yolo_infer import 
Preprocess\n'), ((4519, 4541), 'yolo_infer.offset_to_lengths', 'offset_to_lengths', (['lod'], {}), '(lod)\n', (4536, 4541), False, 'from yolo_infer import offset_to_lengths\n'), ((4560, 4573), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4568, 4573), True, 'import numpy as np\n'), ((4667, 4707), 'yolo_infer.bbox2out', 'bbox2out', (['[res]', 'self.clsid2catid', '(False)'], {}), '([res], self.clsid2catid, False)\n', (4675, 4707), False, 'from yolo_infer import coco17_category_info, bbox2out\n'), ((4776, 4814), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4788, 4814), False, 'import cv2\n'), ((6135, 6187), 'os.path.join', 'os.path.join', (['LOCAL_TMP_PATH', "(name + '.' + extension)"], {}), "(LOCAL_TMP_PATH, name + '.' + extension)\n", (6147, 6187), False, 'import os\n'), ((1162, 1202), 'logging.error', 'logging.error', (['err_msg', 'e'], {'exc_info': '(True)'}), '(err_msg, e, exc_info=True)\n', (1175, 1202), False, 'import logging\n'), ((1498, 1515), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1512, 1515), False, 'import yaml\n'), ((1863, 1903), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""float32"""'}), "((300, 300, 3), dtype='float32')\n", (1871, 1903), True, 'import numpy as np\n'), ((2008, 2045), 'os.environ.get', 'os.environ.get', (['"""device_id"""', '"""/cpu:0"""'], {}), "('device_id', '/cpu:0')\n", (2022, 2045), False, 'import os\n'), ((2562, 2588), 'paddle.fluid.Executor', 'fluid.Executor', (['self.place'], {}), '(self.place)\n', (2576, 2588), True, 'import paddle.fluid as fluid\n'), ((4627, 4642), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (4635, 4642), True, 'import numpy as np\n'), ((834, 846), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (844, 846), False, 'import uuid\n'), ((2128, 2144), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2142, 2144), True, 'import paddle.fluid as fluid\n'), ((2503, 2533), 'paddle.fluid.CUDAPlace', 
'fluid.CUDAPlace', (['gpu_device_id'], {}), '(gpu_device_id)\n', (2518, 2533), True, 'import paddle.fluid as fluid\n'), ((2658, 2732), 'logging.error', 'logging.error', (['"""unexpected error happen during read config"""'], {'exc_info': '(True)'}), "('unexpected error happen during read config', exc_info=True)\n", (2671, 2732), False, 'import logging\n'), ((5901, 5942), 'logging.error', 'logging.error', (['"""Encode method not base64"""'], {}), "('Encode method not base64')\n", (5914, 5942), False, 'import logging\n'), ((6249, 6276), 'base64.b64decode', 'base64.b64decode', (['imgstring'], {}), '(imgstring)\n', (6265, 6276), False, 'import base64\n'), ((2269, 2305), 'os.environ.get', 'os.environ.get', (['"""gpu_mem_limit"""', '(0.3)'], {}), "('gpu_mem_limit', 0.3)\n", (2283, 2305), False, 'import os\n'), ((7025, 7047), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7035, 7047), False, 'import cv2\n'), ((7353, 7375), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7363, 7375), False, 'import cv2\n'), ((6879, 6891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6889, 6891), False, 'import uuid\n'), ((7213, 7225), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7223, 7225), False, 'import uuid\n')] |
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, optim
from model import MyAwesomeModel
class TrainOREvaluate(object):
    """ Helper class that will help launch class methods as commands
        from a single script

        The first CLI argument selects the subcommand (``train`` or
        ``evaluate``); remaining arguments are parsed by that subcommand.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Script for either training or evaluating",
            usage="python main.py <command>"
        )
        parser.add_argument("command", help="Subcommand to run")
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command')
            parser.print_help()
            exit(1)
        # use dispatch pattern to invoke method with same name
        getattr(self, args.command)()

    def train(self):
        """Train MyAwesomeModel on ../../data/processed/training.pt, plot the
        per-epoch loss curve, and save both the figure and the model."""
        loss_list = []
        print("Training day and night")
        parser = argparse.ArgumentParser(description='Training arguments')
        parser.add_argument('--lr', default=0.003)
        args = parser.parse_args(sys.argv[2:])
        print(args)
        model = MyAwesomeModel()
        criterion = nn.NLLLoss()
        optimizer = optim.Adam(model.parameters(), lr=float(args.lr))
        train_data, train_label = torch.load('../../data/processed/training.pt')
        # Add a channel dimension: (N, H, W) -> (N, 1, H, W).
        train_data = torch.unsqueeze(train_data, 1)
        trainloader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(*(train_data, train_label)),
            batch_size=64, shuffle=True)
        epochs = 5
        model.train()
        for e in range(epochs):
            running_loss = 0
            for images, labels in trainloader:
                optimizer.zero_grad()
                log_ps = model(images.float())
                loss = criterion(log_ps, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
            else:
                # for/else: record the epoch's mean loss after the loop.
                loss_list.append(running_loss / len(trainloader))
                print(f"Training loss: {running_loss/len(trainloader)}")
        plt.figure()
        epoch = np.arange(len(loss_list))
        print(len(loss_list))
        print(epoch)
        plt.plot(epoch, loss_list)
        plt.legend(['Training loss'])
        plt.xlabel('Epochs'), plt.ylabel('Loss')
        # Bug fix: save BEFORE show. plt.show() finishes and clears the
        # figure, so the original savefig-after-show wrote a blank image.
        plt.savefig('../../reports/figures/loss_curve')
        plt.show()
        torch.save(model, '../../models/model.pth')

    def evaluate(self):
        """Evaluate a saved model on ../../data/processed/test.pt and plot
        per-batch accuracy."""
        accuracy_list = []
        print("Evaluating until hitting the ceiling")
        parser = argparse.ArgumentParser(description='Training arguments')
        parser.add_argument('--load_model_from', default='../../models/model.pth')
        args = parser.parse_args(sys.argv[2:])
        print(args)
        if args.load_model_from:
            model = torch.load(args.load_model_from)
        test_data, test_label = torch.load('../../data/processed/test.pt')
        test_data = torch.unsqueeze(test_data, 1)
        testloader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(*(test_data, test_label)),
            batch_size=64, shuffle=True)
        model.eval()
        with torch.no_grad():
            for images, labels in testloader:
                ps = torch.exp(model(images.float()))
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy = torch.mean(equals.type(torch.FloatTensor))
                accuracy_list.append(accuracy.item() * 100)
            else:
                # for/else: report the last batch's accuracy after the loop.
                print(f'Accuracy: {accuracy.item()*100}%')
        epoch = np.arange(len(accuracy_list))
        print("mean of accuracy = ", np.mean(accuracy_list))
        plt.figure()
        plt.plot(epoch, accuracy_list)
        plt.legend(['Test set accuacy'])
        plt.xlabel('Epochs'), plt.ylabel('Accuacy')
        plt.show()
        torch.save(model, '../../models/model.pth')
if __name__ == '__main__':
    # Constructing the helper dispatches to .train()/.evaluate() per argv[1].
    TrainOREvaluate()
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"model.MyAwesomeModel",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"torch.unsqueeze",
"torch.load",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"torch.utils.data.TensorDataset",
"matplotlib.pyplot.figure",
"torch.nn.NLLLoss",
"... | [((376, 499), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for either training or evaluating"""', 'usage': '"""python main.py <command>"""'}), "(description=\n 'Script for either training or evaluating', usage=\n 'python main.py <command>')\n", (399, 499), False, 'import argparse\n'), ((977, 1034), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training arguments"""'}), "(description='Training arguments')\n", (1000, 1034), False, 'import argparse\n'), ((1266, 1282), 'model.MyAwesomeModel', 'MyAwesomeModel', ([], {}), '()\n', (1280, 1282), False, 'from model import MyAwesomeModel\n'), ((1303, 1315), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1313, 1315), False, 'from torch import nn, optim\n'), ((1479, 1525), 'torch.load', 'torch.load', (['"""../../data/processed/training.pt"""'], {}), "('../../data/processed/training.pt')\n", (1489, 1525), False, 'import torch\n'), ((1547, 1577), 'torch.unsqueeze', 'torch.unsqueeze', (['train_data', '(1)'], {}), '(train_data, 1)\n', (1562, 1577), False, 'import torch\n'), ((2364, 2376), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2374, 2376), True, 'import matplotlib.pyplot as plt\n'), ((2478, 2504), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'loss_list'], {}), '(epoch, loss_list)\n', (2486, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2542), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training loss']"], {}), "(['Training loss'])\n", (2523, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2608, 2610), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2666), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../../reports/figures/loss_curve"""'], {}), "('../../reports/figures/loss_curve')\n", (2630, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2675, 2718), 'torch.save', 'torch.save', (['model', 
'"""../../models/model.pth"""'], {}), "(model, '../../models/model.pth')\n", (2685, 2718), False, 'import torch\n'), ((2842, 2899), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training arguments"""'}), "(description='Training arguments')\n", (2865, 2899), False, 'import argparse\n'), ((3277, 3319), 'torch.load', 'torch.load', (['"""../../data/processed/test.pt"""'], {}), "('../../data/processed/test.pt')\n", (3287, 3319), False, 'import torch\n'), ((3340, 3369), 'torch.unsqueeze', 'torch.unsqueeze', (['test_data', '(1)'], {}), '(test_data, 1)\n', (3355, 3369), False, 'import torch\n'), ((4166, 4178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4176, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4187, 4217), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'accuracy_list'], {}), '(epoch, accuracy_list)\n', (4195, 4217), True, 'import matplotlib.pyplot as plt\n'), ((4226, 4258), 'matplotlib.pyplot.legend', 'plt.legend', (["['Test set accuacy']"], {}), "(['Test set accuacy'])\n", (4236, 4258), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4327, 4329), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4381), 'torch.save', 'torch.save', (['model', '"""../../models/model.pth"""'], {}), "(model, '../../models/model.pth')\n", (4348, 4381), False, 'import torch\n'), ((1640, 1698), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['*(train_data, train_label)'], {}), '(*(train_data, train_label))\n', (1670, 1698), False, 'import torch\n'), ((2551, 2571), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2561, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2583, 2591), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3244), 'torch.load', 'torch.load', (['args.load_model_from'], {}), 
'(args.load_model_from)\n', (3222, 3244), False, 'import torch\n'), ((3431, 3487), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['*(test_data, test_label)'], {}), '(*(test_data, test_label))\n', (3461, 3487), False, 'import torch\n'), ((3551, 3566), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3564, 3566), False, 'import torch\n'), ((4134, 4156), 'numpy.mean', 'np.mean', (['accuracy_list'], {}), '(accuracy_list)\n', (4141, 4156), True, 'import numpy as np\n'), ((4267, 4287), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4277, 4287), True, 'import matplotlib.pyplot as plt\n'), ((4289, 4310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuacy"""'], {}), "('Accuacy')\n", (4299, 4310), True, 'import matplotlib.pyplot as plt\n')] |
"""A selection of rate functions, i.e., *speed curves* for animations.
Please find a standard list at https://easings.net/. Here is a picture
for the non-standard ones
.. manim:: RateFuncExample
:save_last_frame:
class RateFuncExample(Scene):
def construct(self):
x = VGroup()
for k, v in rate_functions.__dict__.items():
if "function" in str(v):
if (
not k.startswith("__")
and not k.startswith("sqrt")
and not k.startswith("bezier")
):
try:
rate_func = v
plot = (
ParametricFunction(
lambda x: [x, rate_func(x), 0],
t_min=0,
t_max=1,
use_smoothing=False,
color=YELLOW,
)
.stretch_to_fit_width(1.5)
.stretch_to_fit_height(1)
)
plot_bg = SurroundingRectangle(plot).set_color(WHITE)
plot_title = (
Text(rate_func.__name__, weight=BOLD)
.scale(0.5)
.next_to(plot_bg, UP, buff=0.1)
)
x.add(VGroup(plot_bg, plot, plot_title))
except: # because functions `not_quite_there`, `function squish_rate_func` are not working.
pass
x.arrange_in_grid(cols=8)
x.height = config.frame_height
x.width = config.frame_width
x.move_to(ORIGIN).scale(0.95)
self.add(x)
There are primarily 3 kinds of standard easing functions:
#. Ease In - The animation has a smooth start.
#. Ease Out - The animation has a smooth end.
#. Ease In Out - The animation has a smooth start as well as smooth end.
.. note:: The standard functions are not exported, so to use them you do something like this:
rate_func=rate_functions.ease_in_sine
On the other hand, the non-standard functions, which are used more commonly, are exported and can be used directly.
.. manim:: RateFunctions1Example
class RateFunctions1Example(Scene):
def construct(self):
line1 = Line(3*LEFT, 3*RIGHT).shift(UP).set_color(RED)
line2 = Line(3*LEFT, 3*RIGHT).set_color(GREEN)
line3 = Line(3*LEFT, 3*RIGHT).shift(DOWN).set_color(BLUE)
dot1 = Dot().move_to(line1.get_left())
dot2 = Dot().move_to(line2.get_left())
dot3 = Dot().move_to(line3.get_left())
label1 = Tex("Ease In").next_to(line1, RIGHT)
label2 = Tex("Ease out").next_to(line2, RIGHT)
label3 = Tex("Ease In Out").next_to(line3, RIGHT)
self.play(
FadeIn(VGroup(line1, line2, line3)),
FadeIn(VGroup(dot1, dot2, dot3)),
Write(VGroup(label1, label2, label3)),
)
self.play(
MoveAlongPath(dot1, line1, rate_func=rate_functions.ease_in_sine),
MoveAlongPath(dot2, line2, rate_func=rate_functions.ease_out_sine),
MoveAlongPath(dot3, line3, rate_func=rate_functions.ease_in_out_sine),
run_time=7
)
self.wait()
"""
__all__ = [
"linear",
"smooth",
"rush_into",
"rush_from",
"slow_into",
"double_smooth",
"there_and_back",
"there_and_back_with_pause",
"running_start",
"not_quite_there",
"wiggle",
"squish_rate_func",
"lingering",
"exponential_decay",
]
import typing
from math import sqrt
import numpy as np
from ..utils.bezier import bezier
from ..utils.simple_functions import sigmoid
def linear(t: float) -> float:
    """Identity rate function: constant speed."""
    return t
def smooth(t: float, inflection: float = 10.0) -> float:
    """Sigmoid-based ease-in-out, rescaled so smooth(0)=0 and smooth(1)=1."""
    floor = sigmoid(-inflection / 2)
    scaled = (sigmoid(inflection * (t - 0.5)) - floor) / (1 - 2 * floor)
    # Clamp to [0, 1] exactly as min(max(..., 0), 1) would.
    if scaled < 0:
        return 0
    return 1 if scaled > 1 else scaled
def rush_into(t: float, inflection: float = 10.0) -> float:
    """First half of smooth(), stretched to [0, 1]: decelerating finish."""
    return 2 * smooth(t / 2.0, inflection)


def rush_from(t: float, inflection: float = 10.0) -> float:
    """Second half of smooth(), stretched to [0, 1]: accelerating start."""
    return 2 * smooth(t / 2.0 + 0.5, inflection) - 1
def slow_into(t: float) -> float:
    """Quarter-circle arc from (0, 0) to (1, 1): fast start, slow finish."""
    remaining = 1 - t
    return np.sqrt(1 - remaining * remaining)
def double_smooth(t: float) -> float:
    """Two smooth() ramps glued back to back at t = 0.5."""
    if t < 0.5:
        return 0.5 * smooth(2 * t)
    return 0.5 * (1 + smooth(2 * t - 1))
def there_and_back(t: float, inflection: float = 10.0) -> float:
    """Rise with smooth() to 1 at t = 0.5, then fall back symmetrically."""
    folded = 2 * t if t < 0.5 else 2 * (1 - t)
    return smooth(folded, inflection)


def there_and_back_with_pause(t: float, pause_ratio: float = 1.0 / 3) -> float:
    """Like there_and_back, but holds at 1 for a plateau of width pause_ratio."""
    a = 1.0 / pause_ratio
    if t < 0.5 - pause_ratio / 2:
        return smooth(a * t)
    if t < 0.5 + pause_ratio / 2:
        return 1
    return smooth(a - a * t)
def running_start(
    t: float, pull_factor: float = -0.5
) -> typing.Iterable:  # what is func return type?
    """Anticipation: pulls backwards (pull_factor < 0) before moving forward."""
    control_points = [0, 0, pull_factor, pull_factor, 1, 1, 1]
    return bezier(control_points)(t)
def not_quite_there(
    func: typing.Callable[[float], float] = smooth,
    proportion: float = 0.7,
) -> typing.Callable[[float], float]:
    """Scale a rate function so it tops out at `proportion` instead of 1."""

    def result(t):
        return proportion * func(t)

    return result
def wiggle(t: float, wiggles: float = 2) -> float:
    """Oscillation under a there_and_back envelope (zero at both ends)."""
    envelope = there_and_back(t)
    return envelope * np.sin(wiggles * np.pi * t)
def squish_rate_func(
    func: typing.Callable[[float], float],
    a: float = 0.4,
    b: float = 0.6,
) -> typing.Callable[[float], float]:
    """Compress `func` into the sub-interval [a, b], clamping outside it."""

    def result(t):
        if a == b:
            return a
        if t < a:
            return func(0)
        if t > b:
            return func(1)
        return func((t - a) / (b - a))

    return result
# Stylistically, should this take parameters (with default values)?
# Ultimately, the functionality is entirely subsumed by squish_rate_func,
# but it may be useful to have a nice name for with nice default params for
# "lingering", different from squish_rate_func's default params
def lingering(t: float) -> float:
    """Constant speed over [0, 0.8], then hold at 0.8 until the end."""
    identity = lambda t: t
    return squish_rate_func(identity, 0, 0.8)(t)
def exponential_decay(t: float, half_life: float = 0.1) -> float:
    """Exponential approach toward 1 with time constant `half_life`.

    The half-life should be rather small to minimize the cut-off error at
    the end (the curve never exactly reaches 1).
    """
    decayed = np.exp(-t / half_life)
    return 1 - decayed
def ease_in_sine(t: float) -> float:
    """Sinusoidal easing, accelerating from zero."""
    return 1 - np.cos(t * np.pi / 2)


def ease_out_sine(t: float) -> float:
    """Sinusoidal easing, decelerating to one."""
    return np.sin(t * np.pi / 2)


def ease_in_out_sine(t: float) -> float:
    """Sinusoidal easing in and out."""
    return (1 - np.cos(np.pi * t)) / 2
def ease_in_quad(t: float) -> float:
    """Quadratic easing, accelerating from zero."""
    return t * t


def ease_out_quad(t: float) -> float:
    """Quadratic easing, decelerating to one."""
    remaining = 1 - t
    return 1 - remaining * remaining


def ease_in_out_quad(t: float) -> float:
    """Quadratic easing in and out."""
    if t < 0.5:
        return 2 * t * t
    return 1 - pow(-2 * t + 2, 2) / 2
def ease_in_cubic(t: float) -> float:
    """Cubic easing, accelerating from zero."""
    return t * t * t


def ease_out_cubic(t: float) -> float:
    """Cubic easing, decelerating to one."""
    return 1 - pow(1 - t, 3)


def ease_in_out_cubic(t: float) -> float:
    """Cubic easing in and out."""
    if t < 0.5:
        return 4 * t * t * t
    return 1 - pow(-2 * t + 2, 3) / 2
def ease_in_quart(t: float) -> float:
    """Quartic easing, accelerating from zero."""
    return t * t * t * t


def ease_out_quart(t: float) -> float:
    """Quartic easing, decelerating to one."""
    return 1 - pow(1 - t, 4)


def ease_in_out_quart(t: float) -> float:
    """Quartic easing in and out."""
    if t < 0.5:
        return 8 * t * t * t * t
    return 1 - pow(-2 * t + 2, 4) / 2
def ease_in_quint(t: float) -> float:
    """Quintic easing, accelerating from zero."""
    return t * t * t * t * t


def ease_out_quint(t: float) -> float:
    """Quintic easing, decelerating to one."""
    return 1 - pow(1 - t, 5)


def ease_in_out_quint(t: float) -> float:
    """Quintic easing in and out."""
    if t < 0.5:
        return 16 * t * t * t * t * t
    return 1 - pow(-2 * t + 2, 5) / 2
def ease_in_expo(t: float) -> float:
    """Exponential easing, accelerating from zero (pinned to 0 at t=0)."""
    return 0 if t == 0 else pow(2, 10 * t - 10)


def ease_out_expo(t: float) -> float:
    """Exponential easing, decelerating to one (pinned to 1 at t=1)."""
    return 1 if t == 1 else 1 - pow(2, -10 * t)


def ease_in_out_expo(t: float) -> float:
    """Exponential easing in and out (easings.net easeInOutExpo).

    Bug fix: the second half must be (2 - 2**(-20t+10)) / 2; the original
    `2 - pow(2, -20*t + 10) / 2` jumps from 0.5 to 1.5 at t = 0.5 and
    approaches 2 instead of 1.
    """
    if t == 0:
        return 0
    elif t == 1:
        return 1
    elif t < 0.5:
        return pow(2, 20 * t - 10) / 2
    else:
        return (2 - pow(2, -20 * t + 10)) / 2
def ease_in_circ(t: float) -> float:
    """Circular easing, accelerating from zero."""
    return 1 - sqrt(1 - pow(t, 2))


def ease_out_circ(t: float) -> float:
    """Circular easing, decelerating to one."""
    return sqrt(1 - pow(t - 1, 2))


def ease_in_out_circ(t: float) -> float:
    """Circular easing in and out."""
    if t < 0.5:
        return (1 - sqrt(1 - pow(2 * t, 2))) / 2
    return (sqrt(1 - pow(-2 * t + 2, 2)) + 1) / 2
def ease_in_back(t: float) -> float:
    """Back easing: dips backwards (overshoot) before accelerating forward."""
    c1 = 1.70158  # standard overshoot constant
    c3 = c1 + 1
    return c3 * t * t * t - c1 * t * t


def ease_out_back(t: float) -> float:
    """Back easing: overshoots past one before settling."""
    c1 = 1.70158
    c3 = c1 + 1
    return 1 + c3 * pow(t - 1, 3) + c1 * pow(t - 1, 2)


def ease_in_out_back(t: float) -> float:
    """Back easing in and out (overshoot at both ends)."""
    c1 = 1.70158
    c2 = c1 * 1.525
    if t < 0.5:
        return (pow(2 * t, 2) * ((c2 + 1) * 2 * t - c2)) / 2
    return (pow(2 * t - 2, 2) * ((c2 + 1) * (t * 2 - 2) + c2) + 2) / 2
def ease_in_elastic(t: float) -> float:
    """Elastic easing: oscillates with growing amplitude into the start."""
    c4 = (2 * np.pi) / 3
    if t == 0:
        return 0
    if t == 1:
        return 1
    return -pow(2, 10 * t - 10) * np.sin((t * 10 - 10.75) * c4)


def ease_out_elastic(t: float) -> float:
    """Elastic easing: overshoots one and rings down like a spring."""
    c4 = (2 * np.pi) / 3
    if t == 0:
        return 0
    if t == 1:
        return 1
    return pow(2, -10 * t) * np.sin((t * 10 - 0.75) * c4) + 1


def ease_in_out_elastic(t: float) -> float:
    """Elastic easing in and out."""
    c5 = (2 * np.pi) / 4.5
    if t == 0:
        return 0
    if t == 1:
        return 1
    if t < 0.5:
        return -(pow(2, 20 * t - 10) * np.sin((20 * t - 11.125) * c5)) / 2
    return (pow(2, -20 * t + 10) * np.sin((20 * t - 11.125) * c5)) / 2 + 1
def ease_in_bounce(t: float) -> float:
    """Bounce easing, accelerating from zero: mirror of ease_out_bounce."""
    return 1 - ease_out_bounce(1 - t)


def ease_out_bounce(t: float) -> float:
    """Bounce easing, decelerating to one (four piecewise parabolas).

    Bug fix: each later segment must square the *shifted* t; the original
    multiplied the shifted value by the unshifted t, making the curve
    discontinuous at the segment boundaries (e.g. 1.0 -> 0.25 at t = 1/2.75).
    """
    n1 = 7.5625
    d1 = 2.75
    if t < 1 / d1:
        return n1 * t * t
    elif t < 2 / d1:
        t -= 1.5 / d1
        return n1 * t * t + 0.75
    elif t < 2.5 / d1:
        t -= 2.25 / d1
        return n1 * t * t + 0.9375
    else:
        t -= 2.625 / d1
        return n1 * t * t + 0.984375


def ease_in_out_bounce(t: float) -> float:
    """Bounce easing in and out.

    Bug fix: the original body was a verbatim copy of ease_in_out_back
    (back-overshoot constants); the in-out bounce is the composition of
    the two bounce halves instead.
    """
    if t < 0.5:
        return (1 - ease_out_bounce(1 - 2 * t)) / 2
    return (1 + ease_out_bounce(2 * t - 1)) / 2
| [
"numpy.sin",
"numpy.exp",
"numpy.sqrt",
"numpy.cos"
] | [((4565, 4595), 'numpy.sqrt', 'np.sqrt', (['(1 - (1 - t) * (1 - t))'], {}), '(1 - (1 - t) * (1 - t))\n', (4572, 4595), True, 'import numpy as np\n'), ((6733, 6754), 'numpy.sin', 'np.sin', (['(t * np.pi / 2)'], {}), '(t * np.pi / 2)\n', (6739, 6754), True, 'import numpy as np\n'), ((5635, 5662), 'numpy.sin', 'np.sin', (['(wiggles * np.pi * t)'], {}), '(wiggles * np.pi * t)\n', (5641, 5662), True, 'import numpy as np\n'), ((6581, 6603), 'numpy.exp', 'np.exp', (['(-t / half_life)'], {}), '(-t / half_life)\n', (6587, 6603), True, 'import numpy as np\n'), ((6658, 6679), 'numpy.cos', 'np.cos', (['(t * np.pi / 2)'], {}), '(t * np.pi / 2)\n', (6664, 6679), True, 'import numpy as np\n'), ((6813, 6830), 'numpy.cos', 'np.cos', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6819, 6830), True, 'import numpy as np\n'), ((9214, 9243), 'numpy.sin', 'np.sin', (['((t * 10 - 10.75) * c4)'], {}), '((t * 10 - 10.75) * c4)\n', (9220, 9243), True, 'import numpy as np\n'), ((9421, 9449), 'numpy.sin', 'np.sin', (['((t * 10 - 0.75) * c4)'], {}), '((t * 10 - 0.75) * c4)\n', (9427, 9449), True, 'import numpy as np\n'), ((9650, 9680), 'numpy.sin', 'np.sin', (['((20 * t - 11.125) * c5)'], {}), '((20 * t - 11.125) * c5)\n', (9656, 9680), True, 'import numpy as np\n'), ((9735, 9765), 'numpy.sin', 'np.sin', (['((20 * t - 11.125) * c5)'], {}), '((20 * t - 11.125) * c5)\n', (9741, 9765), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from numpy.fft import rfft, rfftfreq
from scipy import signal
import python_speech_features as psf
from matplotlib import cm
def plot_mfcc(sample_rate, sig):
    """Compute MFCC features of a single signal and display them.

    Shows the feature matrix first as a heat map, then as line plots.
    """
    features = psf.mfcc(sig, samplerate=sample_rate)
    print("LOG: ", features)
    fig, axis = plt.subplots()
    # imshow wants (coefficient, frame), so swap the axes.
    transposed = np.swapaxes(features, 0, 1)
    axis.imshow(transposed, interpolation='nearest', cmap=cm.coolwarm, origin='lower', aspect='auto')
    axis.set_title('MFCC')
    # Heat-map view of the MFCC matrix.
    plt.show()
    # Line-plot view of the raw feature values.
    plt.plot(features)
    plt.show()
def plot_raw_signal(sig):
    """Plot the raw signal amplitude over sample index.

    Bug fix: the original called ``plt.plot(np.arange(len(sig)), signal)``,
    passing the imported ``scipy.signal`` *module* instead of the ``sig``
    array, so the waveform was never drawn.
    """
    plt.title('Signal Wave')
    plt.xlabel('Time')
    plt.ylabel('Amplitude')
    plt.plot(np.arange(len(sig)), sig)
    plt.show()
def plot_spectogram(sig, samplerate):
    """Display a spectrogram (frequency vs. time) of ``sig``."""
    freqs, times, spectrum = signal.spectrogram(sig, samplerate)
    plt.pcolormesh(times, freqs, spectrum)
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.show()
def plot_mel_scale():
    """Plot the Mel scale as a function of the Hertz scale.

    Uses the standard conversion mel = 2595 * log10(1 + f / 700).

    Cleanup: removed a stray dead ``pass`` statement that followed
    ``plt.show()`` in the original.
    """
    fs = np.arange(20000)
    ms = 2595 * np.log10(1 + fs / 700)
    plt.title('Mel/Hz ratio')
    plt.xlabel('Hertz scale')
    plt.ylabel('Mel scale')
    plt.plot(fs, ms)
    plt.grid(True)
    plt.show()
def basic_plots(sig, samplerate):
    """Show the MFCC plots plus one figure with waveform and spectrogram."""
    plot_mfcc(samplerate, sig)
    plt.figure(1)
    # Top panel: raw waveform over sample index.
    plt.subplot(211)
    plt.title('Signal Wave')
    plt.xlabel('Time')
    plt.ylabel('Amplitude')
    sample_index = np.arange(len(sig))
    plt.plot(sample_index, sig)
    # Bottom panel: spectrogram.
    plt.subplot(212)
    plt.title('Spectogram')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    freqs, times, spectrum = signal.spectrogram(sig, samplerate, nperseg=1024)
    plt.pcolormesh(times, freqs, spectrum)
    plt.show()
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"scipy.signal.spectrogram",
"matplotlib.pyplot.pcolormesh",
"python_speech_features.mfcc",
"numpy.swapaxes",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplo... | [((293, 330), 'python_speech_features.mfcc', 'psf.mfcc', (['sig'], {'samplerate': 'sample_rate'}), '(sig, samplerate=sample_rate)\n', (301, 330), True, 'import python_speech_features as psf\n'), ((374, 388), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (386, 388), True, 'import matplotlib.pyplot as plt\n'), ((404, 432), 'numpy.swapaxes', 'np.swapaxes', (['mfcc_feat', '(0)', '(1)'], {}), '(mfcc_feat, 0, 1)\n', (415, 432), True, 'import numpy as np\n'), ((590, 600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((628, 647), 'matplotlib.pyplot.plot', 'plt.plot', (['mfcc_feat'], {}), '(mfcc_feat)\n', (636, 647), True, 'import matplotlib.pyplot as plt\n'), ((652, 662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (660, 662), True, 'import matplotlib.pyplot as plt\n'), ((728, 752), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal Wave"""'], {}), "('Signal Wave')\n", (737, 752), True, 'import matplotlib.pyplot as plt\n'), ((757, 775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (767, 775), True, 'import matplotlib.pyplot as plt\n'), ((780, 803), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (790, 803), True, 'import matplotlib.pyplot as plt\n'), ((849, 859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (857, 859), True, 'import matplotlib.pyplot as plt\n'), ((927, 962), 'scipy.signal.spectrogram', 'signal.spectrogram', (['sig', 'samplerate'], {}), '(sig, samplerate)\n', (945, 962), False, 'from scipy import signal\n'), ((967, 992), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Sxx'], {}), '(t, f, Sxx)\n', (981, 992), True, 'import matplotlib.pyplot as plt\n'), ((997, 1025), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1007, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1054), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [sec]"""'], {}), "('Time [sec]')\n", (1040, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1069), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1067, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1167), 'numpy.arange', 'np.arange', (['(20000)'], {}), '(20000)\n', (1160, 1167), True, 'import numpy as np\n'), ((1209, 1234), 'matplotlib.pyplot.title', 'plt.title', (['"""Mel/Hz ratio"""'], {}), "('Mel/Hz ratio')\n", (1218, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1264), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hertz scale"""'], {}), "('Hertz scale')\n", (1249, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mel scale"""'], {}), "('Mel scale')\n", (1279, 1292), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1313), 'matplotlib.pyplot.plot', 'plt.plot', (['fs', 'ms'], {}), '(fs, ms)\n', (1305, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1331), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1325, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1344, 1346), True, 'import matplotlib.pyplot as plt\n'), ((1429, 1442), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1439, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1463), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1458, 1463), True, 'import matplotlib.pyplot as plt\n'), ((1468, 1492), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal Wave"""'], {}), "('Signal Wave')\n", (1477, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1515), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1507, 1515), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1543), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), 
"('Amplitude')\n", (1530, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1603), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1598, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1631), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectogram"""'], {}), "('Spectogram')\n", (1617, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1646, 1664), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1693), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [sec]"""'], {}), "('Time [sec]')\n", (1679, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1759), 'scipy.signal.spectrogram', 'signal.spectrogram', (['sig', 'samplerate'], {'nperseg': '(1024)'}), '(sig, samplerate, nperseg=1024)\n', (1728, 1759), False, 'from scipy import signal\n'), ((1785, 1810), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Sxx'], {}), '(t, f, Sxx)\n', (1799, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1829, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1206), 'numpy.log10', 'np.log10', (['(1 + fs / 700)'], {}), '(1 + fs / 700)\n', (1192, 1206), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
RCCSD for real integrals
8-fold permutation symmetry has been used
(ij|kl) = (ji|kl) = (kl|ij) = ...
'''
import ctypes
from functools import reduce
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.cc import _ccsd
from pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core
from pyscf import __config__
BLKMIN = getattr(__config__, 'cc_ccsd_blkmin', 4)
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)
# t1: ia
# t2: ijab
def kernel(mycc, eris=None, t1=None, t2=None, max_cycle=50, tol=1e-8,
           tolnormt=1e-6, verbose=None):
    # Iteratively solve the CCSD amplitude equations.
    # Returns (conv, eccsd, t1, t2): convergence flag, correlation energy,
    # and the final singles/doubles amplitudes.
    # Convergence requires BOTH |dE| < tol and ||d(t1,t2)|| < tolnormt.
    log = logger.new_logger(mycc, verbose)
    if eris is None:
        eris = mycc.ao2mo(mycc.mo_coeff)
    if t1 is None and t2 is None:
        t1, t2 = mycc.get_init_guess(eris)
    elif t2 is None:
        t2 = mycc.get_init_guess(eris)[1]
    cput1 = cput0 = (logger.process_clock(), logger.perf_counter())
    eold = 0
    eccsd = mycc.energy(t1, t2, eris)
    log.info('Init E_corr(CCSD) = %.15g', eccsd)
    # Reuse a caller-supplied DIIS object; otherwise build one if DIIS is
    # enabled on mycc; otherwise run plain iterations (adiis = None).
    if isinstance(mycc.diis, lib.diis.DIIS):
        adiis = mycc.diis
    elif mycc.diis:
        adiis = lib.diis.DIIS(mycc, mycc.diis_file, incore=mycc.incore_complete)
        adiis.space = mycc.diis_space
    else:
        adiis = None
    conv = False
    for istep in range(max_cycle):
        t1new, t2new = mycc.update_amps(t1, t2, eris)
        # Convergence metric: norm of the change of the packed amplitude vector.
        tmpvec = mycc.amplitudes_to_vector(t1new, t2new)
        tmpvec -= mycc.amplitudes_to_vector(t1, t2)
        normt = numpy.linalg.norm(tmpvec)
        tmpvec = None
        if mycc.iterative_damping < 1.0:
            # Linear mixing of old and new amplitudes to damp oscillations.
            alpha = mycc.iterative_damping
            t1new = (1-alpha) * t1 + alpha * t1new
            t2new *= alpha
            t2new += (1-alpha) * t2
        t1, t2 = t1new, t2new
        t1new = t2new = None
        t1, t2 = mycc.run_diis(t1, t2, istep, normt, eccsd-eold, adiis)
        eold, eccsd = eccsd, mycc.energy(t1, t2, eris)
        log.info('cycle = %d E_corr(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
                 istep+1, eccsd, eccsd - eold, normt)
        cput1 = log.timer('CCSD iter', *cput1)
        if abs(eccsd-eold) < tol and normt < tolnormt:
            conv = True
            break
    log.timer('CCSD', *cput0)
    return conv, eccsd, t1, t2
def update_amps(mycc, t1, t2, eris):
    '''One CCSD amplitude-equation iteration.

    Builds new t1/t2 from the current amplitudes and the MO integrals in
    ``eris`` (a _ChemistsERIs instance).  The result is already divided by
    the orbital-energy denominators, i.e. it is the next amplitude guess.
    The virtual space is processed in blocks [p0:p1] so the large integral
    slices fit within mycc.max_memory; integral I/O is overlapped with
    computation via lib.call_in_background.
    '''
    if mycc.cc2:
        raise NotImplementedError
    assert(isinstance(eris, _ChemistsERIs))
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    fock = eris.fock
    # Occupied/virtual MO energies; level_shift damps the virtuals.
    mo_e_o = eris.mo_energy[:nocc]
    mo_e_v = eris.mo_energy[nocc:] + mycc.level_shift
    t1new = numpy.zeros_like(t1)
    # (vv|vv) ladder contribution, computed with the jiba symmetry of t2.
    t2new = mycc._add_vvvv(t1, t2, eris, t2sym='jiba')
    t2new *= .5 # *.5 because t2+t2.transpose(1,0,3,2) in the end
    time1 = log.timer_debug1('vvvv', *time0)
    #** make_inter_F
    fov = fock[:nocc,nocc:].copy()
    t1new += fov
    # Effective Fock intermediates with the diagonal MO energies removed.
    foo = fock[:nocc,:nocc] - numpy.diag(mo_e_o)
    foo += .5 * numpy.einsum('ia,ja->ij', fock[:nocc,nocc:], t1)
    fvv = fock[nocc:,nocc:] - numpy.diag(mo_e_v)
    fvv -= .5 * numpy.einsum('ia,ib->ab', t1, fock[:nocc,nocc:])
    # Large wVOov/wVooV intermediates live in memory when incore_complete,
    # otherwise in an HDF5 temp file.
    if mycc.incore_complete:
        fswap = None
    else:
        fswap = lib.H5TmpFile()
    fwVOov, fwVooV = _add_ovvv_(mycc, t1, t2, eris, fvv, t1new, t2new, fswap)
    time1 = log.timer_debug1('ovvv', *time1)
    woooo = numpy.asarray(eris.oooo).transpose(0,2,1,3).copy()
    # Estimate memory per virtual-index column to pick the block size.
    unit = nocc**2*nvir*7 + nocc**3 + nocc*nvir**2
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    blksize = min(nvir, max(BLKMIN, int((max_memory*.9e6/8-nocc**4)/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)
    for p0, p1 in lib.prange(0, nvir, blksize):
        wVOov = fwVOov[p0:p1]
        wVooV = fwVooV[p0:p1]
        eris_ovoo = eris.ovoo[:,p0:p1]
        eris_oovv = numpy.empty((nocc,nocc,p1-p0,nvir))
        def load_oovv(p0, p1):
            eris_oovv[:] = eris.oovv[:,:,p0:p1]
        # Prefetch the oovv slice while contracting the ovoo slice.
        with lib.call_in_background(load_oovv, sync=not mycc.async_io) as prefetch_oovv:
            #:eris_oovv = eris.oovv[:,:,p0:p1]
            prefetch_oovv(p0, p1)
            foo += numpy.einsum('kc,kcji->ij', 2*t1[:,p0:p1], eris_ovoo)
            foo += numpy.einsum('kc,icjk->ij', -t1[:,p0:p1], eris_ovoo)
            tmp = lib.einsum('la,jaik->lkji', t1[:,p0:p1], eris_ovoo)
            woooo += tmp + tmp.transpose(1,0,3,2)
            tmp = None
            wVOov -= lib.einsum('jbik,ka->bjia', eris_ovoo, t1)
            t2new[:,:,p0:p1] += wVOov.transpose(1,2,0,3)
            wVooV += lib.einsum('kbij,ka->bija', eris_ovoo, t1)
            eris_ovoo = None
        load_oovv = prefetch_oovv = None
        eris_ovvo = numpy.empty((nocc,p1-p0,nvir,nocc))
        def load_ovvo(p0, p1):
            eris_ovvo[:] = eris.ovvo[:,p0:p1]
        # Prefetch the ovvo slice while contracting the oovv slice.
        with lib.call_in_background(load_ovvo, sync=not mycc.async_io) as prefetch_ovvo:
            #:eris_ovvo = eris.ovvo[:,p0:p1]
            prefetch_ovvo(p0, p1)
            t1new[:,p0:p1] -= numpy.einsum('jb,jiab->ia', t1, eris_oovv)
            wVooV -= eris_oovv.transpose(2,0,1,3)
            wVOov += wVooV*.5 #: bjia + bija*.5
        load_ovvo = prefetch_ovvo = None
        t2new[:,:,p0:p1] += (eris_ovvo*0.5).transpose(0,3,1,2)
        eris_voov = eris_ovvo.conj().transpose(1,0,3,2)
        t1new[:,p0:p1] += 2*numpy.einsum('jb,aijb->ia', t1, eris_voov)
        eris_ovvo = None
        tmp = lib.einsum('ic,kjbc->ibkj', t1, eris_oovv)
        tmp += lib.einsum('bjkc,ic->jbki', eris_voov, t1)
        t2new[:,:,p0:p1] -= lib.einsum('ka,jbki->jiba', t1, tmp)
        eris_oovv = tmp = None
        fov[:,p0:p1] += numpy.einsum('kc,aikc->ia', t1, eris_voov) * 2
        fov[:,p0:p1] -= numpy.einsum('kc,akic->ia', t1, eris_voov)
        # tau = t2 + t1*t1 effective doubles; theta = 2*tau - tau^T.
        tau = numpy.einsum('ia,jb->ijab', t1[:,p0:p1]*.5, t1)
        tau += t2[:,:,p0:p1]
        theta = tau.transpose(1,0,2,3) * 2
        theta -= tau
        fvv -= lib.einsum('cjia,cjib->ab', theta.transpose(2,1,0,3), eris_voov)
        foo += lib.einsum('aikb,kjab->ij', eris_voov, theta)
        tau = theta = None
        tau = t2[:,:,p0:p1] + numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        woooo += lib.einsum('ijab,aklb->ijkl', tau, eris_voov)
        tau = None
        # The following three update passes overlap the expensive einsum
        # accumulation with construction of the next tau/theta block.
        def update_wVooV(q0, q1, tau):
            wVooV[:] += lib.einsum('bkic,jkca->bija', eris_voov[:,:,:,q0:q1], tau)
        with lib.call_in_background(update_wVooV, sync=not mycc.async_io) as update_wVooV:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tau = t2[:,:,q0:q1] * .5
                tau += numpy.einsum('ia,jb->ijab', t1[:,q0:q1], t1)
                #:wVooV += lib.einsum('bkic,jkca->bija', eris_voov[:,:,:,q0:q1], tau)
                update_wVooV(q0, q1, tau)
        tau = update_wVooV = None
        def update_t2(q0, q1, tmp):
            t2new[:,:,q0:q1] += tmp.transpose(2,0,1,3)
            tmp *= .5
            t2new[:,:,q0:q1] += tmp.transpose(0,2,1,3)
        with lib.call_in_background(update_t2, sync=not mycc.async_io) as update_t2:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tmp = lib.einsum('jkca,ckib->jaib', t2[:,:,p0:p1,q0:q1], wVooV)
                #:t2new[:,:,q0:q1] += tmp.transpose(2,0,1,3)
                #:tmp *= .5
                #:t2new[:,:,q0:q1] += tmp.transpose(0,2,1,3)
                update_t2(q0, q1, tmp)
                tmp = None
        wVOov += eris_voov
        eris_VOov = -.5 * eris_voov.transpose(0,2,1,3)
        eris_VOov += eris_voov
        eris_voov = None
        def update_wVOov(q0, q1, tau):
            wVOov[:,:,:,q0:q1] += .5 * lib.einsum('aikc,kcjb->aijb', eris_VOov, tau)
        with lib.call_in_background(update_wVOov, sync=not mycc.async_io) as update_wVOov:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tau = t2[:,:,q0:q1].transpose(1,3,0,2) * 2
                tau -= t2[:,:,q0:q1].transpose(0,3,1,2)
                tau -= numpy.einsum('ia,jb->ibja', t1[:,q0:q1]*2, t1)
                #:wVOov[:,:,:,q0:q1] += .5 * lib.einsum('aikc,kcjb->aijb', eris_VOov, tau)
                update_wVOov(q0, q1, tau)
                tau = None
        def update_t2(q0, q1, theta):
            t2new[:,:,q0:q1] += lib.einsum('kica,ckjb->ijab', theta, wVOov)
        with lib.call_in_background(update_t2, sync=not mycc.async_io) as update_t2:
            for q0, q1 in lib.prange(0, nvir, blksize):
                theta = t2[:,:,p0:p1,q0:q1] * 2
                theta -= t2[:,:,p0:p1,q0:q1].transpose(1,0,2,3)
                #:t2new[:,:,q0:q1] += lib.einsum('kica,ckjb->ijab', theta, wVOov)
                update_t2(q0, q1, theta)
                theta = None
        eris_VOov = wVOov = wVooV = update_wVOov = None
        time1 = log.timer_debug1('voov [%d:%d]'%(p0, p1), *time1)
    fwVOov = fwVooV = fswap = None
    # Terms that need the completed woooo and fov intermediates.
    for p0, p1 in lib.prange(0, nvir, blksize):
        theta = t2[:,:,p0:p1].transpose(1,0,2,3) * 2 - t2[:,:,p0:p1]
        t1new += numpy.einsum('jb,ijba->ia', fov[:,p0:p1], theta)
        t1new -= lib.einsum('jbki,kjba->ia', eris.ovoo[:,p0:p1], theta)
        tau = numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        tau += t2[:,:,p0:p1]
        t2new[:,:,p0:p1] += .5 * lib.einsum('ijkl,klab->ijab', woooo, tau)
        theta = tau = None
    ft_ij = foo + numpy.einsum('ja,ia->ij', .5*t1, fov)
    ft_ab = fvv - numpy.einsum('ia,ib->ab', .5*t1, fov)
    t2new += lib.einsum('ijac,bc->ijab', t2, ft_ab)
    t2new -= lib.einsum('ki,kjab->ijab', ft_ij, t2)
    eia = mo_e_o[:,None] - mo_e_v
    t1new += numpy.einsum('ib,ab->ia', t1, fvv)
    t1new -= numpy.einsum('ja,ji->ia', t1, foo)
    t1new /= eia
    #: t2new = t2new + t2new.transpose(1,0,3,2)
    # Symmetrize t2new[ijab] += t2new[jiba] and divide by the energy
    # denominators, row by row to limit the memory footprint.
    for i in range(nocc):
        if i > 0:
            t2new[i,:i] += t2new[:i,i].transpose(0,2,1)
            t2new[i,:i] /= lib.direct_sum('a,jb->jab', eia[i], eia[:i])
            t2new[:i,i] = t2new[i,:i].transpose(0,2,1)
        t2new[i,i] = t2new[i,i] + t2new[i,i].T
        t2new[i,i] /= lib.direct_sum('a,b->ab', eia[i], eia[i])
    time0 = log.timer_debug1('update t1 t2', *time0)
    return t1new, t2new
def _add_ovvv_(mycc, t1, t2, eris, fvv, t1new, t2new, fswap):
    '''Contract the (ov|vv) integrals into fvv/t1new/t2new *in place* and
    build the wVOov / wVooV intermediates.

    Intermediates are kept in memory when ``fswap`` is None, otherwise in
    HDF5 datasets inside the temp file ``fswap``.
    Returns (wVOov, wVooV), each with shape (nvir, nocc, nocc, nvir).
    '''
    time1 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    # (ov|vv) is stored with the last two (symmetric) indices packed.
    nvir_pair = nvir * (nvir+1) // 2
    if fswap is None:
        wVOov = numpy.zeros((nvir,nocc,nocc,nvir))
    else:
        wVOov = fswap.create_dataset('wVOov', (nvir,nocc,nocc,nvir), 'f8')
    wooVV = numpy.zeros((nocc,nocc*nvir_pair))
    max_memory = mycc.max_memory - lib.current_memory()[0]
    unit = nocc*nvir**2*3 + nocc**2*nvir + 2
    blksize = min(nvir, max(BLKMIN, int((max_memory*.95e6/8-wooVV.size)/unit)))
    if not mycc.direct:
        # The non-direct path keeps an extra vvvo copy per block.
        unit = nocc*nvir**2*3 + nocc**2*nvir + 2 + nocc*nvir**2 + nocc*nvir
        blksize = min(nvir, max(BLKMIN, int((max_memory*.95e6/8-wooVV.size-nocc**2*nvir)/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)
    def load_ovvv(buf, p0):
        # Load the next (ov|vv) slice into buf (double-buffered prefetch).
        if p0 < nvir:
            p1 = min(nvir, p0+blksize)
            buf[:p1-p0] = eris.ovvv[:,p0:p1].transpose(1,0,2)
    with lib.call_in_background(load_ovvv, sync=not mycc.async_io) as prefetch:
        buf = numpy.empty((blksize,nocc,nvir_pair))
        buf_prefetch = numpy.empty((blksize,nocc,nvir_pair))
        load_ovvv(buf_prefetch, 0)
        for p0, p1 in lib.prange(0, nvir, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, p1)
            eris_vovv = buf[:p1-p0]
            #:wooVV -= numpy.einsum('jc,ciba->jiba', t1[:,p0:p1], eris_vovv)
            lib.ddot(numpy.asarray(t1[:,p0:p1], order='C'),
                     eris_vovv.reshape(p1-p0,-1), -1, wooVV, 1)
            # Unpack the vv triangle to the full (p1-p0, nocc, nvir, nvir) slice.
            eris_vovv = lib.unpack_tril(eris_vovv.reshape((p1-p0)*nocc,nvir_pair))
            eris_vovv = eris_vovv.reshape(p1-p0,nocc,nvir,nvir)
            fvv += 2*numpy.einsum('kc,ckab->ab', t1[:,p0:p1], eris_vovv)
            fvv[:,p0:p1] -= numpy.einsum('kc,bkca->ab', t1, eris_vovv)
            if not mycc.direct:
                vvvo = eris_vovv.transpose(0,2,3,1).copy()
                for i in range(nocc):
                    tau = t2[i,:,p0:p1] + numpy.einsum('a,jb->jab', t1[i,p0:p1], t1)
                    tmp = lib.einsum('jcd,cdbk->jbk', tau, vvvo)
                    t2new[i] -= lib.einsum('ka,jbk->jab', t1, tmp)
                    tau = tmp = None
            wVOov[p0:p1] = lib.einsum('biac,jc->bija', eris_vovv, t1)
            theta = t2[:,:,p0:p1].transpose(1,2,0,3) * 2
            theta -= t2[:,:,p0:p1].transpose(0,2,1,3)
            t1new += lib.einsum('icjb,cjba->ia', theta, eris_vovv)
            theta = None
            time1 = log.timer_debug1('vovv [%d:%d]'%(p0, p1), *time1)
    if fswap is None:
        wooVV = lib.unpack_tril(wooVV.reshape(nocc**2,nvir_pair))
        return wVOov, wooVV.reshape(nocc,nocc,nvir,nvir).transpose(2,1,0,3)
    else:
        # Out-of-core: unpack wooVV block-by-block into the wVooV dataset.
        fswap.create_dataset('wVooV', (nvir,nocc,nocc,nvir), 'f8')
        wooVV = wooVV.reshape(nocc,nocc,nvir_pair)
        tril2sq = lib.square_mat_in_trilu_indices(nvir)
        for p0, p1 in lib.prange(0, nvir, blksize):
            fswap['wVooV'][p0:p1] = wooVV[:,:,tril2sq[p0:p1]].transpose(2,1,0,3)
        return fswap['wVOov'], fswap['wVooV']
def _add_vvvv(mycc, t1, t2, eris, out=None, with_ovvv=None, t2sym=None):
    '''t2sym: whether t2 has the symmetry t2[ijab]==t2[jiba] or
    t2[ijab]==-t2[jiab] or t2[ijab]==-t2[jiba]
    '''
    # TODO: guess the t2 symmetry automatically by comparing t2 against its
    # transposes (jiba / -jiab / -jiba) instead of requiring the caller to
    # pass t2sym explicitly.
    if t2sym not in ('jiba', '-jiba', '-jiab'):
        # No usable permutation symmetry: contract the full t2 tensor.
        return _add_vvvv_full(mycc, t1, t2, eris, out, with_ovvv)
    # Symmetric case: contract only the packed i>=j triangle, then expand
    # it back to the full (nocc, nocc, nvir, nvir) shape.
    nocc, nvir = t2.shape[1:3]
    Ht2tril = _add_vvvv_tril(mycc, t1, t2, eris, with_ovvv=with_ovvv)
    return _unpack_t2_tril(Ht2tril, nocc, nvir, out, t2sym)
def _add_vvvv_tril(mycc, t1, t2, eris, out=None, with_ovvv=None):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    Using symmetry t2[ijab] = t2[jiba] and Ht2[ijab] = Ht2[jiba], compute the
    lower triangular part of Ht2
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    if with_ovvv is None:
        with_ovvv = mycc.direct
    nocc, nvir = t2.shape[1:3]
    # Number of packed occupied pairs (i >= j).
    nocc2 = nocc*(nocc+1)//2
    if t1 is None:
        tau = t2[numpy.tril_indices(nocc)]
    else:
        # tau[ij,ab] = t2[i,j,a,b] + t1[i,a]*t1[j,b] on the i>=j triangle.
        tau = numpy.empty((nocc2,nvir,nvir), dtype=t2.dtype)
        p1 = 0
        for i in range(nocc):
            p0, p1 = p1, p1 + i+1
            tau[p0:p1] = numpy.einsum('a,jb->jab', t1[i], t1[:i+1])
            tau[p0:p1] += t2[i,:i+1]
    if mycc.direct: # AO-direct CCSD
        mo = getattr(eris, 'mo_coeff', None)
        if mo is None: # If eris does not have the attribute mo_coeff
            mo = _mo_without_core(mycc, mycc.mo_coeff)
        nao, nmo = mo.shape
        # Back-transform tau to the AO basis, contract there, then
        # transform the result to the MO virtual space.
        aos = numpy.asarray(mo[:,nocc:].T, order='F')
        tau = _ao2mo.nr_e2(tau.reshape(nocc2,nvir**2), aos, (0,nao,0,nao), 's1', 's1')
        tau = tau.reshape(nocc2,nao,nao)
        time0 = log.timer_debug1('vvvv-tau', *time0)
        buf = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
        buf = buf.reshape(nocc2,nao,nao)
        Ht2tril = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,nocc,nmo), 's1', 's1')
        Ht2tril = Ht2tril.reshape(nocc2,nvir,nvir)
        if with_ovvv:
            # Remove the ovvv component that the AO-basis contraction
            # implicitly included.
            #: tmp = numpy.einsum('ijcd,ka,kdcb->ijba', tau, t1, eris.ovvv)
            #: t2new -= tmp + tmp.transpose(1,0,3,2)
            tmp = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,0,nocc), 's1', 's1')
            Ht2tril -= lib.ddot(tmp.reshape(nocc2*nvir,nocc), t1).reshape(nocc2,nvir,nvir)
            tmp = _ao2mo.nr_e2(buf, mo.conj(), (0,nocc,nocc,nmo), 's1', 's1')
            #: Ht2tril -= numpy.einsum('xkb,ka->xab', tmp.reshape(-1,nocc,nvir), t1)
            tmp = lib.transpose(tmp.reshape(nocc2,nocc,nvir), axes=(0,2,1), out=buf)
            tmp = lib.ddot(tmp.reshape(nocc2*nvir,nocc), t1, 1,
                           numpy.ndarray((nocc2*nvir,nvir), buffer=tau), 0)
            tmp = lib.transpose(tmp.reshape(nocc2,nvir,nvir), axes=(0,2,1), out=buf)
            Ht2tril -= tmp.reshape(nocc2,nvir,nvir)
    else:
        assert(not with_ovvv)
        Ht2tril = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
    return Ht2tril
def _add_vvvv_full(mycc, t1, t2, eris, out=None, with_ovvv=False):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    without using symmetry t2[ijab] = t2[jiba] in t2 or Ht2
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    if t1 is None:
        tau = t2
    else:
        # Effective doubles tau = t2 + t1 (x) t1.
        tau = numpy.einsum('ia,jb->ijab', t1, t1)
        tau += t2
    if mycc.direct: # AO-direct CCSD
        if with_ovvv:
            raise NotImplementedError
        mo = getattr(eris, 'mo_coeff', None)
        if mo is None: # If eris does not have the attribute mo_coeff
            mo = _mo_without_core(mycc, mycc.mo_coeff)
        nocc, nvir = t2.shape[1:3]
        nao, nmo = mo.shape
        # Back-transform tau to the AO basis, contract with the AO
        # integrals, then transform back to the MO virtual block.
        aos = numpy.asarray(mo[:,nocc:].T, order='F')
        tau = _ao2mo.nr_e2(tau.reshape(nocc**2,nvir,nvir), aos, (0,nao,0,nao), 's1', 's1')
        tau = tau.reshape(nocc,nocc,nao,nao)
        time0 = log.timer_debug1('vvvv-tau mo2ao', *time0)
        buf = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
        buf = buf.reshape(nocc**2,nao,nao)
        Ht2 = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,nocc,nmo), 's1', 's1')
    else:
        assert(not with_ovvv)
        Ht2 = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
    return Ht2.reshape(t2.shape)
def _contract_vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acbd->ijab', t2, vvvv)
    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    # A missing vvvv (AO-direct) or a 2-index packed vvvv takes the 4-fold
    # symmetry path; a full 4-index vvvv takes the general s1 path.
    if vvvv is not None and len(vvvv.shape) != 2:
        return _contract_s1vvvv_t2(mycc, mol, vvvv, t2, out, verbose)
    return _contract_s4vvvv_t2(mycc, mol, vvvv, t2, out, verbose)
def _contract_s4vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acbd->ijab', t2, vvvv)
    where vvvv has to be real and has the 4-fold permutation symmetry
    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    assert(t2.dtype == numpy.double)
    if t2.size == 0:
        return numpy.zeros_like(t2)
    _dgemm = lib.numpy_helper._dgemm
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.new_logger(mycc, verbose)
    nvira, nvirb = t2.shape[-2:]
    # Flatten the leading (occupied-pair) dimensions into one axis x.
    x2 = t2.reshape(-1,nvira,nvirb)
    nocc2 = x2.shape[0]
    nvir2 = nvira * nvirb
    Ht2 = numpy.ndarray(x2.shape, dtype=x2.dtype, buffer=out)
    Ht2[:] = 0
    def contract_blk_(eri, i0, i1, j0, j1):
        # Accumulate one (i-block, j-block) tile of vvvv into Ht2 with BLAS.
        # The i0 > j0 branch adds the transposed tile, exploiting the
        # permutation symmetry of vvvv.
        ic = i1 - i0
        jc = j1 - j0
        #:Ht2[:,j0:j1] += numpy.einsum('xef,efab->xab', x2[:,i0:i1], eri)
        _dgemm('N', 'N', nocc2, jc*nvirb, ic*nvirb,
               x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
               Ht2.reshape(-1,nvir2), 1, 1, i0*nvirb, 0, j0*nvirb)
        if i0 > j0:
            #:Ht2[:,i0:i1] += numpy.einsum('xef,abef->xab', x2[:,j0:j1], eri)
            _dgemm('N', 'T', nocc2, ic*nvirb, jc*nvirb,
                   x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
                   Ht2.reshape(-1,nvir2), 1, 1, j0*nvirb, 0, i0*nvirb)
    max_memory = max(MEMORYMIN, mycc.max_memory - lib.current_memory()[0])
    if vvvv is None: # AO-direct CCSD
        # Compute the AO integrals on the fly, one shell-pair tile at a time.
        ao_loc = mol.ao_loc_nr()
        assert(nvira == nvirb == ao_loc[-1])
        intor = mol._add_suffix('int2e')
        ao2mopt = _ao2mo.AO2MOpt(mol, intor, 'CVHFnr_schwarz_cond',
                                 'CVHFsetnr_direct_scf')
        blksize = max(BLKMIN, numpy.sqrt(max_memory*.9e6/8/nvirb**2/2.5))
        blksize = int(min((nvira+3)/4, blksize))
        sh_ranges = ao2mo.outcore.balance_partition(ao_loc, blksize)
        blksize = max(x[2] for x in sh_ranges)
        eribuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        loadbuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        fint = gto.moleintor.getints4c
        for ip, (ish0, ish1, ni) in enumerate(sh_ranges):
            # Off-diagonal shell-range tiles (s2kl symmetry in the integrals).
            for jsh0, jsh1, nj in sh_ranges[:ip]:
                eri = fint(intor, mol._atm, mol._bas, mol._env,
                           shls_slice=(ish0,ish1,jsh0,jsh1), aosym='s2kl',
                           ao_loc=ao_loc, cintopt=ao2mopt._cintopt, out=eribuf)
                i0, i1 = ao_loc[ish0], ao_loc[ish1]
                j0, j1 = ao_loc[jsh0], ao_loc[jsh1]
                # CCload_eri expands the packed integral tile into tmp.
                tmp = numpy.ndarray((i1-i0,nvirb,j1-j0,nvirb), buffer=loadbuf)
                _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                       eri.ctypes.data_as(ctypes.c_void_p),
                                       (ctypes.c_int*4)(i0, i1, j0, j1),
                                       ctypes.c_int(nvirb))
                contract_blk_(tmp, i0, i1, j0, j1)
                time0 = log.timer_debug1('AO-vvvv [%d:%d,%d:%d]' %
                                         (ish0,ish1,jsh0,jsh1), *time0)
            # Diagonal shell-range tile (s4 symmetry).
            eri = fint(intor, mol._atm, mol._bas, mol._env,
                       shls_slice=(ish0,ish1,ish0,ish1), aosym='s4',
                       ao_loc=ao_loc, cintopt=ao2mopt._cintopt, out=eribuf)
            i0, i1 = ao_loc[ish0], ao_loc[ish1]
            eri = lib.unpack_tril(eri, axis=0)
            tmp = numpy.ndarray((i1-i0,nvirb,i1-i0,nvirb), buffer=loadbuf)
            _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                   eri.ctypes.data_as(ctypes.c_void_p),
                                   (ctypes.c_int*4)(i0, i1, i0, i1),
                                   ctypes.c_int(nvirb))
            eri = None
            contract_blk_(tmp, i0, i1, i0, i1)
            time0 = log.timer_debug1('AO-vvvv [%d:%d,%d:%d]' %
                                     (ish0,ish1,ish0,ish1), *time0)
    else:
        # vvvv stored with the (a,b) pair index packed (lower triangle).
        nvir_pair = nvirb * (nvirb+1) // 2
        unit = nvira*nvir_pair*2 + nvirb**2*nvira/4 + 1
        if mycc.async_io:
            # Prefetch the next vvvv slab while contracting the current one.
            fmap = lib.map_with_prefetch
            unit += nvira*nvir_pair
        else:
            fmap = map
        blksize = numpy.sqrt(max(BLKMIN**2, max_memory*.95e6/8/unit))
        blksize = int(min((nvira+3)/4, blksize))
        def load(v_slice):
            i0, i1 = v_slice
            off0 = i0*(i0+1)//2
            off1 = i1*(i1+1)//2
            return numpy.asarray(vvvv[off0:off1], order='C')
        tril2sq = lib.square_mat_in_trilu_indices(nvira)
        loadbuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        slices = [(i0, i1) for i0, i1 in lib.prange(0, nvira, blksize)]
        for istep, wwbuf in enumerate(fmap(load, lib.prange(0, nvira, blksize))):
            i0, i1 = slices[istep]
            off0 = i0*(i0+1)//2
            for j0, j1 in lib.prange(0, i1, blksize):
                eri = wwbuf[tril2sq[i0:i1,j0:j1]-off0]
                tmp = numpy.ndarray((i1-i0,nvirb,j1-j0,nvirb), buffer=loadbuf)
                _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                       eri.ctypes.data_as(ctypes.c_void_p),
                                       (ctypes.c_int*4)(i0, i1, j0, j1),
                                       ctypes.c_int(nvirb))
                contract_blk_(tmp, i0, i1, j0, j1)
            wwbuf = None
            time0 = log.timer_debug1('vvvv [%d:%d]'%(i0,i1), *time0)
    return Ht2.reshape(t2.shape)
def _contract_s1vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    where vvvv can be real or complex and no permutation symmetry is available in vvvv.
    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    # AO-direct CCSD (vvvv is None) is handled by _contract_s4vvvv_t2;
    # this path requires an explicit 4-index vvvv.
    assert(vvvv is not None)
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.new_logger(mycc, verbose)
    nvira, nvirb = t2.shape[-2:]
    t2_flat = t2.reshape(-1, nvira, nvirb)
    npair = t2_flat.shape[0]
    out_dtype = numpy.result_type(t2, vvvv)
    Ht2 = numpy.ndarray(t2_flat.shape, dtype=out_dtype, buffer=out)
    # Block over the first virtual index so a vvvv slice fits in memory.
    avail_mb = mycc.max_memory - lib.current_memory()[0]
    unit = nvirb**2*nvira*2 + npair*nvirb + 1
    blksize = min(nvira, max(BLKMIN, int(avail_mb*1e6/8/unit)))
    for a0, a1 in lib.prange(0, nvira, blksize):
        Ht2[:,a0:a1] = lib.einsum('xcd,acbd->xab', t2_flat, vvvv[a0:a1])
        time0 = log.timer_debug1('vvvv [%d:%d]' % (a0,a1), *time0)
    return Ht2.reshape(t2.shape)
def _unpack_t2_tril(t2tril, nocc, nvir, out=None, t2sym='jiba'):
    '''Expand t2 amplitudes stored on the packed lower-triangular (i >= j)
    occupied pairs into the full (nocc, nocc, nvir, nvir) array, applying
    the permutation symmetry named by ``t2sym``:

        'jiba'  : t2[j,i,b,a] =  t2[i,j,a,b]
        '-jiba' : t2[j,i,b,a] = -t2[i,j,a,b]
        '-jiab' : t2[j,i,a,b] = -t2[i,j,a,b]  (diagonal i == j zeroed)
    '''
    t2 = numpy.ndarray((nocc,nocc,nvir,nvir), dtype=t2tril.dtype, buffer=out)
    row, col = numpy.tril_indices(nocc)
    t2tril_T = t2tril.transpose(0,2,1)
    if t2sym == 'jiba':
        # Assignment order matters: the second line fixes the i == j diagonal.
        t2[col,row] = t2tril_T
        t2[row,col] = t2tril
    elif t2sym == '-jiba':
        t2[col,row] = -t2tril_T
        t2[row,col] = t2tril
    elif t2sym == '-jiab':
        t2[col,row] = -t2tril
        t2[row,col] = t2tril
        t2[numpy.diag_indices(nocc)] = 0
    return t2
def _unpack_4fold(c2vec, nocc, nvir, anti_symm=True):
    '''Expand a 4-fold packed amplitude vector (stored only on i>j, a>b)
    into the full (nocc, nocc, nvir, nvir) array.

    With anti_symm=True the amplitudes change sign when either the two
    occupied or the two virtual indices are swapped, so the i==j and a==b
    entries stay zero (the array is zero-initialized and never written there).
    '''
    t2 = numpy.zeros((nocc**2,nvir**2), dtype=c2vec.dtype)
    if nocc > 1 and nvir > 1:
        t2tril = c2vec.reshape(nocc*(nocc-1)//2,nvir*(nvir-1)//2)
        # Strictly-lower-triangular index pairs (k=-1 excludes the diagonal).
        otril = numpy.tril_indices(nocc, k=-1)
        vtril = numpy.tril_indices(nvir, k=-1)
        # NOTE(review): lib.takebak_2d appears to scatter t2tril into the flat
        # (nocc**2, nvir**2) matrix at the given row/column index lists --
        # confirm exact semantics against pyscf.lib.numpy_helper.
        lib.takebak_2d(t2, t2tril, otril[0]*nocc+otril[1], vtril[0]*nvir+vtril[1])
        lib.takebak_2d(t2, t2tril, otril[1]*nocc+otril[0], vtril[1]*nvir+vtril[0])
        if anti_symm: # anti-symmetry when exchanging two particle indices
            t2tril = -t2tril
        # Mixed transposes pick up the (possibly negated) amplitudes.
        lib.takebak_2d(t2, t2tril, otril[0]*nocc+otril[1], vtril[1]*nvir+vtril[0])
        lib.takebak_2d(t2, t2tril, otril[1]*nocc+otril[0], vtril[0]*nvir+vtril[1])
    return t2.reshape(nocc,nocc,nvir,nvir)
def amplitudes_to_vector(t1, t2, out=None):
    '''Pack t1 and the lower triangle of t2 (in iajb ordering) into one
    flat 1D vector.  ``out`` optionally supplies the output buffer.'''
    nocc, nvir = t1.shape
    n_ov = nocc * nvir
    total = n_ov + n_ov * (n_ov + 1) // 2
    vec = numpy.ndarray(total, t1.dtype, buffer=out)
    vec[:n_ov] = t1.ravel()
    # View t2 as a symmetric (ia, jb) matrix and store its packed triangle.
    t2mat = t2.transpose(0,2,1,3).reshape(n_ov, n_ov)
    lib.pack_tril(t2mat, out=vec[n_ov:])
    return vec
def vector_to_amplitudes(vector, nmo, nocc):
    '''Inverse of amplitudes_to_vector: split the packed vector back into
    t1 with shape (nocc, nvir) and t2 with shape (nocc, nocc, nvir, nvir).'''
    nvir = nmo - nocc
    n_ov = nocc * nvir
    t1 = vector[:n_ov].copy().reshape((nocc, nvir))
    # filltriu=lib.SYMMETRIC because t2[iajb] == t2[jbia]
    t2ovov = lib.unpack_tril(vector[n_ov:], filltriu=lib.SYMMETRIC)
    t2 = t2ovov.reshape(nocc, nvir, nocc, nvir).transpose(0, 2, 1, 3)
    return t1, numpy.asarray(t2, order='C')
def amplitudes_to_vector_s4(t1, t2, out=None):
    '''Pack t1 plus the strictly-lower-triangular (i > j, a > b) part of t2
    into a single flat vector (4-fold antisymmetric storage).'''
    nocc, nvir = t1.shape
    nov = nocc * nvir
    noo_pair = nocc * (nocc - 1) // 2
    nvv_pair = nvir * (nvir - 1) // 2
    vec = numpy.ndarray(nov + noo_pair * nvv_pair, t1.dtype, buffer=out)
    vec[:nov] = t1.ravel()
    # Flat row/column indices of the strict lower triangles.
    occ_i, occ_j = numpy.tril_indices(nocc, k=-1)
    vir_a, vir_b = numpy.tril_indices(nvir, k=-1)
    lib.take_2d(t2.reshape(nocc**2, nvir**2), occ_i*nocc + occ_j,
                vir_a*nvir + vir_b, out=vec[nov:])
    return vec
def vector_to_amplitudes_s4(vector, nmo, nocc):
    '''Inverse of amplitudes_to_vector_s4: unpack t1 and the 4-fold
    (antisymmetric) t2 from a flat vector.

    Cleanup: removed a dead ``numpy.zeros((nocc,nocc,nvir,nvir))``
    allocation that was immediately overwritten by the _unpack_4fold
    result in the original.
    '''
    nvir = nmo - nocc
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    t1 = vector[:nov].copy().reshape(nocc,nvir)
    t2 = _unpack_4fold(vector[nov:size], nocc, nvir)
    return t1, t2
def energy(mycc, t1=None, t2=None, eris=None):
    '''CCSD correlation energy'''
    if t1 is None: t1 = mycc.t1
    if t2 is None: t2 = mycc.t2
    if eris is None: eris = mycc.ao2mo()
    nocc, nvir = t1.shape
    fock = eris.fock
    # Singles contribution: 2 * sum_ia f_ia t1_ia.
    e = numpy.einsum('ia,ia', fock[:nocc,nocc:], t1) * 2
    # Block over the virtual index so one ovvo slice fits in memory.
    max_memory = mycc.max_memory - lib.current_memory()[0]
    blksize = int(min(nvir, max(BLKMIN, max_memory*.3e6/8/(nocc**2*nvir+1))))
    for p0, p1 in lib.prange(0, nvir, blksize):
        eris_ovvo = eris.ovvo[:,p0:p1]
        # tau = t2 + t1 (x) t1: the effective doubles amplitudes.
        tau = t2[:,:,p0:p1] + numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        e += 2 * numpy.einsum('ijab,iabj', tau, eris_ovvo)
        e -= numpy.einsum('jiab,iabj', tau, eris_ovvo)
    if abs(e.imag) > 1e-4:
        logger.warn(mycc, 'Non-zero imaginary part found in CCSD energy %s', e)
    return e.real
def restore_from_diis_(mycc, diis_file, inplace=True):
    '''Restore a CCSD calculation from an existing DIIS file.

    The amplitude vector stored in the DIIS object is extrapolated and
    unpacked into t1/t2, overwriting the amplitudes of the CCSD object.
    When ``inplace`` is set, the DIIS object itself (amplitude and error
    vectors) is also attached to ``mycc`` so that the subsequent CCSD
    iterations continue to reuse it.
    '''
    diis_obj = lib.diis.DIIS(mycc, mycc.diis_file, incore=mycc.incore_complete)
    diis_obj.restore(diis_file, inplace=inplace)
    amp_vec = diis_obj.extrapolate()
    mycc.t1, mycc.t2 = mycc.vector_to_amplitudes(amp_vec)
    if inplace:
        mycc.diis = diis_obj
    return mycc
def get_t1_diagnostic(t1):
    '''T1 diagnostic: Frobenius norm of t1 normalized by the number of
    correlated electrons (2 * nocc for a closed-shell reference).'''
    n_corr_elec = 2 * t1.shape[0]
    frob = numpy.linalg.norm(t1)
    return numpy.sqrt(frob * frob / n_corr_elec)
def get_d1_diagnostic(t1):
    '''D1 diagnostic given in
    Janssen, et. al Chem. Phys. Lett. 290 (1998) 423

    Computed as the square root of the largest-magnitude eigenvalue of the
    occupied block t1.t1^T and the virtual block t1^T.t1 (i.e. the largest
    singular value of t1).
    '''
    def _max_sqrt_eigval(mat):
        eigvals = numpy.linalg.eigh(mat)[0]
        return numpy.sqrt(numpy.abs(eigvals)).max()

    occ_block = numpy.einsum('ia,ja->ij', t1, t1)
    vir_block = numpy.einsum('ia,ib->ab', t1, t1)
    return max(_max_sqrt_eigval(occ_block), _max_sqrt_eigval(vir_block))
def get_d2_diagnostic(t2):
    '''D2 diagnostic given in
    Nielsen, et. al Chem. Phys. Lett. 310 (1999) 568
    Note: This is currently only defined in the literature for restricted
    closed-shell systems.

    Computed as the square root of the largest-magnitude eigenvalue of the
    occupied contraction t2[ikab]t2[jkab] and the virtual contraction
    t2[ijac]t2[ijbc].
    '''
    def _max_sqrt_eigval(mat):
        eigvals = numpy.linalg.eigh(mat)[0]
        return numpy.sqrt(numpy.abs(eigvals)).max()

    occ_block = numpy.einsum('ikab,jkab->ij', t2, t2)
    vir_block = numpy.einsum('ijac,ijbc->ab', t2, t2)
    return max(_max_sqrt_eigval(occ_block), _max_sqrt_eigval(vir_block))
def as_scanner(cc):
    '''Generating a scanner/solver for CCSD PES.
    The returned solver is a function. This function requires one argument
    "mol" as input and returns total CCSD energy.
    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation. All parameters assigned in the
    CCSD and the underlying SCF objects (conv_tol, max_memory etc) are
    automatically applied in the solver.
    Note scanner has side effects. It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.
    Examples::
    >>> from pyscf import gto, scf, cc
    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
    >>> cc_scanner = cc.CCSD(scf.RHF(mol)).as_scanner()
    >>> e_tot = cc_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
    >>> e_tot = cc_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    '''
    # Already a scanner: nothing to wrap.
    if isinstance(cc, lib.SinglePointScanner):
        return cc

    logger.info(cc, 'Set %s as a scanner', cc.__class__)

    class CCSD_Scanner(cc.__class__, lib.SinglePointScanner):
        def __init__(self, cc):
            # Take over all attributes of the wrapped CC object, then make
            # the underlying mean-field object a scanner too.
            self.__dict__.update(cc.__dict__)
            self._scf = cc._scf.as_scanner()

        def __call__(self, mol_or_geom, **kwargs):
            # Accept either a Mole object or a bare geometry spec.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)

            # Remember the amplitude-vector size of the previous point so we
            # can detect when the orbital spaces changed.
            if self.t2 is not None:
                last_size = self.vector_size()
            else:
                last_size = 0

            self.reset(mol)

            # Run the SCF scanner first; its orbitals seed this CC run.
            mf_scanner = self._scf
            mf_scanner(mol)
            self.mo_coeff = mf_scanner.mo_coeff
            self.mo_occ = mf_scanner.mo_occ
            # If the amplitude dimensions changed, the old t1/t2 cannot be
            # reused as an initial guess.
            if last_size != self.vector_size():
                self.t1 = self.t2 = None
            self.kernel(self.t1, self.t2, **kwargs)
            return self.e_tot

    return CCSD_Scanner(cc)
class CCSD(lib.StreamObject):
    '''restricted CCSD

    Attributes:
        verbose : int
            Print level. Default value equals to :class:`Mole.verbose`
        max_memory : float or int
            Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
        conv_tol : float
            converge threshold. Default is 1e-7.
        conv_tol_normt : float
            converge threshold for norm(t1,t2). Default is 1e-5.
        max_cycle : int
            max number of iterations. Default is 50.
        diis_space : int
            DIIS space size. Default is 6.
        diis_start_cycle : int
            The step to start DIIS. Default is 0.
        iterative_damping : float
            The self consistent damping parameter.
        direct : bool
            AO-direct CCSD. Default is False.
        async_io : bool
            Allow for asynchronous function execution. Default is True.
        incore_complete : bool
            Avoid all I/O (also for DIIS). Default is False.
        level_shift : float
            A shift on virtual orbital energies to stablize the CCSD iteration
        frozen : int or list
            If integer is given, the inner-most orbitals are frozen from CC
            amplitudes. Given the orbital indices (0-based) in a list, both
            occupied and virtual orbitals can be frozen in CC calculation.

            >>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
            >>> mf = scf.RHF(mol).run()
            >>> # freeze 2 core orbitals
            >>> mycc = cc.CCSD(mf).set(frozen = 2).run()
            >>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
            >>> mycc.set(frozen = [0,1,16,17,18]).run()

    Saved results

        converged : bool
            CCSD converged or not
        e_corr : float
            CCSD correlation correction
        e_tot : float
            Total CCSD energy (HF + correlation)
        t1, t2 :
            T amplitudes t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)
        l1, l2 :
            Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)
    '''

    # Class-level defaults; each one can be overridden through the pyscf
    # __config__ mechanism.
    max_cycle = getattr(__config__, 'cc_ccsd_CCSD_max_cycle', 50)
    conv_tol = getattr(__config__, 'cc_ccsd_CCSD_conv_tol', 1e-7)
    iterative_damping = getattr(__config__, 'cc_ccsd_CCSD_iterative_damping', 1.0)
    conv_tol_normt = getattr(__config__, 'cc_ccsd_CCSD_conv_tol_normt', 1e-5)
    diis = getattr(__config__, 'cc_ccsd_CCSD_diis', True)
    diis_space = getattr(__config__, 'cc_ccsd_CCSD_diis_space', 6)
    diis_file = None
    diis_start_cycle = getattr(__config__, 'cc_ccsd_CCSD_diis_start_cycle', 0)
    # FIXME: Should we avoid DIIS starting early?
    diis_start_energy_diff = getattr(__config__, 'cc_ccsd_CCSD_diis_start_energy_diff', 1e9)
    direct = getattr(__config__, 'cc_ccsd_CCSD_direct', False)
    async_io = getattr(__config__, 'cc_ccsd_CCSD_async_io', True)
    incore_complete = getattr(__config__, 'cc_ccsd_CCSD_incore_complete', False)
    cc2 = getattr(__config__, 'cc_ccsd_CCSD_cc2', False)

    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # Guard against the pre-v0.10 API where a Mole was passed directly.
        if isinstance(mf, gto.Mole):
            raise RuntimeError('''
You see this error message because of the API updates in pyscf v0.10.
In the new API, the first argument of CC class is HF objects. Please see
http://sunqm.net/pyscf/code-rule.html#api-rules for the details of API conventions''')

        from pyscf.scf import hf
        if isinstance(mf, hf.KohnShamDFT):
            raise RuntimeError('CCSD Warning: The first argument mf is a DFT object. '
                               'CCSD calculation should be initialized with HF object.\n'
                               'DFT object can be converted to HF object with '
                               'the code:\n'
                               '    mf_hf = mol.HF()\n'
                               '    if getattr(mf_dft, "with_x2c", False):\n'
                               '        mf_hf = mf_hf.x2c()\n'
                               '    mf_hf.__dict__.update(mf_dft.__dict__)\n')

        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ

        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory

        self.frozen = frozen
        self.incore_complete = self.incore_complete or self.mol.incore_anyway
        self.level_shift = 0

##################################################
# don't modify the following attributes, they are not input options
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        self.converged = False
        self.converged_lambda = False
        self.emp2 = None
        self.e_hf = None
        self.e_corr = None
        self.t1 = None
        self.t2 = None
        self.l1 = None
        self.l2 = None
        self._nocc = None
        self._nmo = None
        self.chkfile = mf.chkfile

        keys = set(('max_cycle', 'conv_tol', 'iterative_damping',
                    'conv_tol_normt', 'diis', 'diis_space', 'diis_file',
                    'diis_start_cycle', 'diis_start_energy_diff', 'direct',
                    'async_io', 'incore_complete', 'cc2'))
        self._keys = set(self.__dict__.keys()).union(keys)

    @property
    def ecc(self):
        # Alias for the correlation energy.
        return self.e_corr

    @property
    def e_tot(self):
        # Total energy = HF reference energy + correlation energy.
        return (self.e_hf or self._scf.e_tot) + self.e_corr

    @property
    def nocc(self):
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n

    def reset(self, mol=None):
        # Discard mol-dependent state so the object can be reused (scanner).
        if mol is not None:
            self.mol = mol
        self._scf.reset(mol)
        return self

    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask

    def dump_flags(self, verbose=None):
        '''Print the current settings of this CCSD object to the log.'''
        log = logger.new_logger(self, verbose)
        log.info('')
        log.info('******** %s ********', self.__class__)
        log.info('CC2 = %g', self.cc2)
        log.info('CCSD nocc = %s, nmo = %s', self.nocc, self.nmo)
        if self.frozen is not None:
            log.info('frozen orbitals %s', self.frozen)
        log.info('max_cycle = %d', self.max_cycle)
        log.info('direct = %d', self.direct)
        log.info('conv_tol = %g', self.conv_tol)
        log.info('conv_tol_normt = %s', self.conv_tol_normt)
        log.info('diis_space = %d', self.diis_space)
        #log.info('diis_file = %s', self.diis_file)
        log.info('diis_start_cycle = %d', self.diis_start_cycle)
        log.info('diis_start_energy_diff = %g', self.diis_start_energy_diff)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        if (log.verbose >= logger.DEBUG1 and
            self.__class__ == CCSD):
            nocc = self.nocc
            nvir = self.nmo - self.nocc
            flops = _flops(nocc, nvir)
            log.debug1('total FLOPs %s', flops)
        return self

    def get_init_guess(self, eris=None):
        # MP2 amplitudes serve as the initial guess; drop the MP2 energy.
        return self.init_amps(eris)[1:]

    def init_amps(self, eris=None):
        '''Initialize t1/t2 from MP2 amplitudes.

        Returns:
            (emp2, t1, t2) where emp2 is the MP2 correlation energy.
        '''
        time0 = logger.process_clock(), logger.perf_counter()
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        mo_e = eris.mo_energy
        nocc = self.nocc
        nvir = mo_e.size - nocc
        eia = mo_e[:nocc,None] - mo_e[None,nocc:]
        t1 = eris.fock[:nocc,nocc:] / eia
        t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.ovov.dtype)
        max_memory = self.max_memory - lib.current_memory()[0]
        # Block over virtuals so eris.ovov is read in memory-sized chunks.
        blksize = int(min(nvir, max(BLKMIN, max_memory*.3e6/8/(nocc**2*nvir+1))))
        emp2 = 0
        for p0, p1 in lib.prange(0, nvir, blksize):
            eris_ovov = eris.ovov[:,p0:p1]
            t2[:,:,p0:p1] = (eris_ovov.transpose(0,2,1,3).conj()
                             / lib.direct_sum('ia,jb->ijab', eia[:,p0:p1], eia))
            emp2 += 2 * numpy.einsum('ijab,iajb', t2[:,:,p0:p1], eris_ovov)
            emp2 -= numpy.einsum('jiab,iajb', t2[:,:,p0:p1], eris_ovov)
        self.emp2 = emp2.real

        e_hf = self.e_hf or eris.e_hf
        logger.info(self, 'Init t2, MP2 energy = %.15g  E_corr(MP2) %.15g',
                    e_hf + self.emp2, self.emp2)
        logger.timer(self, 'init mp2', *time0)
        return self.emp2, t1, t2

    energy = energy
    _add_vvvv = _add_vvvv
    update_amps = update_amps

    def kernel(self, t1=None, t2=None, eris=None):
        '''Main driver; delegates to :meth:`ccsd`.'''
        return self.ccsd(t1, t2, eris)

    def ccsd(self, t1=None, t2=None, eris=None):
        '''Solve the CCSD amplitude equations.

        Returns:
            (e_corr, t1, t2)
        '''
        assert(self.mo_coeff is not None)
        assert(self.mo_occ is not None)

        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()

        if eris is None:
            eris = self.ao2mo(self.mo_coeff)

        self.e_hf = getattr(eris, 'e_hf', None)
        if self.e_hf is None:
            self.e_hf = self._scf.e_tot

        self.converged, self.e_corr, self.t1, self.t2 = \
            kernel(self, eris, t1, t2, max_cycle=self.max_cycle,
                   tol=self.conv_tol, tolnormt=self.conv_tol_normt,
                   verbose=self.verbose)
        self._finalize()
        return self.e_corr, self.t1, self.t2

    def _finalize(self):
        '''Hook for dumping results and clearing up the object.'''
        if self.converged:
            logger.info(self, '%s converged', self.__class__.__name__)
        else:
            logger.note(self, '%s not converged', self.__class__.__name__)
        logger.note(self, 'E(%s) = %.16g  E_corr = %.16g',
                    self.__class__.__name__, self.e_tot, self.e_corr)
        return self

    as_scanner = as_scanner
    restore_from_diis_ = restore_from_diis_

    def solve_lambda(self, t1=None, t2=None, l1=None, l2=None,
                     eris=None):
        '''Solve the CCSD lambda equations; returns (l1, l2).'''
        from pyscf.cc import ccsd_lambda
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if eris is None: eris = self.ao2mo(self.mo_coeff)
        self.converged_lambda, self.l1, self.l2 = \
            ccsd_lambda.kernel(self, eris, t1, t2, l1, l2,
                               max_cycle=self.max_cycle,
                               tol=self.conv_tol_normt,
                               verbose=self.verbose)
        return self.l1, self.l2

    def ccsd_t(self, t1=None, t2=None, eris=None):
        '''Perturbative triples correction (T); returns the energy correction.'''
        from pyscf.cc import ccsd_t
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if eris is None: eris = self.ao2mo(self.mo_coeff)
        return ccsd_t.kernel(self, eris, t1, t2, self.verbose)

    def ipccsd(self, nroots=1, left=False, koopmans=False, guess=None,
               partition=None, eris=None):
        # EOM-CCSD ionization potentials.
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMIP(self).kernel(nroots, left, koopmans, guess,
                                            partition, eris)

    def eaccsd(self, nroots=1, left=False, koopmans=False, guess=None,
               partition=None, eris=None):
        # EOM-CCSD electron affinities.
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEA(self).kernel(nroots, left, koopmans, guess,
                                            partition, eris)

    def eeccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
        # EOM-CCSD excitation energies.
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEE(self).kernel(nroots, koopmans, guess, eris)

    def eomee_ccsd_singlet(self, nroots=1, koopmans=False, guess=None, eris=None):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEESinglet(self).kernel(nroots, koopmans, guess, eris)

    def eomee_ccsd_triplet(self, nroots=1, koopmans=False, guess=None, eris=None):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEETriplet(self).kernel(nroots, koopmans, guess, eris)

    def eomsf_ccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEESpinFlip(self).kernel(nroots, koopmans, guess, eris)

    def eomip_method(self):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMIP(self)

    def eomea_method(self):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEA(self)

    def eomee_method(self):
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEE(self)

    def make_rdm1(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
        '''Un-relaxed 1-particle density matrix in MO space'''
        from pyscf.cc import ccsd_rdm
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if l1 is None: l1 = self.l1
        if l2 is None: l2 = self.l2
        # Lambda amplitudes are required; solve for them on demand.
        if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
        return ccsd_rdm.make_rdm1(self, t1, t2, l1, l2, ao_repr=ao_repr)

    def make_rdm2(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
        '''2-particle density matrix in MO space.  The density matrix is
        stored as

        dm2[p,r,q,s] = <p^+ q^+ s r>
        '''
        from pyscf.cc import ccsd_rdm
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if l1 is None: l1 = self.l1
        if l2 is None: l2 = self.l2
        # Lambda amplitudes are required; solve for them on demand.
        if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
        return ccsd_rdm.make_rdm2(self, t1, t2, l1, l2, ao_repr=ao_repr)

    def ao2mo(self, mo_coeff=None):
        '''Transform AO integrals to the MO basis, choosing an incore,
        DF-based, or outcore backend based on available memory.'''
        # Pseudo code how eris are implemented:
        # nocc = self.nocc
        # nmo = self.nmo
        # nvir = nmo - nocc
        # eris = _ChemistsERIs()
        # eri = ao2mo.incore.full(self._scf._eri, mo_coeff)
        # eri = ao2mo.restore(1, eri, nmo)
        # eris.oooo = eri[:nocc,:nocc,:nocc,:nocc].copy()
        # eris.ovoo = eri[:nocc,nocc:,:nocc,:nocc].copy()
        # eris.ovvo = eri[nocc:,:nocc,nocc:,:nocc].copy()
        # eris.ovov = eri[nocc:,:nocc,:nocc,nocc:].copy()
        # eris.oovv = eri[:nocc,:nocc,nocc:,nocc:].copy()
        # ovvv = eri[:nocc,nocc:,nocc:,nocc:].copy()
        # eris.ovvv = lib.pack_tril(ovvv.reshape(-1,nvir,nvir))
        # eris.vvvv = ao2mo.restore(4, eri[nocc:,nocc:,nocc:,nocc:], nvir)
        # eris.fock = numpy.diag(self._scf.mo_energy)
        # return eris

        nmo = self.nmo
        nao = self.mo_coeff.shape[0]
        nmo_pair = nmo * (nmo+1) // 2
        nao_pair = nao * (nao+1) // 2
        # Estimated memory (MB) of the incore transformation.
        mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
        mem_now = lib.current_memory()[0]
        if (self._scf._eri is not None and
            (mem_incore+mem_now < self.max_memory or self.incore_complete)):
            return _make_eris_incore(self, mo_coeff)

        elif getattr(self._scf, 'with_df', None):
            logger.warn(self, 'CCSD detected DF being used in the HF object. '
                        'MO integrals are computed based on the DF 3-index tensors.\n'
                        'It\'s recommended to use dfccsd.CCSD for the '
                        'DF-CCSD calculations')
            return _make_df_eris_outcore(self, mo_coeff)

        else:
            return _make_eris_outcore(self, mo_coeff)

    def run_diis(self, t1, t2, istep, normt, de, adiis):
        '''Apply DIIS extrapolation to (t1, t2) once the start conditions
        (cycle count and energy change) are met.'''
        if (adiis and
            istep >= self.diis_start_cycle and
            abs(de) < self.diis_start_energy_diff):
            vec = self.amplitudes_to_vector(t1, t2)
            t1, t2 = self.vector_to_amplitudes(adiis.update(vec))
            logger.debug1(self, 'DIIS for step %d', istep)
        return t1, t2

    def amplitudes_to_vector(self, t1, t2, out=None):
        '''Flatten (t1, t2) into a single vector (for DIIS etc.).'''
        return amplitudes_to_vector(t1, t2, out)

    def vector_to_amplitudes(self, vec, nmo=None, nocc=None):
        '''Inverse of :meth:`amplitudes_to_vector`.'''
        if nocc is None: nocc = self.nocc
        if nmo is None: nmo = self.nmo
        return vector_to_amplitudes(vec, nmo, nocc)

    def vector_size(self, nmo=None, nocc=None):
        '''Length of the flattened amplitude vector.'''
        if nocc is None: nocc = self.nocc
        if nmo is None: nmo = self.nmo
        nvir = nmo - nocc
        nov = nocc * nvir
        return nov + nov*(nov+1)//2

    def dump_chk(self, t1_t2=None, frozen=None, mo_coeff=None, mo_occ=None):
        '''Save the CCSD results to the checkpoint file, if one is set.'''
        if not self.chkfile:
            return self

        if t1_t2 is None: t1_t2 = self.t1, self.t2
        t1, t2 = t1_t2

        if frozen is None: frozen = self.frozen
        # "None" cannot be serialized by the chkfile module
        if frozen is None:
            frozen = 0

        cc_chk = {'e_corr': self.e_corr,
                  't1': t1,
                  't2': t2,
                  'frozen': frozen}

        if mo_coeff is not None: cc_chk['mo_coeff'] = mo_coeff
        if mo_occ is not None: cc_chk['mo_occ'] = mo_occ
        if self._nmo is not None: cc_chk['_nmo'] = self._nmo
        if self._nocc is not None: cc_chk['_nocc'] = self._nocc

        lib.chkfile.save(self.chkfile, 'ccsd', cc_chk)

    def density_fit(self, auxbasis=None, with_df=None):
        '''Return a density-fitted CCSD object (dfccsd.RCCSD) built from
        the same SCF reference.'''
        from pyscf.cc import dfccsd
        mycc = dfccsd.RCCSD(self._scf, self.frozen, self.mo_coeff, self.mo_occ)
        if with_df is not None:
            mycc.with_df = with_df
        if mycc.with_df.auxbasis != auxbasis:
            import copy
            mycc.with_df = copy.copy(mycc.with_df)
            mycc.with_df.auxbasis = auxbasis
        return mycc

    def nuc_grad_method(self):
        from pyscf.grad import ccsd
        return ccsd.Gradients(self)

    def get_t1_diagnostic(self, t1=None):
        if t1 is None: t1 = self.t1
        return get_t1_diagnostic(t1)

    def get_d1_diagnostic(self, t1=None):
        if t1 is None: t1 = self.t1
        return get_d1_diagnostic(t1)

    def get_d2_diagnostic(self, t2=None):
        if t2 is None: t2 = self.t2
        return get_d2_diagnostic(t2)
# Aliases: CC and RCCSD both refer to the restricted CCSD class.
CC = RCCSD = CCSD
class _ChemistsERIs:
    '''(pq|rs)

    Container for the MO-basis two-electron integrals in chemists' notation,
    split into occupied/virtual blocks (oooo, ovoo, ...), plus the MO-basis
    Fock matrix and reference HF energy.
    '''
    def __init__(self, mol=None):
        self.mol = mol
        self.mo_coeff = None
        self.nocc = None
        self.fock = None
        self.e_hf = None

        # Integral blocks; o = occupied, v = virtual.  ovvv and vvvv are
        # stored with the last virtual pair triangularly packed.
        self.oooo = None
        self.ovoo = None
        self.oovv = None
        self.ovvo = None
        self.ovov = None
        self.ovvv = None
        self.vvvv = None

    def _common_init_(self, mycc, mo_coeff=None):
        if mo_coeff is None:
            mo_coeff = mycc.mo_coeff
        self.mo_coeff = mo_coeff = _mo_without_core(mycc, mo_coeff)

# Note: Recomputed fock matrix and HF energy since SCF may not be fully converged.
        dm = mycc._scf.make_rdm1(mycc.mo_coeff, mycc.mo_occ)
        vhf = mycc._scf.get_veff(mycc.mol, dm)
        fockao = mycc._scf.get_fock(vhf=vhf, dm=dm)
        self.fock = reduce(numpy.dot, (mo_coeff.conj().T, fockao, mo_coeff))
        self.e_hf = mycc._scf.energy_tot(dm=dm, vhf=vhf)
        nocc = self.nocc = mycc.nocc
        self.mol = mycc.mol

        # Note self.mo_energy can be different to fock.diagonal().
        # self.mo_energy is used in the initial guess function (to generate
        # MP2 amplitudes) and CCSD update_amps preconditioner.
        # fock.diagonal() should only be used to compute the expectation value
        # of Slater determinants.
        mo_e = self.mo_energy = self.fock.diagonal().real
        try:
            gap = abs(mo_e[:nocc,None] - mo_e[None,nocc:]).min()
            if gap < 1e-5:
                logger.warn(mycc, 'HOMO-LUMO gap %s too small for CCSD.\n'
                            'CCSD may be difficult to converge. Increasing '
                            'CCSD Attribute level_shift may improve '
                            'convergence.', gap)
        except ValueError:  # gap.size == 0
            pass
        return self

    def get_ovvv(self, *slices):
        '''To access a subblock of ovvv tensor'''
        ovw = numpy.asarray(self.ovvv[slices])
        nocc, nvir, nvir_pair = ovw.shape
        # Unpack the triangularly-stored last virtual pair into a full block.
        ovvv = lib.unpack_tril(ovw.reshape(nocc*nvir,nvir_pair))
        nvir1 = ovvv.shape[2]
        return ovvv.reshape(nocc,nvir,nvir1,nvir1)

    def _contract_vvvv_t2(self, mycc, t2, vvvv_or_direct=False, out=None, verbose=None):
        # Dispatch the (vv|vv).t2 contraction: explicit integral array,
        # AO-direct (vvvv=None), or the stored vvvv block.
        if isinstance(vvvv_or_direct, numpy.ndarray):
            vvvv = vvvv_or_direct
        elif vvvv_or_direct:  # AO-direct contraction
            vvvv = None
        else:
            vvvv = self.vvvv
        return _contract_vvvv_t2(mycc, self.mol, vvvv, t2, out, verbose)

    def _contract_vvvv_oov(self, mycc, r2, out=None):
        raise NotImplementedError

    def _contract_vvvv_ovv(self, mycc, r2, out=None):
        raise NotImplementedError
def _make_eris_incore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container fully in memory from the SCF AO
    integrals (4-fold symmetric, triangularly packed).'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)
    nocc = eris.nocc
    nmo = eris.fock.shape[0]
    nvir = nmo - nocc

    eri1 = ao2mo.incore.full(mycc._scf._eri, eris.mo_coeff)
    #:eri1 = ao2mo.restore(1, eri1, nmo)
    #:eris.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
    #:eris.ovoo = eri1[:nocc,nocc:,:nocc,:nocc].copy()
    #:eris.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
    #:eris.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
    #:eris.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
    #:ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
    #:eris.ovvv = lib.pack_tril(ovvv.reshape(-1,nvir,nvir)).reshape(nocc,nvir,-1)
    #:eris.vvvv = ao2mo.restore(4, eri1[nocc:,nocc:,nocc:,nocc:], nvir)
    # Ensure 4-fold (s4) packed storage before the triangular unpacking below.
    if eri1.ndim == 4:
        eri1 = ao2mo.restore(4, eri1, nmo)

    nvir_pair = nvir * (nvir+1) // 2
    eris.oooo = numpy.empty((nocc,nocc,nocc,nocc))
    eris.ovoo = numpy.empty((nocc,nvir,nocc,nocc))
    eris.ovvo = numpy.empty((nocc,nvir,nvir,nocc))
    eris.ovov = numpy.empty((nocc,nvir,nocc,nvir))
    eris.ovvv = numpy.empty((nocc,nvir,nvir_pair))
    eris.vvvv = numpy.empty((nvir_pair,nvir_pair))

    # First pass: rows (ij) with i occupied.  ij indexes the packed
    # lower-triangular first pair of eri1.
    ij = 0
    outbuf = numpy.empty((nmo,nmo,nmo))
    oovv = numpy.empty((nocc,nocc,nvir,nvir))
    for i in range(nocc):
        buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
        for j in range(i+1):
            # Fill both (i,j) and (j,i) from the triangular storage.
            eris.oooo[i,j] = eris.oooo[j,i] = buf[j,:nocc,:nocc]
            oovv[i,j] = oovv[j,i] = buf[j,nocc:,nocc:]
        ij += i + 1
    eris.oovv = oovv
    oovv = None

    # Second pass: rows with i virtual; ij1 tracks the packed vv row offset.
    ij1 = 0
    for i in range(nocc,nmo):
        buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
        eris.ovoo[:,i-nocc] = buf[:nocc,:nocc,:nocc]
        eris.ovvo[:,i-nocc] = buf[:nocc,nocc:,:nocc]
        eris.ovov[:,i-nocc] = buf[:nocc,:nocc,nocc:]
        eris.ovvv[:,i-nocc] = lib.pack_tril(buf[:nocc,nocc:,nocc:])
        dij = i - nocc + 1
        lib.pack_tril(buf[nocc:i+1,nocc:,nocc:],
                      out=eris.vvvv[ij1:ij1+dij])
        ij += i + 1
        ij1 += dij
    logger.timer(mycc, 'CCSD integral transformation', *cput0)
    return eris
def _make_eris_outcore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container with the integral blocks stored in a
    temporary HDF5 file, transforming the AO integrals block by block.'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(mycc.stdout, mycc.verbose)
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)

    mol = mycc.mol
    mo_coeff = numpy.asarray(eris.mo_coeff, order='F')
    nocc = eris.nocc
    nao, nmo = mo_coeff.shape
    nvir = nmo - nocc
    orbo = mo_coeff[:,:nocc]
    orbv = mo_coeff[:,nocc:]
    nvpair = nvir * (nvir+1) // 2
    eris.feri1 = lib.H5TmpFile()
    eris.oooo = eris.feri1.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')
    eris.oovv = eris.feri1.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))
    eris.ovoo = eris.feri1.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))
    eris.ovvo = eris.feri1.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))
    eris.ovov = eris.feri1.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))
    eris.ovvv = eris.feri1.create_dataset('ovvv', (nocc,nvir,nvpair), 'f8')

    def save_occ_frac(p0, p1, eri):
        # Store the occupied-row slice [p0:p1] into oooo/oovv.
        eri = eri.reshape(p1-p0,nocc,nmo,nmo)
        eris.oooo[p0:p1] = eri[:,:,:nocc,:nocc]
        eris.oovv[p0:p1] = eri[:,:,nocc:,nocc:]

    def save_vir_frac(p0, p1, eri):
        # Store the virtual-row slice [p0:p1] into ovoo/ovvo/ovov/ovvv.
        eri = eri.reshape(p1-p0,nocc,nmo,nmo)
        eris.ovoo[:,p0:p1] = eri[:,:,:nocc,:nocc].transpose(1,0,2,3)
        eris.ovvo[:,p0:p1] = eri[:,:,nocc:,:nocc].transpose(1,0,2,3)
        eris.ovov[:,p0:p1] = eri[:,:,:nocc,nocc:].transpose(1,0,2,3)
        vvv = lib.pack_tril(eri[:,:,nocc:,nocc:].reshape((p1-p0)*nocc,nvir,nvir))
        eris.ovvv[:,p0:p1] = vvv.reshape(p1-p0,nocc,nvpair).transpose(1,0,2)

    cput1 = logger.process_clock(), logger.perf_counter()
    if not mycc.direct:
        # AO-direct mode skips the explicit (vv|vv) transformation.
        max_memory = max(MEMORYMIN, mycc.max_memory-lib.current_memory()[0])
        eris.feri2 = lib.H5TmpFile()
        ao2mo.full(mol, orbv, eris.feri2, max_memory=max_memory, verbose=log)
        eris.vvvv = eris.feri2['eri_mo']
        cput1 = log.timer_debug1('transforming vvvv', *cput1)

    # Half-transform (AO,AO|AO,occ) into a swap file, then finish the
    # transformation block by block below.
    fswap = lib.H5TmpFile()
    max_memory = max(MEMORYMIN, mycc.max_memory-lib.current_memory()[0])
    int2e = mol._add_suffix('int2e')
    ao2mo.outcore.half_e1(mol, (mo_coeff,orbo), fswap, int2e,
                          's4', 1, max_memory, verbose=log)

    ao_loc = mol.ao_loc_nr()
    nao_pair = nao * (nao+1) // 2
    blksize = int(min(8e9,max_memory*.5e6)/8/(nao_pair+nmo**2)/nocc)
    blksize = min(nmo, max(BLKMIN, blksize))
    log.debug1('blksize %d', blksize)
    cput2 = cput1

    fload = ao2mo.outcore._load_from_h5g
    # Double buffering: one buffer is transformed while the next is loaded
    # asynchronously (unless async_io is disabled).
    buf = numpy.empty((blksize*nocc,nao_pair))
    buf_prefetch = numpy.empty_like(buf)
    def load(buf_prefetch, p0, rowmax):
        if p0 < rowmax:
            p1 = min(rowmax, p0+blksize)
            fload(fswap['0'], p0*nocc, p1*nocc, buf_prefetch)

    outbuf = numpy.empty((blksize*nocc,nmo**2))
    with lib.call_in_background(load, sync=not mycc.async_io) as prefetch:
        prefetch(buf_prefetch, 0, nocc)
        for p0, p1 in lib.prange(0, nocc, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, p1, nocc)

            nrow = (p1 - p0) * nocc
            dat = ao2mo._ao2mo.nr_e2(buf[:nrow], mo_coeff, (0,nmo,0,nmo),
                                     's4', 's1', out=outbuf, ao_loc=ao_loc)
            save_occ_frac(p0, p1, dat)
        cput2 = log.timer_debug1('transforming oopp', *cput2)

        prefetch(buf_prefetch, nocc, nmo)
        for p0, p1 in lib.prange(0, nvir, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, nocc+p1, nmo)

            nrow = (p1 - p0) * nocc
            dat = ao2mo._ao2mo.nr_e2(buf[:nrow], mo_coeff, (0,nmo,0,nmo),
                                     's4', 's1', out=outbuf, ao_loc=ao_loc)
            save_vir_frac(p0, p1, dat)
            cput2 = log.timer_debug1('transforming ovpp [%d:%d]'%(p0,p1), *cput2)

    cput1 = log.timer_debug1('transforming oppp', *cput1)
    log.timer('CCSD integral transformation', *cput0)
    return eris
def _make_df_eris_outcore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container from the density-fitting 3-index
    tensors of the SCF object, assembling each 4-index block as a product
    of two 3-index factors and storing it in a temporary HDF5 file.'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(mycc.stdout, mycc.verbose)
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)

    mo_coeff = numpy.asarray(eris.mo_coeff, order='F')
    nocc = eris.nocc
    nao, nmo = mo_coeff.shape
    nvir = nmo - nocc
    nvir_pair = nvir*(nvir+1)//2

    naux = mycc._scf.with_df.get_naoaux()
    # 3-index MO factors L_{P,pq} split into occ/vir blocks; Lvv keeps the
    # virtual pair triangularly packed.
    Loo = numpy.empty((naux,nocc,nocc))
    Lov = numpy.empty((naux,nocc,nvir))
    Lvo = numpy.empty((naux,nvir,nocc))
    Lvv = numpy.empty((naux,nvir_pair))
    ijslice = (0, nmo, 0, nmo)
    Lpq = None
    p1 = 0
    for eri1 in mycc._scf.with_df.loop():
        Lpq = _ao2mo.nr_e2(eri1, mo_coeff, ijslice, aosym='s2', out=Lpq).reshape(-1,nmo,nmo)
        p0, p1 = p1, p1 + Lpq.shape[0]
        Loo[p0:p1] = Lpq[:,:nocc,:nocc]
        Lov[p0:p1] = Lpq[:,:nocc,nocc:]
        Lvo[p0:p1] = Lpq[:,nocc:,:nocc]
        Lvv[p0:p1] = lib.pack_tril(Lpq[:,nocc:,nocc:].reshape(-1,nvir,nvir))
    Loo = Loo.reshape(naux,nocc*nocc)
    Lov = Lov.reshape(naux,nocc*nvir)
    Lvo = Lvo.reshape(naux,nocc*nvir)

    eris.feri1 = lib.H5TmpFile()
    eris.oooo = eris.feri1.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')
    eris.oovv = eris.feri1.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))
    eris.ovoo = eris.feri1.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))
    eris.ovvo = eris.feri1.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))
    eris.ovov = eris.feri1.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))
    eris.ovvv = eris.feri1.create_dataset('ovvv', (nocc,nvir,nvir_pair), 'f8')
    eris.vvvv = eris.feri1.create_dataset('vvvv', (nvir_pair,nvir_pair), 'f8')

    # (pq|rs) = sum_P L_{P,pq} L_{P,rs}
    eris.oooo[:] = lib.ddot(Loo.T, Loo).reshape(nocc,nocc,nocc,nocc)
    eris.ovoo[:] = lib.ddot(Lov.T, Loo).reshape(nocc,nvir,nocc,nocc)
    eris.oovv[:] = lib.unpack_tril(lib.ddot(Loo.T, Lvv)).reshape(nocc,nocc,nvir,nvir)
    eris.ovvo[:] = lib.ddot(Lov.T, Lvo).reshape(nocc,nvir,nvir,nocc)
    eris.ovov[:] = lib.ddot(Lov.T, Lov).reshape(nocc,nvir,nocc,nvir)
    eris.ovvv[:] = lib.ddot(Lov.T, Lvv).reshape(nocc,nvir,nvir_pair)
    eris.vvvv[:] = lib.ddot(Lvv.T, Lvv)
    log.timer('CCSD integral transformation', *cput0)
    return eris
def _flops(nocc, nvir):
'''Total float points'''
return (nocc**3*nvir**2*2 + nocc**2*nvir**3*2 + # Ftilde
nocc**4*nvir*2 * 2 + nocc**4*nvir**2*2 + # Wijkl
nocc*nvir**4*2 * 2 + # Wabcd
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 +
nocc**3*nvir**3*2 + nocc**3*nvir**3*2 +
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 + # Wiabj
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 + # t1
nocc**3*nvir**2*2 * 2 + nocc**4*nvir**2*2 +
nocc*(nocc+1)/2*nvir**4*2 + # vvvv
nocc**2*nvir**3*2 * 2 + nocc**3*nvir**2*2 * 2 + # t2
nocc**3*nvir**3*2 +
nocc**3*nvir**3*2 * 2 + nocc**3*nvir**2*2 * 4) # Wiabj
if __name__ == '__main__':
    # Self-test: H2O/cc-pVDZ; each print should emit a value close to zero
    # (difference against the stored reference numbers).
    from pyscf import scf

    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.build()
    rhf = scf.RHF(mol)
    rhf.scf() # -76.0267656731

    # Density-fitted reference (forces the DF integral path).
    mf = rhf.density_fit(auxbasis='weigend')
    mf._eri = None
    mcc = CCSD(mf)
    eris = mcc.ao2mo()
    emp2, t1, t2 = mcc.init_amps(eris)
    print(abs(t2).sum() - 4.9318753386922278)
    print(emp2 - -0.20401737899811551)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(abs(t1).sum() - 0.046961325647584914)
    print(abs(t2).sum() - 5.378260578551683 )

    # Conventional (non-DF) reference.
    mcc = CCSD(rhf)
    eris = mcc.ao2mo()
    emp2, t1, t2 = mcc.init_amps(eris)
    print(abs(t2).sum() - 4.9556571218177)
    print(emp2 - -0.2040199672883385)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(abs(t1).sum()-0.0475038989126)
    print(abs(t2).sum()-5.401823846018721)
    print(energy(mcc, t1, t2, eris) - -0.208967840546667)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(energy(mcc, t1, t2, eris) - -0.212173678670510)
    print(abs(t1).sum() - 0.05470123093500083)
    print(abs(t2).sum() - 5.5605208391876539)

    mcc.ccsd()
    print(mcc.ecc - -0.213343234198275)
    print(abs(mcc.t2).sum() - 5.63970304662375)

    # Repeat with tiny max_memory to exercise the AO-direct/outcore path.
    mcc.max_memory = 1
    mcc.direct = True
    mcc.ccsd()
    print(mcc.ecc - -0.213343234198275)
    print(abs(mcc.t2).sum() - 5.63970304662375)

    # EOM-CCSD ionization potentials and excitation energies.
    e, v = mcc.ipccsd(nroots=3)
    print(e[0] - 0.43356041409195489)
    print(e[1] - 0.51876598058509493)
    print(e[2] - 0.6782879569941862 )

    e, v = mcc.eeccsd(nroots=4)
    print(e[0] - 0.2757159395886167)
    print(e[1] - 0.2757159395886167)
    print(e[2] - 0.2757159395886167)
    print(e[3] - 0.3005716731825082)
| [
"pyscf.lib.takebak_2d",
"pyscf.cc.eom_rccsd.EOMEESpinFlip",
"pyscf.lib.logger.new_logger",
"numpy.sqrt",
"pyscf.lib.logger.timer",
"pyscf.ao2mo._ao2mo.AO2MOpt",
"pyscf.lib.logger.process_clock",
"pyscf.cc.ccsd_rdm.make_rdm1",
"pyscf.ao2mo.full",
"pyscf.lib.logger.Logger",
"numpy.einsum",
"nump... | [((1347, 1379), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['mycc', 'verbose'], {}), '(mycc, verbose)\n', (1364, 1379), False, 'from pyscf.lib import logger\n'), ((3203, 3243), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (3216, 3243), False, 'from pyscf.lib import logger\n'), ((3393, 3413), 'numpy.zeros_like', 'numpy.zeros_like', (['t1'], {}), '(t1)\n', (3409, 3413), False, 'import numpy\n'), ((4514, 4542), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (4524, 4542), False, 'from pyscf import lib\n'), ((9660, 9688), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (9670, 9688), False, 'from pyscf import lib\n'), ((10214, 10252), 'pyscf.lib.einsum', 'lib.einsum', (['"""ijac,bc->ijab"""', 't2', 'ft_ab'], {}), "('ijac,bc->ijab', t2, ft_ab)\n", (10224, 10252), False, 'from pyscf import lib\n'), ((10266, 10304), 'pyscf.lib.einsum', 'lib.einsum', (['"""ki,kjab->ijab"""', 'ft_ij', 't2'], {}), "('ki,kjab->ijab', ft_ij, t2)\n", (10276, 10304), False, 'from pyscf import lib\n'), ((10353, 10387), 'numpy.einsum', 'numpy.einsum', (['"""ib,ab->ia"""', 't1', 'fvv'], {}), "('ib,ab->ia', t1, fvv)\n", (10365, 10387), False, 'import numpy\n'), ((10401, 10435), 'numpy.einsum', 'numpy.einsum', (['"""ja,ji->ia"""', 't1', 'foo'], {}), "('ja,ji->ia', t1, foo)\n", (10413, 10435), False, 'import numpy\n'), ((11050, 11090), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (11063, 11090), False, 'from pyscf.lib import logger\n'), ((11325, 11362), 'numpy.zeros', 'numpy.zeros', (['(nocc, nocc * nvir_pair)'], {}), '((nocc, nocc * nvir_pair))\n', (11336, 11362), False, 'import numpy\n'), ((15378, 15418), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (15391, 15418), False, 'from 
pyscf.lib import logger\n'), ((17811, 17851), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (17824, 17851), False, 'from pyscf.lib import logger\n'), ((19931, 19963), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['mycc', 'verbose'], {}), '(mycc, verbose)\n', (19948, 19963), False, 'from pyscf.lib import logger\n'), ((20094, 20145), 'numpy.ndarray', 'numpy.ndarray', (['x2.shape'], {'dtype': 'x2.dtype', 'buffer': 'out'}), '(x2.shape, dtype=x2.dtype, buffer=out)\n', (20107, 20145), False, 'import numpy\n'), ((25511, 25543), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['mycc', 'verbose'], {}), '(mycc, verbose)\n', (25528, 25543), False, 'from pyscf.lib import logger\n'), ((25650, 25677), 'numpy.result_type', 'numpy.result_type', (['t2', 'vvvv'], {}), '(t2, vvvv)\n', (25667, 25677), False, 'import numpy\n'), ((25688, 25736), 'numpy.ndarray', 'numpy.ndarray', (['x2.shape'], {'dtype': 'dtype', 'buffer': 'out'}), '(x2.shape, dtype=dtype, buffer=out)\n', (25701, 25736), False, 'import numpy\n'), ((25927, 25956), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvira', 'blksize'], {}), '(0, nvira, blksize)\n', (25937, 25956), False, 'from pyscf import lib\n'), ((26201, 26272), 'numpy.ndarray', 'numpy.ndarray', (['(nocc, nocc, nvir, nvir)'], {'dtype': 't2tril.dtype', 'buffer': 'out'}), '((nocc, nocc, nvir, nvir), dtype=t2tril.dtype, buffer=out)\n', (26214, 26272), False, 'import numpy\n'), ((26284, 26308), 'numpy.tril_indices', 'numpy.tril_indices', (['nocc'], {}), '(nocc)\n', (26302, 26308), False, 'import numpy\n'), ((26715, 26769), 'numpy.zeros', 'numpy.zeros', (['(nocc ** 2, nvir ** 2)'], {'dtype': 'c2vec.dtype'}), '((nocc ** 2, nvir ** 2), dtype=c2vec.dtype)\n', (26726, 26769), False, 'import numpy\n'), ((27573, 27614), 'numpy.ndarray', 'numpy.ndarray', (['size', 't1.dtype'], {'buffer': 'out'}), '(size, t1.dtype, buffer=out)\n', (27586, 27614), False, 'import numpy\n'), ((27946, 27999), 
'pyscf.lib.unpack_tril', 'lib.unpack_tril', (['vector[nov:]'], {'filltriu': 'lib.SYMMETRIC'}), '(vector[nov:], filltriu=lib.SYMMETRIC)\n', (27961, 27999), False, 'from pyscf import lib\n'), ((28264, 28305), 'numpy.ndarray', 'numpy.ndarray', (['size', 't1.dtype'], {'buffer': 'out'}), '(size, t1.dtype, buffer=out)\n', (28277, 28305), False, 'import numpy\n'), ((28348, 28378), 'numpy.tril_indices', 'numpy.tril_indices', (['nocc'], {'k': '(-1)'}), '(nocc, k=-1)\n', (28366, 28378), False, 'import numpy\n'), ((28391, 28421), 'numpy.tril_indices', 'numpy.tril_indices', (['nvir'], {'k': '(-1)'}), '(nvir, k=-1)\n', (28409, 28421), False, 'import numpy\n'), ((28768, 28825), 'numpy.zeros', 'numpy.zeros', (['(nocc, nocc, nvir, nvir)'], {'dtype': 'vector.dtype'}), '((nocc, nocc, nvir, nvir), dtype=vector.dtype)\n', (28779, 28825), False, 'import numpy\n'), ((29342, 29370), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (29352, 29370), False, 'from pyscf import lib\n'), ((30153, 30217), 'pyscf.lib.diis.DIIS', 'lib.diis.DIIS', (['mycc', 'mycc.diis_file'], {'incore': 'mycc.incore_complete'}), '(mycc, mycc.diis_file, incore=mycc.incore_complete)\n', (30166, 30217), False, 'from pyscf import lib\n'), ((32460, 32512), 'pyscf.lib.logger.info', 'logger.info', (['cc', '"""Set %s as a scanner"""', 'cc.__class__'], {}), "(cc, 'Set %s as a scanner', cc.__class__)\n", (32471, 32512), False, 'from pyscf.lib import logger\n'), ((54191, 54239), 'pyscf.ao2mo.incore.full', 'ao2mo.incore.full', (['mycc._scf._eri', 'eris.mo_coeff'], {}), '(mycc._scf._eri, eris.mo_coeff)\n', (54208, 54239), False, 'from pyscf import ao2mo\n'), ((54881, 54918), 'numpy.empty', 'numpy.empty', (['(nocc, nocc, nocc, nocc)'], {}), '((nocc, nocc, nocc, nocc))\n', (54892, 54918), False, 'import numpy\n'), ((54932, 54969), 'numpy.empty', 'numpy.empty', (['(nocc, nvir, nocc, nocc)'], {}), '((nocc, nvir, nocc, nocc))\n', (54943, 54969), False, 'import numpy\n'), ((54983, 55020), 
'numpy.empty', 'numpy.empty', (['(nocc, nvir, nvir, nocc)'], {}), '((nocc, nvir, nvir, nocc))\n', (54994, 55020), False, 'import numpy\n'), ((55034, 55071), 'numpy.empty', 'numpy.empty', (['(nocc, nvir, nocc, nvir)'], {}), '((nocc, nvir, nocc, nvir))\n', (55045, 55071), False, 'import numpy\n'), ((55085, 55121), 'numpy.empty', 'numpy.empty', (['(nocc, nvir, nvir_pair)'], {}), '((nocc, nvir, nvir_pair))\n', (55096, 55121), False, 'import numpy\n'), ((55136, 55171), 'numpy.empty', 'numpy.empty', (['(nvir_pair, nvir_pair)'], {}), '((nvir_pair, nvir_pair))\n', (55147, 55171), False, 'import numpy\n'), ((55196, 55224), 'numpy.empty', 'numpy.empty', (['(nmo, nmo, nmo)'], {}), '((nmo, nmo, nmo))\n', (55207, 55224), False, 'import numpy\n'), ((55234, 55271), 'numpy.empty', 'numpy.empty', (['(nocc, nocc, nvir, nvir)'], {}), '((nocc, nocc, nvir, nvir))\n', (55245, 55271), False, 'import numpy\n'), ((56070, 56128), 'pyscf.lib.logger.timer', 'logger.timer', (['mycc', '"""CCSD integral transformation"""', '*cput0'], {}), "(mycc, 'CCSD integral transformation', *cput0)\n", (56082, 56128), False, 'from pyscf.lib import logger\n'), ((56261, 56301), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (56274, 56301), False, 'from pyscf.lib import logger\n'), ((56403, 56442), 'numpy.asarray', 'numpy.asarray', (['eris.mo_coeff'], {'order': '"""F"""'}), "(eris.mo_coeff, order='F')\n", (56416, 56442), False, 'import numpy\n'), ((56625, 56640), 'pyscf.lib.H5TmpFile', 'lib.H5TmpFile', ([], {}), '()\n', (56638, 56640), False, 'from pyscf import lib\n'), ((58239, 58254), 'pyscf.lib.H5TmpFile', 'lib.H5TmpFile', ([], {}), '()\n', (58252, 58254), False, 'from pyscf import lib\n'), ((58369, 58465), 'pyscf.ao2mo.outcore.half_e1', 'ao2mo.outcore.half_e1', (['mol', '(mo_coeff, orbo)', 'fswap', 'int2e', '"""s4"""', '(1)', 'max_memory'], {'verbose': 'log'}), "(mol, (mo_coeff, orbo), fswap, int2e, 's4', 1,\n max_memory, verbose=log)\n", 
(58390, 58465), False, 'from pyscf import ao2mo\n'), ((58773, 58812), 'numpy.empty', 'numpy.empty', (['(blksize * nocc, nao_pair)'], {}), '((blksize * nocc, nao_pair))\n', (58784, 58812), False, 'import numpy\n'), ((58829, 58850), 'numpy.empty_like', 'numpy.empty_like', (['buf'], {}), '(buf)\n', (58845, 58850), False, 'import numpy\n'), ((59032, 59071), 'numpy.empty', 'numpy.empty', (['(blksize * nocc, nmo ** 2)'], {}), '((blksize * nocc, nmo ** 2))\n', (59043, 59071), False, 'import numpy\n'), ((60367, 60407), 'pyscf.lib.logger.Logger', 'logger.Logger', (['mycc.stdout', 'mycc.verbose'], {}), '(mycc.stdout, mycc.verbose)\n', (60380, 60407), False, 'from pyscf.lib import logger\n'), ((60490, 60529), 'numpy.asarray', 'numpy.asarray', (['eris.mo_coeff'], {'order': '"""F"""'}), "(eris.mo_coeff, order='F')\n", (60503, 60529), False, 'import numpy\n'), ((60689, 60720), 'numpy.empty', 'numpy.empty', (['(naux, nocc, nocc)'], {}), '((naux, nocc, nocc))\n', (60700, 60720), False, 'import numpy\n'), ((60729, 60760), 'numpy.empty', 'numpy.empty', (['(naux, nocc, nvir)'], {}), '((naux, nocc, nvir))\n', (60740, 60760), False, 'import numpy\n'), ((60769, 60800), 'numpy.empty', 'numpy.empty', (['(naux, nvir, nocc)'], {}), '((naux, nvir, nocc))\n', (60780, 60800), False, 'import numpy\n'), ((60809, 60839), 'numpy.empty', 'numpy.empty', (['(naux, nvir_pair)'], {}), '((naux, nvir_pair))\n', (60820, 60839), False, 'import numpy\n'), ((61399, 61414), 'pyscf.lib.H5TmpFile', 'lib.H5TmpFile', ([], {}), '()\n', (61412, 61414), False, 'from pyscf import lib\n'), ((62526, 62546), 'pyscf.lib.ddot', 'lib.ddot', (['Lvv.T', 'Lvv'], {}), '(Lvv.T, Lvv)\n', (62534, 62546), False, 'from pyscf import lib\n'), ((63451, 63461), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (63459, 63461), False, 'from pyscf import gto\n'), ((63681, 63693), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (63688, 63693), False, 'from pyscf import scf\n'), ((1604, 1626), 'pyscf.lib.logger.process_clock', 
'logger.process_clock', ([], {}), '()\n', (1624, 1626), False, 'from pyscf.lib import logger\n'), ((1628, 1649), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (1647, 1649), False, 'from pyscf.lib import logger\n'), ((2225, 2250), 'numpy.linalg.norm', 'numpy.linalg.norm', (['tmpvec'], {}), '(tmpvec)\n', (2242, 2250), False, 'import numpy\n'), ((3147, 3169), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (3167, 3169), False, 'from pyscf.lib import logger\n'), ((3171, 3192), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (3190, 3192), False, 'from pyscf.lib import logger\n'), ((3682, 3700), 'numpy.diag', 'numpy.diag', (['mo_e_o'], {}), '(mo_e_o)\n', (3692, 3700), False, 'import numpy\n'), ((3717, 3766), 'numpy.einsum', 'numpy.einsum', (['"""ia,ja->ij"""', 'fock[:nocc, nocc:]', 't1'], {}), "('ia,ja->ij', fock[:nocc, nocc:], t1)\n", (3729, 3766), False, 'import numpy\n'), ((3797, 3815), 'numpy.diag', 'numpy.diag', (['mo_e_v'], {}), '(mo_e_v)\n', (3807, 3815), False, 'import numpy\n'), ((3832, 3881), 'numpy.einsum', 'numpy.einsum', (['"""ia,ib->ab"""', 't1', 'fock[:nocc, nocc:]'], {}), "('ia,ib->ab', t1, fock[:nocc, nocc:])\n", (3844, 3881), False, 'import numpy\n'), ((3958, 3973), 'pyscf.lib.H5TmpFile', 'lib.H5TmpFile', ([], {}), '()\n', (3971, 3973), False, 'from pyscf import lib\n'), ((4227, 4247), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (4245, 4247), False, 'from pyscf import lib\n'), ((4663, 4703), 'numpy.empty', 'numpy.empty', (['(nocc, nocc, p1 - p0, nvir)'], {}), '((nocc, nocc, p1 - p0, nvir))\n', (4674, 4703), False, 'import numpy\n'), ((5515, 5555), 'numpy.empty', 'numpy.empty', (['(nocc, p1 - p0, nvir, nocc)'], {}), '((nocc, p1 - p0, nvir, nocc))\n', (5526, 5555), False, 'import numpy\n'), ((6241, 6283), 'pyscf.lib.einsum', 'lib.einsum', (['"""ic,kjbc->ibkj"""', 't1', 'eris_oovv'], {}), "('ic,kjbc->ibkj', t1, eris_oovv)\n", (6251, 6283), False, 'from 
pyscf import lib\n'), ((6299, 6341), 'pyscf.lib.einsum', 'lib.einsum', (['"""bjkc,ic->jbki"""', 'eris_voov', 't1'], {}), "('bjkc,ic->jbki', eris_voov, t1)\n", (6309, 6341), False, 'from pyscf import lib\n'), ((6370, 6406), 'pyscf.lib.einsum', 'lib.einsum', (['"""ka,jbki->jiba"""', 't1', 'tmp'], {}), "('ka,jbki->jiba', t1, tmp)\n", (6380, 6406), False, 'from pyscf import lib\n'), ((6534, 6576), 'numpy.einsum', 'numpy.einsum', (['"""kc,akic->ia"""', 't1', 'eris_voov'], {}), "('kc,akic->ia', t1, eris_voov)\n", (6546, 6576), False, 'import numpy\n'), ((6593, 6644), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', '(t1[:, p0:p1] * 0.5)', 't1'], {}), "('ia,jb->ijab', t1[:, p0:p1] * 0.5, t1)\n", (6605, 6644), False, 'import numpy\n'), ((6830, 6875), 'pyscf.lib.einsum', 'lib.einsum', (['"""aikb,kjab->ij"""', 'eris_voov', 'theta'], {}), "('aikb,kjab->ij', eris_voov, theta)\n", (6840, 6875), False, 'from pyscf import lib\n'), ((6996, 7041), 'pyscf.lib.einsum', 'lib.einsum', (['"""ijab,aklb->ijkl"""', 'tau', 'eris_voov'], {}), "('ijab,aklb->ijkl', tau, eris_voov)\n", (7006, 7041), False, 'from pyscf import lib\n'), ((9776, 9825), 'numpy.einsum', 'numpy.einsum', (['"""jb,ijba->ia"""', 'fov[:, p0:p1]', 'theta'], {}), "('jb,ijba->ia', fov[:, p0:p1], theta)\n", (9788, 9825), False, 'import numpy\n'), ((9842, 9897), 'pyscf.lib.einsum', 'lib.einsum', (['"""jbki,kjba->ia"""', 'eris.ovoo[:, p0:p1]', 'theta'], {}), "('jbki,kjba->ia', eris.ovoo[:, p0:p1], theta)\n", (9852, 9897), False, 'from pyscf import lib\n'), ((9912, 9957), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', 't1[:, p0:p1]', 't1'], {}), "('ia,jb->ijab', t1[:, p0:p1], t1)\n", (9924, 9957), False, 'import numpy\n'), ((10107, 10147), 'numpy.einsum', 'numpy.einsum', (['"""ja,ia->ij"""', '(0.5 * t1)', 'fov'], {}), "('ja,ia->ij', 0.5 * t1, fov)\n", (10119, 10147), False, 'import numpy\n'), ((10163, 10203), 'numpy.einsum', 'numpy.einsum', (['"""ia,ib->ab"""', '(0.5 * t1)', 'fov'], {}), "('ia,ib->ab', 0.5 * t1, 
fov)\n", (10175, 10203), False, 'import numpy\n'), ((10798, 10839), 'pyscf.lib.direct_sum', 'lib.direct_sum', (['"""a,b->ab"""', 'eia[i]', 'eia[i]'], {}), "('a,b->ab', eia[i], eia[i])\n", (10812, 10839), False, 'from pyscf import lib\n'), ((10994, 11016), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (11014, 11016), False, 'from pyscf.lib import logger\n'), ((11018, 11039), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (11037, 11039), False, 'from pyscf.lib import logger\n'), ((11193, 11230), 'numpy.zeros', 'numpy.zeros', (['(nvir, nocc, nocc, nvir)'], {}), '((nvir, nocc, nocc, nvir))\n', (11204, 11230), False, 'import numpy\n'), ((12021, 12078), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['load_ovvv'], {'sync': '(not mycc.async_io)'}), '(load_ovvv, sync=not mycc.async_io)\n', (12043, 12078), False, 'from pyscf import lib\n'), ((12106, 12145), 'numpy.empty', 'numpy.empty', (['(blksize, nocc, nvir_pair)'], {}), '((blksize, nocc, nvir_pair))\n', (12117, 12145), False, 'import numpy\n'), ((12167, 12206), 'numpy.empty', 'numpy.empty', (['(blksize, nocc, nvir_pair)'], {}), '((blksize, nocc, nvir_pair))\n', (12178, 12206), False, 'import numpy\n'), ((12263, 12291), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (12273, 12291), False, 'from pyscf import lib\n'), ((13954, 13991), 'pyscf.lib.square_mat_in_trilu_indices', 'lib.square_mat_in_trilu_indices', (['nvir'], {}), '(nvir)\n', (13985, 13991), False, 'from pyscf import lib\n'), ((14014, 14042), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (14024, 14042), False, 'from pyscf import lib\n'), ((15322, 15344), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (15342, 15344), False, 'from pyscf.lib import logger\n'), ((15346, 15367), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (15365, 15367), False, 'from 
pyscf.lib import logger\n'), ((15623, 15671), 'numpy.empty', 'numpy.empty', (['(nocc2, nvir, nvir)'], {'dtype': 't2.dtype'}), '((nocc2, nvir, nvir), dtype=t2.dtype)\n', (15634, 15671), False, 'import numpy\n'), ((16107, 16147), 'numpy.asarray', 'numpy.asarray', (['mo[:, nocc:].T'], {'order': '"""F"""'}), "(mo[:, nocc:].T, order='F')\n", (16120, 16147), False, 'import numpy\n'), ((17755, 17777), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (17775, 17777), False, 'from pyscf.lib import logger\n'), ((17779, 17800), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (17798, 17800), False, 'from pyscf.lib import logger\n'), ((17912, 17947), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', 't1', 't1'], {}), "('ia,jb->ijab', t1, t1)\n", (17924, 17947), False, 'import numpy\n'), ((18314, 18354), 'numpy.asarray', 'numpy.asarray', (['mo[:, nocc:].T'], {'order': '"""F"""'}), "(mo[:, nocc:].T, order='F')\n", (18327, 18354), False, 'import numpy\n'), ((19804, 19824), 'numpy.zeros_like', 'numpy.zeros_like', (['t2'], {}), '(t2)\n', (19820, 19824), False, 'import numpy\n'), ((19875, 19897), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (19895, 19897), False, 'from pyscf.lib import logger\n'), ((19899, 19920), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (19918, 19920), False, 'from pyscf.lib import logger\n'), ((21051, 21124), 'pyscf.ao2mo._ao2mo.AO2MOpt', '_ao2mo.AO2MOpt', (['mol', 'intor', '"""CVHFnr_schwarz_cond"""', '"""CVHFsetnr_direct_scf"""'], {}), "(mol, intor, 'CVHFnr_schwarz_cond', 'CVHFsetnr_direct_scf')\n", (21065, 21124), False, 'from pyscf.ao2mo import _ao2mo\n'), ((21301, 21349), 'pyscf.ao2mo.outcore.balance_partition', 'ao2mo.outcore.balance_partition', (['ao_loc', 'blksize'], {}), '(ao_loc, blksize)\n', (21332, 21349), False, 'from pyscf import ao2mo\n'), ((21414, 21459), 'numpy.empty', 'numpy.empty', (['(blksize, blksize, nvirb, nvirb)'], {}), 
'((blksize, blksize, nvirb, nvirb))\n', (21425, 21459), False, 'import numpy\n'), ((21475, 21520), 'numpy.empty', 'numpy.empty', (['(blksize, blksize, nvirb, nvirb)'], {}), '((blksize, blksize, nvirb, nvirb))\n', (21486, 21520), False, 'import numpy\n'), ((23961, 23999), 'pyscf.lib.square_mat_in_trilu_indices', 'lib.square_mat_in_trilu_indices', (['nvira'], {}), '(nvira)\n', (23992, 23999), False, 'from pyscf import lib\n'), ((24018, 24063), 'numpy.empty', 'numpy.empty', (['(blksize, blksize, nvirb, nvirb)'], {}), '((blksize, blksize, nvirb, nvirb))\n', (24029, 24063), False, 'import numpy\n'), ((25455, 25477), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (25475, 25477), False, 'from pyscf.lib import logger\n'), ((25479, 25500), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (25498, 25500), False, 'from pyscf.lib import logger\n'), ((25981, 26025), 'pyscf.lib.einsum', 'lib.einsum', (['"""xcd,acbd->xab"""', 'x2', 'vvvv[p0:p1]'], {}), "('xcd,acbd->xab', x2, vvvv[p0:p1])\n", (25991, 26025), False, 'from pyscf import lib\n'), ((26877, 26907), 'numpy.tril_indices', 'numpy.tril_indices', (['nocc'], {'k': '(-1)'}), '(nocc, k=-1)\n', (26895, 26907), False, 'import numpy\n'), ((26924, 26954), 'numpy.tril_indices', 'numpy.tril_indices', (['nvir'], {'k': '(-1)'}), '(nvir, k=-1)\n', (26942, 26954), False, 'import numpy\n'), ((26963, 27049), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['t2', 't2tril', '(otril[0] * nocc + otril[1])', '(vtril[0] * nvir + vtril[1])'], {}), '(t2, t2tril, otril[0] * nocc + otril[1], vtril[0] * nvir +\n vtril[1])\n', (26977, 27049), False, 'from pyscf import lib\n'), ((27046, 27132), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['t2', 't2tril', '(otril[1] * nocc + otril[0])', '(vtril[1] * nvir + vtril[0])'], {}), '(t2, t2tril, otril[1] * nocc + otril[0], vtril[1] * nvir +\n vtril[0])\n', (27060, 27132), False, 'from pyscf import lib\n'), ((27234, 27320), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', 
(['t2', 't2tril', '(otril[0] * nocc + otril[1])', '(vtril[1] * nvir + vtril[0])'], {}), '(t2, t2tril, otril[0] * nocc + otril[1], vtril[1] * nvir +\n vtril[0])\n', (27248, 27320), False, 'from pyscf import lib\n'), ((27317, 27403), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['t2', 't2tril', '(otril[1] * nocc + otril[0])', '(vtril[0] * nvir + vtril[1])'], {}), '(t2, t2tril, otril[1] * nocc + otril[0], vtril[0] * nvir +\n vtril[1])\n', (27331, 27403), False, 'from pyscf import lib\n'), ((28075, 28103), 'numpy.asarray', 'numpy.asarray', (['t2'], {'order': '"""C"""'}), "(t2, order='C')\n", (28088, 28103), False, 'import numpy\n'), ((29138, 29183), 'numpy.einsum', 'numpy.einsum', (['"""ia,ia"""', 'fock[:nocc, nocc:]', 't1'], {}), "('ia,ia', fock[:nocc, nocc:], t1)\n", (29150, 29183), False, 'import numpy\n'), ((29562, 29603), 'numpy.einsum', 'numpy.einsum', (['"""jiab,iabj"""', 'tau', 'eris_ovvo'], {}), "('jiab,iabj', tau, eris_ovvo)\n", (29574, 29603), False, 'import numpy\n'), ((29639, 29710), 'pyscf.lib.logger.warn', 'logger.warn', (['mycc', '"""Non-zero imaginary part found in CCSD energy %s"""', 'e'], {}), "(mycc, 'Non-zero imaginary part found in CCSD energy %s', e)\n", (29650, 29710), False, 'from pyscf.lib import logger\n'), ((39469, 39501), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['self', 'verbose'], {}), '(self, verbose)\n', (39486, 39501), False, 'from pyscf.lib import logger\n'), ((41047, 41107), 'numpy.empty', 'numpy.empty', (['(nocc, nocc, nvir, nvir)'], {'dtype': 'eris.ovov.dtype'}), '((nocc, nocc, nvir, nvir), dtype=eris.ovov.dtype)\n', (41058, 41107), False, 'import numpy\n'), ((41289, 41317), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (41299, 41317), False, 'from pyscf import lib\n'), ((41738, 41838), 'pyscf.lib.logger.info', 'logger.info', (['self', '"""Init t2, MP2 energy = %.15g E_corr(MP2) %.15g"""', '(e_hf + self.emp2)', 'self.emp2'], {}), "(self, 'Init t2, MP2 energy = %.15g 
E_corr(MP2) %.15g', e_hf +\n self.emp2, self.emp2)\n", (41749, 41838), False, 'from pyscf.lib import logger\n'), ((41863, 41901), 'pyscf.lib.logger.timer', 'logger.timer', (['self', '"""init mp2"""', '*time0'], {}), "(self, 'init mp2', *time0)\n", (41875, 41901), False, 'from pyscf.lib import logger\n'), ((43126, 43230), 'pyscf.lib.logger.note', 'logger.note', (['self', '"""E(%s) = %.16g E_corr = %.16g"""', 'self.__class__.__name__', 'self.e_tot', 'self.e_corr'], {}), "(self, 'E(%s) = %.16g E_corr = %.16g', self.__class__.__name__,\n self.e_tot, self.e_corr)\n", (43137, 43230), False, 'from pyscf.lib import logger\n'), ((43677, 43800), 'pyscf.cc.ccsd_lambda.kernel', 'ccsd_lambda.kernel', (['self', 'eris', 't1', 't2', 'l1', 'l2'], {'max_cycle': 'self.max_cycle', 'tol': 'self.conv_tol_normt', 'verbose': 'self.verbose'}), '(self, eris, t1, t2, l1, l2, max_cycle=self.max_cycle,\n tol=self.conv_tol_normt, verbose=self.verbose)\n', (43695, 43800), False, 'from pyscf.cc import ccsd_lambda\n'), ((44167, 44214), 'pyscf.cc.ccsd_t.kernel', 'ccsd_t.kernel', (['self', 'eris', 't1', 't2', 'self.verbose'], {}), '(self, eris, t1, t2, self.verbose)\n', (44180, 44214), False, 'from pyscf.cc import ccsd_t\n'), ((45672, 45693), 'pyscf.cc.eom_rccsd.EOMIP', 'eom_rccsd.EOMIP', (['self'], {}), '(self)\n', (45687, 45693), False, 'from pyscf.cc import eom_rccsd\n'), ((45777, 45798), 'pyscf.cc.eom_rccsd.EOMEA', 'eom_rccsd.EOMEA', (['self'], {}), '(self)\n', (45792, 45798), False, 'from pyscf.cc import eom_rccsd\n'), ((45882, 45903), 'pyscf.cc.eom_rccsd.EOMEE', 'eom_rccsd.EOMEE', (['self'], {}), '(self)\n', (45897, 45903), False, 'from pyscf.cc import eom_rccsd\n'), ((46299, 46356), 'pyscf.cc.ccsd_rdm.make_rdm1', 'ccsd_rdm.make_rdm1', (['self', 't1', 't2', 'l1', 'l2'], {'ao_repr': 'ao_repr'}), '(self, t1, t2, l1, l2, ao_repr=ao_repr)\n', (46317, 46356), False, 'from pyscf.cc import ccsd_rdm\n'), ((46830, 46887), 'pyscf.cc.ccsd_rdm.make_rdm2', 'ccsd_rdm.make_rdm2', (['self', 't1', 't2', 'l1', 
'l2'], {'ao_repr': 'ao_repr'}), '(self, t1, t2, l1, l2, ao_repr=ao_repr)\n', (46848, 46887), False, 'from pyscf.cc import ccsd_rdm\n'), ((50281, 50327), 'pyscf.lib.chkfile.save', 'lib.chkfile.save', (['self.chkfile', '"""ccsd"""', 'cc_chk'], {}), "(self.chkfile, 'ccsd', cc_chk)\n", (50297, 50327), False, 'from pyscf import lib\n'), ((50436, 50500), 'pyscf.cc.dfccsd.RCCSD', 'dfccsd.RCCSD', (['self._scf', 'self.frozen', 'self.mo_coeff', 'self.mo_occ'], {}), '(self._scf, self.frozen, self.mo_coeff, self.mo_occ)\n', (50448, 50500), False, 'from pyscf.cc import dfccsd\n'), ((50837, 50857), 'pyscf.grad.ccsd.Gradients', 'ccsd.Gradients', (['self'], {}), '(self)\n', (50851, 50857), False, 'from pyscf.grad import ccsd\n'), ((51756, 51788), 'pyscf.mp.mp2._mo_without_core', '_mo_without_core', (['mycc', 'mo_coeff'], {}), '(mycc, mo_coeff)\n', (51772, 51788), False, 'from pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core\n'), ((53165, 53197), 'numpy.asarray', 'numpy.asarray', (['self.ovvv[slices]'], {}), '(self.ovvv[slices])\n', (53178, 53197), False, 'import numpy\n'), ((53994, 54016), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (54014, 54016), False, 'from pyscf.lib import logger\n'), ((54018, 54039), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (54037, 54039), False, 'from pyscf.lib import logger\n'), ((54799, 54826), 'pyscf.ao2mo.restore', 'ao2mo.restore', (['(4)', 'eri1', 'nmo'], {}), '(4, eri1, nmo)\n', (54812, 54826), False, 'from pyscf import ao2mo\n'), ((55309, 55365), 'pyscf.lib.unpack_tril', 'lib.unpack_tril', (['eri1[ij:ij + i + 1]'], {'out': 'outbuf[:i + 1]'}), '(eri1[ij:ij + i + 1], out=outbuf[:i + 1])\n', (55324, 55365), False, 'from pyscf import lib\n'), ((55623, 55679), 'pyscf.lib.unpack_tril', 'lib.unpack_tril', (['eri1[ij:ij + i + 1]'], {'out': 'outbuf[:i + 1]'}), '(eri1[ij:ij + i + 1], out=outbuf[:i + 1])\n', (55638, 55679), False, 'from pyscf import lib\n'), ((55863, 
55902), 'pyscf.lib.pack_tril', 'lib.pack_tril', (['buf[:nocc, nocc:, nocc:]'], {}), '(buf[:nocc, nocc:, nocc:])\n', (55876, 55902), False, 'from pyscf import lib\n'), ((55936, 56010), 'pyscf.lib.pack_tril', 'lib.pack_tril', (['buf[nocc:i + 1, nocc:, nocc:]'], {'out': 'eris.vvvv[ij1:ij1 + dij]'}), '(buf[nocc:i + 1, nocc:, nocc:], out=eris.vvvv[ij1:ij1 + dij])\n', (55949, 56010), False, 'from pyscf import lib\n'), ((56204, 56226), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (56224, 56226), False, 'from pyscf.lib import logger\n'), ((56228, 56249), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (56247, 56249), False, 'from pyscf.lib import logger\n'), ((57861, 57883), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (57881, 57883), False, 'from pyscf.lib import logger\n'), ((57885, 57906), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (57904, 57906), False, 'from pyscf.lib import logger\n'), ((58029, 58044), 'pyscf.lib.H5TmpFile', 'lib.H5TmpFile', ([], {}), '()\n', (58042, 58044), False, 'from pyscf import lib\n'), ((58053, 58122), 'pyscf.ao2mo.full', 'ao2mo.full', (['mol', 'orbv', 'eris.feri2'], {'max_memory': 'max_memory', 'verbose': 'log'}), '(mol, orbv, eris.feri2, max_memory=max_memory, verbose=log)\n', (58063, 58122), False, 'from pyscf import ao2mo\n'), ((59076, 59128), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['load'], {'sync': '(not mycc.async_io)'}), '(load, sync=not mycc.async_io)\n', (59098, 59128), False, 'from pyscf import lib\n'), ((59204, 59232), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nocc', 'blksize'], {}), '(0, nocc, blksize)\n', (59214, 59232), False, 'from pyscf import lib\n'), ((59682, 59710), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (59692, 59710), False, 'from pyscf import lib\n'), ((60310, 60332), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], 
{}), '()\n', (60330, 60332), False, 'from pyscf.lib import logger\n'), ((60334, 60355), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (60353, 60355), False, 'from pyscf.lib import logger\n'), ((1859, 1923), 'pyscf.lib.diis.DIIS', 'lib.diis.DIIS', (['mycc', 'mycc.diis_file'], {'incore': 'mycc.incore_complete'}), '(mycc, mycc.diis_file, incore=mycc.incore_complete)\n', (1872, 1923), False, 'from pyscf import lib\n'), ((4791, 4848), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['load_oovv'], {'sync': '(not mycc.async_io)'}), '(load_oovv, sync=not mycc.async_io)\n', (4813, 4848), False, 'from pyscf import lib\n'), ((4967, 5023), 'numpy.einsum', 'numpy.einsum', (['"""kc,kcji->ij"""', '(2 * t1[:, p0:p1])', 'eris_ovoo'], {}), "('kc,kcji->ij', 2 * t1[:, p0:p1], eris_ovoo)\n", (4979, 5023), False, 'import numpy\n'), ((5040, 5093), 'numpy.einsum', 'numpy.einsum', (['"""kc,icjk->ij"""', '(-t1[:, p0:p1])', 'eris_ovoo'], {}), "('kc,icjk->ij', -t1[:, p0:p1], eris_ovoo)\n", (5052, 5093), False, 'import numpy\n'), ((5112, 5164), 'pyscf.lib.einsum', 'lib.einsum', (['"""la,jaik->lkji"""', 't1[:, p0:p1]', 'eris_ovoo'], {}), "('la,jaik->lkji', t1[:, p0:p1], eris_ovoo)\n", (5122, 5164), False, 'from pyscf import lib\n'), ((5259, 5301), 'pyscf.lib.einsum', 'lib.einsum', (['"""jbik,ka->bjia"""', 'eris_ovoo', 't1'], {}), "('jbik,ka->bjia', eris_ovoo, t1)\n", (5269, 5301), False, 'from pyscf import lib\n'), ((5381, 5423), 'pyscf.lib.einsum', 'lib.einsum', (['"""kbij,ka->bija"""', 'eris_ovoo', 't1'], {}), "('kbij,ka->bija', eris_ovoo, t1)\n", (5391, 5423), False, 'from pyscf import lib\n'), ((5641, 5698), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['load_ovvo'], {'sync': '(not mycc.async_io)'}), '(load_ovvo, sync=not mycc.async_io)\n', (5663, 5698), False, 'from pyscf import lib\n'), ((5826, 5868), 'numpy.einsum', 'numpy.einsum', (['"""jb,jiab->ia"""', 't1', 'eris_oovv'], {}), "('jb,jiab->ia', t1, eris_oovv)\n", (5838, 5868), False, 
'import numpy\n'), ((6157, 6199), 'numpy.einsum', 'numpy.einsum', (['"""jb,aijb->ia"""', 't1', 'eris_voov'], {}), "('jb,aijb->ia', t1, eris_voov)\n", (6169, 6199), False, 'import numpy\n'), ((6463, 6505), 'numpy.einsum', 'numpy.einsum', (['"""kc,aikc->ia"""', 't1', 'eris_voov'], {}), "('kc,aikc->ia', t1, eris_voov)\n", (6475, 6505), False, 'import numpy\n'), ((6934, 6979), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', 't1[:, p0:p1]', 't1'], {}), "('ia,jb->ijab', t1[:, p0:p1], t1)\n", (6946, 6979), False, 'import numpy\n'), ((7125, 7186), 'pyscf.lib.einsum', 'lib.einsum', (['"""bkic,jkca->bija"""', 'eris_voov[:, :, :, q0:q1]', 'tau'], {}), "('bkic,jkca->bija', eris_voov[:, :, :, q0:q1], tau)\n", (7135, 7186), False, 'from pyscf import lib\n'), ((7197, 7257), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['update_wVooV'], {'sync': '(not mycc.async_io)'}), '(update_wVooV, sync=not mycc.async_io)\n', (7219, 7257), False, 'from pyscf import lib\n'), ((7301, 7329), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (7311, 7329), False, 'from pyscf import lib\n'), ((7784, 7841), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['update_t2'], {'sync': '(not mycc.async_io)'}), '(update_t2, sync=not mycc.async_io)\n', (7806, 7841), False, 'from pyscf import lib\n'), ((7882, 7910), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (7892, 7910), False, 'from pyscf import lib\n'), ((8484, 8544), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['update_wVOov'], {'sync': '(not mycc.async_io)'}), '(update_wVOov, sync=not mycc.async_io)\n', (8506, 8544), False, 'from pyscf import lib\n'), ((8588, 8616), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (8598, 8616), False, 'from pyscf import lib\n'), ((9034, 9077), 'pyscf.lib.einsum', 'lib.einsum', (['"""kica,ckjb->ijab"""', 'theta', 'wVOov'], {}), 
"('kica,ckjb->ijab', theta, wVOov)\n", (9044, 9077), False, 'from pyscf import lib\n'), ((9091, 9148), 'pyscf.lib.call_in_background', 'lib.call_in_background', (['update_t2'], {'sync': '(not mycc.async_io)'}), '(update_t2, sync=not mycc.async_io)\n', (9113, 9148), False, 'from pyscf import lib\n'), ((9189, 9217), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvir', 'blksize'], {}), '(0, nvir, blksize)\n', (9199, 9217), False, 'from pyscf import lib\n'), ((10019, 10060), 'pyscf.lib.einsum', 'lib.einsum', (['"""ijkl,klab->ijab"""', 'woooo', 'tau'], {}), "('ijkl,klab->ijab', woooo, tau)\n", (10029, 10060), False, 'from pyscf import lib\n'), ((10629, 10673), 'pyscf.lib.direct_sum', 'lib.direct_sum', (['"""a,jb->jab"""', 'eia[i]', 'eia[:i]'], {}), "('a,jb->jab', eia[i], eia[:i])\n", (10643, 10673), False, 'from pyscf import lib\n'), ((11396, 11416), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (11414, 11416), False, 'from pyscf import lib\n'), ((12871, 12913), 'numpy.einsum', 'numpy.einsum', (['"""kc,bkca->ab"""', 't1', 'eris_vovv'], {}), "('kc,bkca->ab', t1, eris_vovv)\n", (12883, 12913), False, 'import numpy\n'), ((13326, 13368), 'pyscf.lib.einsum', 'lib.einsum', (['"""biac,jc->bija"""', 'eris_vovv', 't1'], {}), "('biac,jc->bija', eris_vovv, t1)\n", (13336, 13368), False, 'from pyscf import lib\n'), ((13502, 13547), 'pyscf.lib.einsum', 'lib.einsum', (['"""icjb,cjba->ia"""', 'theta', 'eris_vovv'], {}), "('icjb,cjba->ia', theta, eris_vovv)\n", (13512, 13547), False, 'from pyscf import lib\n'), ((15573, 15597), 'numpy.tril_indices', 'numpy.tril_indices', (['nocc'], {}), '(nocc)\n', (15591, 15597), False, 'import numpy\n'), ((15774, 15818), 'numpy.einsum', 'numpy.einsum', (['"""a,jb->jab"""', 't1[i]', 't1[:i + 1]'], {}), "('a,jb->jab', t1[i], t1[:i + 1])\n", (15786, 15818), False, 'import numpy\n'), ((16027, 16064), 'pyscf.mp.mp2._mo_without_core', '_mo_without_core', (['mycc', 'mycc.mo_coeff'], {}), '(mycc, mycc.mo_coeff)\n', (16043, 16064), False, 
'from pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core\n'), ((18199, 18236), 'pyscf.mp.mp2._mo_without_core', '_mo_without_core', (['mycc', 'mycc.mo_coeff'], {}), '(mycc, mycc.mo_coeff)\n', (18215, 18236), False, 'from pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core\n'), ((21188, 21244), 'numpy.sqrt', 'numpy.sqrt', (['(max_memory * 900000.0 / 8 / nvirb ** 2 / 2.5)'], {}), '(max_memory * 900000.0 / 8 / nvirb ** 2 / 2.5)\n', (21198, 21244), False, 'import numpy\n'), ((22815, 22843), 'pyscf.lib.unpack_tril', 'lib.unpack_tril', (['eri'], {'axis': '(0)'}), '(eri, axis=0)\n', (22830, 22843), False, 'from pyscf import lib\n'), ((22862, 22925), 'numpy.ndarray', 'numpy.ndarray', (['(i1 - i0, nvirb, i1 - i0, nvirb)'], {'buffer': 'loadbuf'}), '((i1 - i0, nvirb, i1 - i0, nvirb), buffer=loadbuf)\n', (22875, 22925), False, 'import numpy\n'), ((23900, 23941), 'numpy.asarray', 'numpy.asarray', (['vvvv[off0:off1]'], {'order': '"""C"""'}), "(vvvv[off0:off1], order='C')\n", (23913, 23941), False, 'import numpy\n'), ((24309, 24335), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'i1', 'blksize'], {}), '(0, i1, blksize)\n', (24319, 24335), False, 'from pyscf import lib\n'), ((25773, 25793), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (25791, 25793), False, 'from pyscf import lib\n'), ((29222, 29242), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (29240, 29242), False, 'from pyscf import lib\n'), ((29441, 29486), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', 't1[:, p0:p1]', 't1'], {}), "('ia,jb->ijab', t1[:, p0:p1], t1)\n", (29453, 29486), False, 'import numpy\n'), ((29503, 29544), 'numpy.einsum', 'numpy.einsum', (['"""ijab,iabj"""', 'tau', 'eris_ovvo'], {}), "('ijab,iabj', tau, eris_ovvo)\n", (29515, 29544), False, 'import numpy\n'), ((30840, 30873), 'numpy.einsum', 'numpy.einsum', (['"""ia,ja->ij"""', 't1', 't1'], {}), "('ia,ja->ij', t1, t1)\n", (30852, 30873), False, 'import numpy\n'), 
((30910, 30943), 'numpy.einsum', 'numpy.einsum', (['"""ia,ib->ab"""', 't1', 't1'], {}), "('ia,ib->ab', t1, t1)\n", (30922, 30943), False, 'import numpy\n'), ((31324, 31361), 'numpy.einsum', 'numpy.einsum', (['"""ikab,jkab->ij"""', 't2', 't2'], {}), "('ikab,jkab->ij', t2, t2)\n", (31336, 31361), False, 'import numpy\n'), ((31398, 31435), 'numpy.einsum', 'numpy.einsum', (['"""ijac,ijbc->ab"""', 't2', 't2'], {}), "('ijac,ijbc->ab', t2, t2)\n", (31410, 31435), False, 'import numpy\n'), ((40738, 40760), 'pyscf.lib.logger.process_clock', 'logger.process_clock', ([], {}), '()\n', (40758, 40760), False, 'from pyscf.lib import logger\n'), ((40762, 40783), 'pyscf.lib.logger.perf_counter', 'logger.perf_counter', ([], {}), '()\n', (40781, 40783), False, 'from pyscf.lib import logger\n'), ((41608, 41661), 'numpy.einsum', 'numpy.einsum', (['"""jiab,iajb"""', 't2[:, :, p0:p1]', 'eris_ovov'], {}), "('jiab,iajb', t2[:, :, p0:p1], eris_ovov)\n", (41620, 41661), False, 'import numpy\n'), ((42970, 43028), 'pyscf.lib.logger.info', 'logger.info', (['self', '"""%s converged"""', 'self.__class__.__name__'], {}), "(self, '%s converged', self.__class__.__name__)\n", (42981, 43028), False, 'from pyscf.lib import logger\n'), ((43055, 43117), 'pyscf.lib.logger.note', 'logger.note', (['self', '"""%s not converged"""', 'self.__class__.__name__'], {}), "(self, '%s not converged', self.__class__.__name__)\n", (43066, 43117), False, 'from pyscf.lib import logger\n'), ((47972, 47992), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (47990, 47992), False, 'from pyscf import lib\n'), ((48941, 48987), 'pyscf.lib.logger.debug1', 'logger.debug1', (['self', '"""DIIS for step %d"""', 'istep'], {}), "(self, 'DIIS for step %d', istep)\n", (48954, 48987), False, 'from pyscf.lib import logger\n'), ((50665, 50688), 'copy.copy', 'copy.copy', (['mycc.with_df'], {}), '(mycc.with_df)\n', (50674, 50688), False, 'import copy\n'), ((59384, 59486), 'pyscf.ao2mo._ao2mo.nr_e2', 'ao2mo._ao2mo.nr_e2', 
(['buf[:nrow]', 'mo_coeff', '(0, nmo, 0, nmo)', '"""s4"""', '"""s1"""'], {'out': 'outbuf', 'ao_loc': 'ao_loc'}), "(buf[:nrow], mo_coeff, (0, nmo, 0, nmo), 's4', 's1', out=\n outbuf, ao_loc=ao_loc)\n", (59402, 59486), False, 'from pyscf import ao2mo\n'), ((59866, 59968), 'pyscf.ao2mo._ao2mo.nr_e2', 'ao2mo._ao2mo.nr_e2', (['buf[:nrow]', 'mo_coeff', '(0, nmo, 0, nmo)', '"""s4"""', '"""s1"""'], {'out': 'outbuf', 'ao_loc': 'ao_loc'}), "(buf[:nrow], mo_coeff, (0, nmo, 0, nmo), 's4', 's1', out=\n outbuf, ao_loc=ao_loc)\n", (59884, 59968), False, 'from pyscf import ao2mo\n'), ((62095, 62115), 'pyscf.lib.ddot', 'lib.ddot', (['Loo.T', 'Loo'], {}), '(Loo.T, Loo)\n', (62103, 62115), False, 'from pyscf import lib\n'), ((62164, 62184), 'pyscf.lib.ddot', 'lib.ddot', (['Lov.T', 'Loo'], {}), '(Lov.T, Loo)\n', (62172, 62184), False, 'from pyscf import lib\n'), ((62319, 62339), 'pyscf.lib.ddot', 'lib.ddot', (['Lov.T', 'Lvo'], {}), '(Lov.T, Lvo)\n', (62327, 62339), False, 'from pyscf import lib\n'), ((62388, 62408), 'pyscf.lib.ddot', 'lib.ddot', (['Lov.T', 'Lov'], {}), '(Lov.T, Lov)\n', (62396, 62408), False, 'from pyscf import lib\n'), ((62457, 62477), 'pyscf.lib.ddot', 'lib.ddot', (['Lov.T', 'Lvv'], {}), '(Lov.T, Lvv)\n', (62465, 62477), False, 'from pyscf import lib\n'), ((7396, 7441), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ijab"""', 't1[:, q0:q1]', 't1'], {}), "('ia,jb->ijab', t1[:, q0:q1], t1)\n", (7408, 7441), False, 'import numpy\n'), ((7934, 7994), 'pyscf.lib.einsum', 'lib.einsum', (['"""jkca,ckib->jaib"""', 't2[:, :, p0:p1, q0:q1]', 'wVooV'], {}), "('jkca,ckib->jaib', t2[:, :, p0:p1, q0:q1], wVooV)\n", (7944, 7994), False, 'from pyscf import lib\n'), ((8425, 8470), 'pyscf.lib.einsum', 'lib.einsum', (['"""aikc,kcjb->aijb"""', 'eris_VOov', 'tau'], {}), "('aikc,kcjb->aijb', eris_VOov, tau)\n", (8435, 8470), False, 'from pyscf import lib\n'), ((8757, 8806), 'numpy.einsum', 'numpy.einsum', (['"""ia,jb->ibja"""', '(t1[:, q0:q1] * 2)', 't1'], {}), "('ia,jb->ibja', t1[:, q0:q1] 
* 2, t1)\n", (8769, 8806), False, 'import numpy\n'), ((12518, 12556), 'numpy.asarray', 'numpy.asarray', (['t1[:, p0:p1]'], {'order': '"""C"""'}), "(t1[:, p0:p1], order='C')\n", (12531, 12556), False, 'import numpy\n'), ((12791, 12843), 'numpy.einsum', 'numpy.einsum', (['"""kc,ckab->ab"""', 't1[:, p0:p1]', 'eris_vovv'], {}), "('kc,ckab->ab', t1[:, p0:p1], eris_vovv)\n", (12803, 12843), False, 'import numpy\n'), ((17232, 17279), 'numpy.ndarray', 'numpy.ndarray', (['(nocc2 * nvir, nvir)'], {'buffer': 'tau'}), '((nocc2 * nvir, nvir), buffer=tau)\n', (17245, 17279), False, 'import numpy\n'), ((20848, 20868), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (20866, 20868), False, 'from pyscf import lib\n'), ((22011, 22074), 'numpy.ndarray', 'numpy.ndarray', (['(i1 - i0, nvirb, j1 - j0, nvirb)'], {'buffer': 'loadbuf'}), '((i1 - i0, nvirb, j1 - j0, nvirb), buffer=loadbuf)\n', (22024, 22074), False, 'import numpy\n'), ((23167, 23186), 'ctypes.c_int', 'ctypes.c_int', (['nvirb'], {}), '(nvirb)\n', (23179, 23186), False, 'import ctypes\n'), ((24103, 24132), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvira', 'blksize'], {}), '(0, nvira, blksize)\n', (24113, 24132), False, 'from pyscf import lib\n'), ((24183, 24212), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'nvira', 'blksize'], {}), '(0, nvira, blksize)\n', (24193, 24212), False, 'from pyscf import lib\n'), ((24414, 24477), 'numpy.ndarray', 'numpy.ndarray', (['(i1 - i0, nvirb, j1 - j0, nvirb)'], {'buffer': 'loadbuf'}), '((i1 - i0, nvirb, j1 - j0, nvirb), buffer=loadbuf)\n', (24427, 24477), False, 'import numpy\n'), ((30580, 30601), 'numpy.linalg.norm', 'numpy.linalg.norm', (['t1'], {}), '(t1)\n', (30597, 30601), False, 'import numpy\n'), ((40321, 40341), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (40339, 40341), False, 'from pyscf import lib\n'), ((41144, 41164), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (41162, 41164), False, 'from pyscf import lib\n'), 
((41458, 41507), 'pyscf.lib.direct_sum', 'lib.direct_sum', (['"""ia,jb->ijab"""', 'eia[:, p0:p1]', 'eia'], {}), "('ia,jb->ijab', eia[:, p0:p1], eia)\n", (41472, 41507), False, 'from pyscf import lib\n'), ((41532, 41585), 'numpy.einsum', 'numpy.einsum', (['"""ijab,iajb"""', 't2[:, :, p0:p1]', 'eris_ovov'], {}), "('ijab,iajb', t2[:, :, p0:p1], eris_ovov)\n", (41544, 41585), False, 'import numpy\n'), ((44384, 44405), 'pyscf.cc.eom_rccsd.EOMIP', 'eom_rccsd.EOMIP', (['self'], {}), '(self)\n', (44399, 44405), False, 'from pyscf.cc import eom_rccsd\n'), ((44674, 44695), 'pyscf.cc.eom_rccsd.EOMEA', 'eom_rccsd.EOMEA', (['self'], {}), '(self)\n', (44689, 44695), False, 'from pyscf.cc import eom_rccsd\n'), ((44921, 44942), 'pyscf.cc.eom_rccsd.EOMEE', 'eom_rccsd.EOMEE', (['self'], {}), '(self)\n', (44936, 44942), False, 'from pyscf.cc import eom_rccsd\n'), ((45119, 45147), 'pyscf.cc.eom_rccsd.EOMEESinglet', 'eom_rccsd.EOMEESinglet', (['self'], {}), '(self)\n', (45141, 45147), False, 'from pyscf.cc import eom_rccsd\n'), ((45324, 45352), 'pyscf.cc.eom_rccsd.EOMEETriplet', 'eom_rccsd.EOMEETriplet', (['self'], {}), '(self)\n', (45346, 45352), False, 'from pyscf.cc import eom_rccsd\n'), ((45521, 45550), 'pyscf.cc.eom_rccsd.EOMEESpinFlip', 'eom_rccsd.EOMEESpinFlip', (['self'], {}), '(self)\n', (45544, 45550), False, 'from pyscf.cc import eom_rccsd\n'), ((48232, 48435), 'pyscf.lib.logger.warn', 'logger.warn', (['self', '"""CCSD detected DF being used in the HF object. MO integrals are computed based on the DF 3-index tensors.\nIt\'s recommended to use dfccsd.CCSD for the DF-CCSD calculations"""'], {}), '(self,\n """CCSD detected DF being used in the HF object. 
MO integrals are computed based on the DF 3-index tensors.\nIt\'s recommended to use dfccsd.CCSD for the DF-CCSD calculations"""\n )\n', (48243, 48435), False, 'from pyscf.lib import logger\n'), ((52731, 52904), 'pyscf.lib.logger.warn', 'logger.warn', (['mycc', '"""HOMO-LUMO gap %s too small for CCSD.\nCCSD may be difficult to converge. Increasing CCSD Attribute level_shift may improve convergence."""', 'gap'], {}), '(mycc,\n """HOMO-LUMO gap %s too small for CCSD.\nCCSD may be difficult to converge. Increasing CCSD Attribute level_shift may improve convergence."""\n , gap)\n', (52742, 52904), False, 'from pyscf.lib import logger\n'), ((58303, 58323), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (58321, 58323), False, 'from pyscf import lib\n'), ((60952, 61010), 'pyscf.ao2mo._ao2mo.nr_e2', '_ao2mo.nr_e2', (['eri1', 'mo_coeff', 'ijslice'], {'aosym': '"""s2"""', 'out': 'Lpq'}), "(eri1, mo_coeff, ijslice, aosym='s2', out=Lpq)\n", (60964, 61010), False, 'from pyscf.ao2mo import _ao2mo\n'), ((62249, 62269), 'pyscf.lib.ddot', 'lib.ddot', (['Loo.T', 'Lvv'], {}), '(Loo.T, Lvv)\n', (62257, 62269), False, 'from pyscf import lib\n'), ((4110, 4134), 'numpy.asarray', 'numpy.asarray', (['eris.oooo'], {}), '(eris.oooo)\n', (4123, 4134), False, 'import numpy\n'), ((13155, 13193), 'pyscf.lib.einsum', 'lib.einsum', (['"""jcd,cdbk->jbk"""', 'tau', 'vvvo'], {}), "('jcd,cdbk->jbk', tau, vvvo)\n", (13165, 13193), False, 'from pyscf import lib\n'), ((13226, 13260), 'pyscf.lib.einsum', 'lib.einsum', (['"""ka,jbk->jab"""', 't1', 'tmp'], {}), "('ka,jbk->jab', t1, tmp)\n", (13236, 13260), False, 'from pyscf import lib\n'), ((22332, 22351), 'ctypes.c_int', 'ctypes.c_int', (['nvirb'], {}), '(nvirb)\n', (22344, 22351), False, 'import ctypes\n'), ((24735, 24754), 'ctypes.c_int', 'ctypes.c_int', (['nvirb'], {}), '(nvirb)\n', (24747, 24754), False, 'import ctypes\n'), ((26607, 26631), 'numpy.diag_indices', 'numpy.diag_indices', (['nocc'], {}), '(nocc)\n', (26625, 26631), 
False, 'import numpy\n'), ((30782, 30797), 'numpy.abs', 'numpy.abs', (['x[0]'], {}), '(x[0])\n', (30791, 30797), False, 'import numpy\n'), ((31266, 31281), 'numpy.abs', 'numpy.abs', (['x[0]'], {}), '(x[0])\n', (31275, 31281), False, 'import numpy\n'), ((57983, 58003), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (58001, 58003), False, 'from pyscf import lib\n'), ((13086, 13129), 'numpy.einsum', 'numpy.einsum', (['"""a,jb->jab"""', 't1[i, p0:p1]', 't1'], {}), "('a,jb->jab', t1[i, p0:p1], t1)\n", (13098, 13129), False, 'import numpy\n')] |
#===============================================================================
# Copyright 2014-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from __future__ import print_function
import numpy as np
from scipy import sparse as sp
from sklearn.calibration import CalibratedClassifierCV
from sklearn.utils import check_random_state
from sklearn.utils.validation import (
check_is_fitted,
check_consistent_length,
_num_samples,
_check_sample_weight)
import sklearn.svm._classes as svm_classes
import sklearn.svm._base as svm_base
import warnings
from sklearn.exceptions import NotFittedError
from sklearn.utils.multiclass import _ovr_decision_function
from sklearn.model_selection import StratifiedKFold
from distutils.version import LooseVersion
from sklearn import __version__ as sklearn_version
import daal4py
from .._utils import (
make2d, getFPType, get_patch_message, sklearn_check_version, PatchingConditionsChain)
import logging
def _get_libsvm_impl():
return ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _dual_coef_getter(self):
return self._internal_dual_coef_
def _intercept_getter(self):
return self._internal_intercept_
def _dual_coef_setter(self, val):
self._internal_dual_coef_ = val
if hasattr(self, 'daal_model_'):
del self.daal_model_
if getattr(self, '_daal_fit', False):
self._daal_fit = False
def _intercept_setter(self, val):
self._internal_intercept_ = val
if hasattr(self, 'daal_model_'):
del self.daal_model_
if getattr(self, '_daal_fit', False):
self._daal_fit = False
# Methods to extract coefficients
def group_indices_by_class(num_classes, sv_ind_by_clf, labels):
    """Group support-vector indexes from all pairwise classifiers by class.

    Returns a list with one int32 array per class; each array holds, in the
    order they are encountered across ``sv_ind_by_clf``, every support-vector
    index whose training label is that class.  An index appearing in several
    pairwise classifiers is recorded once per appearance.
    """
    all_sv = np.hstack(sv_ind_by_clf)
    counts = np.bincount(labels[all_sv])
    grouped = [np.empty(n, dtype=np.int32) for n in counts]
    fill_pos = np.zeros(num_classes, dtype=np.intp)
    for clf_indices in sv_ind_by_clf:
        for sv_index in clf_indices:
            cls = labels[sv_index]
            grouped[cls][fill_pos[cls]] = sv_index
            fill_pos[cls] += 1
    return grouped
def map_sv_to_columns_in_dual_coef_matrix(sv_ind_by_class):
    """Assign each unique support-vector index a column of the dual-coef matrix.

    Columns are numbered consecutively class by class, in ascending index
    order within each class, matching scikit-learn's layout.  NOTE: each
    array in ``sv_ind_by_class`` is sorted in place as a side effect.
    """
    from collections import defaultdict
    mapping = defaultdict(lambda: -1)
    next_col = 0
    for per_class in sv_ind_by_class:
        per_class.sort()
        for sv_index in per_class:
            if mapping[sv_index] == -1:
                mapping[sv_index] = next_col
                next_col += 1
    return mapping
def map_to_lexicographic(n):
    """Permutation mapping reverse-lexicographic pair order to lexicographic.

    oneDAL enumerates the n*(n-1)/2 two-class models with pairs ordered by
    their second element; scikit-learn expects plain lexicographic order
    ((0,1), (0,2), ..., (1,2), ...).  Element k of the result is the
    position, in oneDAL's ordering, of the k-th lexicographic pair.
    """
    from itertools import combinations, count
    rev_lex_pairs = ((j, i) for i in range(n) for j in range(i))
    position_of = dict(zip(rev_lex_pairs, count(0)))
    return np.fromiter(
        (position_of[pair] for pair in combinations(range(n), 2)),
        dtype=np.intp)
def permute_list(li, perm):
    """Return a new list whose k-th element is ``li[perm[k]]``."""
    return list(map(li.__getitem__, perm))
def extract_dual_coef(num_classes, sv_ind_by_clf, sv_coef_by_clf, labels):
    """Assemble dual coefficients in scikit-learn's peculiar layout.

    From the per-pair (one-vs-one) classifier outputs, builds the
    ``(num_classes - 1, n_unique_sv)`` dual-coefficient matrix together with
    the matching ``support_`` array of training-sample indexes.
    """
    sv_ind_by_class = group_indices_by_class(
        num_classes, sv_ind_by_clf, labels)
    sv_ind_mapping = map_sv_to_columns_in_dual_coef_matrix(sv_ind_by_class)
    num_unique_sv = len(sv_ind_mapping)

    dual_coef = np.zeros((num_classes - 1, num_unique_sv),
                         dtype=sv_coef_by_clf[0].dtype)
    support_ = np.empty((num_unique_sv,), dtype=np.int32)

    # Pairs (i, j), j > i, in the same lexicographic order the inputs use.
    pair_iter = ((i, j) for i in range(num_classes)
                 for j in range(i + 1, num_classes))
    for p, (i, j) in enumerate(pair_iter):
        for sv_coef, sv_index in zip(sv_coef_by_clf[p], sv_ind_by_clf[p]):
            col_index = sv_ind_mapping[sv_index]
            # scikit-learn convention: a support vector of pair (i, j) fills
            # row i when its label is j, otherwise row j - 1.
            row_index = i if j == labels[sv_index] else j - 1
            dual_coef[row_index, col_index] = sv_coef
            support_[col_index] = sv_index
    return dual_coef, support_
def _daal4py_kf(kernel, X_fptype, gamma=1.0, is_sparse=False):
method = "fastCSR" if is_sparse else "defaultDense"
if kernel == 'rbf':
sigma_value = np.sqrt(0.5 / gamma)
kf = daal4py.kernel_function_rbf(
fptype=X_fptype, method=method, sigma=sigma_value)
elif kernel == 'linear':
kf = daal4py.kernel_function_linear(fptype=X_fptype, method=method)
else:
raise ValueError(
"_daal4py_fit received unexpected kernel specifiction {}.".format(kernel))
return kf
def _daal4py_check_weight(self, X, y, sample_weight):
ww = None
if sample_weight.shape[0] > 0:
sample_weight = _check_sample_weight(sample_weight, X)
if np.all(sample_weight <= 0):
raise ValueError(
'Invalid input - all samples have zero or negative weights.')
if np.any(sample_weight <= 0):
if len(np.unique(y[sample_weight > 0])) != len(self.classes_):
raise ValueError(
'Invalid input - all samples with positive weights '
'have the same label.')
ww = sample_weight
elif self.class_weight is not None:
ww = np.ones(X.shape[0], dtype=np.float64)
if self.class_weight is not None:
for i, v in enumerate(self.class_weight_):
ww[y == i] *= v
return ww
def _daal4py_svm(fptype, C, accuracyThreshold, tau,
                 maxIterations, cacheSize, doShrinking, kernel, nClasses=2):
    """Create a daal4py SVM training algorithm.

    For two classes the binary 'thunder' solver is returned directly; for
    more classes it is wrapped in a one-against-one multi-class trainer.
    """
    two_class_trainer = daal4py.svm_training(
        method='thunder',
        fptype=fptype,
        C=C,
        accuracyThreshold=accuracyThreshold,
        tau=tau,
        maxIterations=maxIterations,
        cacheSize=cacheSize,
        doShrinking=doShrinking,
        kernel=kernel)
    if nClasses == 2:
        return two_class_trainer
    return daal4py.multi_class_classifier_training(
        nClasses=nClasses,
        fptype=fptype,
        method='oneAgainstOne',
        training=two_class_trainer)
def _daal4py_fit(self, X, y_inp, sample_weight, kernel, is_sparse=False):
    """Train the SVC with oneDAL and populate sklearn-compatible attributes.

    Runs the daal4py SVM solver on (X, y_inp) and unpacks the resulting
    model into the attributes scikit-learn's SVC exposes: ``support_``,
    ``support_vectors_``, ``dual_coef_``, ``intercept_``, ``_n_support``,
    plus the raw model in ``daal_model_``.
    """
    if self.C <= 0:
        raise ValueError("C <= 0")
    num_classes = len(self.classes_)
    # oneDAL expects 2D inputs for weights and labels.
    if sample_weight is not None:
        sample_weight = make2d(sample_weight)
    y = make2d(y_inp)
    X_fptype = getFPType(X)
    kf = _daal4py_kf(kernel, X_fptype, gamma=self._gamma, is_sparse=is_sparse)
    # max_iter <= 0 means "no limit" in sklearn; map it to a huge iteration
    # cap (2**30).  cache_size is given in MB by sklearn, in bytes by oneDAL.
    algo = _daal4py_svm(fptype=X_fptype,
                        C=float(self.C),
                        accuracyThreshold=float(self.tol),
                        tau=1e-12,
                        maxIterations=int(
                            self.max_iter if self.max_iter > 0 else 2**30),
                        cacheSize=int(
                            self.cache_size * 1024 * 1024),
                        doShrinking=bool(self.shrinking),
                        kernel=kf,
                        nClasses=num_classes)
    res = algo.compute(data=X, labels=y, weights=sample_weight)
    model = res.model
    self.daal_model_ = model

    if num_classes == 2:
        # binary
        two_class_sv_ind_ = model.SupportIndices
        two_class_sv_ind_ = two_class_sv_ind_.ravel()

        # support indexes need permutation to arrange them
        # into the same layout as that of Scikit-Learn
        # (sorted first by class label, then by index).
        tmp = np.empty(two_class_sv_ind_.shape, dtype=np.dtype(
            [('label', y.dtype), ('ind', two_class_sv_ind_.dtype)]))
        tmp['label'][:] = y[two_class_sv_ind_].ravel()
        tmp['ind'][:] = two_class_sv_ind_
        perm = np.argsort(tmp, order=['label', 'ind'])
        del tmp

        self.support_ = two_class_sv_ind_[perm]
        self.support_vectors_ = X[self.support_]

        # Apply the same permutation to the dual-coefficient columns so
        # they stay aligned with support_.
        self.dual_coef_ = model.ClassificationCoefficients.T
        if is_sparse:
            self.dual_coef_ = sp.csr_matrix(self.dual_coef_)
        self.dual_coef_ = self.dual_coef_[:, perm]
        self.intercept_ = np.array([model.Bias])

    else:
        # multi-class: unpack every one-vs-one two-class model.
        intercepts = []
        coefs = []
        sv_ind_by_clf = []
        label_indexes = []

        model_id = 0
        for i1 in range(num_classes):
            label_indexes.append(np.where(y == i1)[0])
            for i2 in range(i1):
                svm_model = model.TwoClassClassifierModel(model_id)
                # Indices correspond to input features with label i1
                # followed by input features with label i2
                two_class_sv_ind_ = svm_model.SupportIndices
                # Map these indexes to indexes of the training data
                sv_ind = np.take(
                    np.hstack(
                        (label_indexes[i1],
                         label_indexes[i2])),
                    two_class_sv_ind_.ravel())
                sv_ind_by_clf.append(sv_ind)

                # svs_ = getArrayFromNumericTable(svm_model.getSupportVectors())
                # assert np.array_equal(svs_, X[sv_ind])

                # Signs are flipped to match sklearn's sign convention.
                intercepts.append(-svm_model.Bias)
                coefs.append(-svm_model.ClassificationCoefficients)
                model_id += 1

        # permute solutions to lexicographic ordering
        to_lex_perm = map_to_lexicographic(num_classes)
        sv_ind_by_clf = permute_list(sv_ind_by_clf, to_lex_perm)
        sv_coef_by_clf = permute_list(coefs, to_lex_perm)
        intercepts = permute_list(intercepts, to_lex_perm)

        self.dual_coef_, self.support_ = extract_dual_coef(
            num_classes,  # number of classes
            sv_ind_by_clf,  # support vector indexes by two-class classifiers
            sv_coef_by_clf,  # classification coefficients by two-class classifiers
            y.squeeze().astype(np.intp, copy=False)  # integer labels
        )
        if is_sparse:
            self.dual_coef_ = sp.csr_matrix(self.dual_coef_)
        self.support_vectors_ = X[self.support_]
        self.intercept_ = np.array(intercepts)

    # Per-class support-vector counts, as sklearn's _n_support expects.
    indices = y.take(self.support_, axis=0)
    self._n_support = np.array(
        [np.sum(indices == i) for i, c in enumerate(self.classes_)], dtype=np.int32)

    # Probability calibration tables are not produced by oneDAL; sklearn
    # expects these attributes to exist, so leave them empty.
    self._probA = np.empty(0)
    self._probB = np.empty(0)
def __compute_gamma__(gamma, kernel, X, use_var=True, deprecation=True):
"""
Computes actual value of 'gamma' parameter of RBF kernel
corresponding to SVC keyword values `gamma` and `kernel`, and feature
matrix X, with sparsity `sparse`.
In 0.20 gamma='scale' used to mean compute 'gamma' based on
column-wise standard deviation, but in 0.20.3 it was changed
to use column-wise variance.
See: https://github.com/scikit-learn/scikit-learn/pull/13221
"""
if deprecation:
_gamma_is_scale = gamma in ('scale', 'auto_deprecated')
else:
_gamma_is_scale = (gamma == 'scale')
if _gamma_is_scale:
kernel_uses_gamma = (not callable(kernel) and kernel
not in ('linear', 'precomputed'))
if kernel_uses_gamma:
if sp.isspmatrix(X):
# var = E[X^2] - E[X]^2
X_sc = (X.multiply(X)).mean() - (X.mean())**2
else:
X_sc = X.var()
if not use_var:
X_sc = np.sqrt(X_sc)
else:
X_sc = 1.0 / X.shape[1]
if gamma == 'scale':
if X_sc != 0:
_gamma = 1.0 / (X.shape[1] * X_sc)
else:
_gamma = 1.0
else:
if kernel_uses_gamma and deprecation and not np.isclose(X_sc, 1.0):
# NOTE: when deprecation ends we need to remove explicitly
# setting `gamma` in examples (also in tests). See
# https://github.com/scikit-learn/scikit-learn/pull/10331
# for the examples/tests that need to be reverted.
warnings.warn("The default value of gamma will change "
"from 'auto' to 'scale' in version 0.22 to "
"account better for unscaled features. Set "
"gamma explicitly to 'auto' or 'scale' to "
"avoid this warning.", FutureWarning)
_gamma = 1.0 / X.shape[1]
elif gamma == 'auto':
_gamma = 1.0 / X.shape[1]
elif isinstance(gamma, str) and not deprecation:
raise ValueError(
"When 'gamma' is a string, it should be either 'scale' or "
"'auto'. Got '{}' instead.".format(gamma)
)
else:
_gamma = gamma
return _gamma
def _compute_gamma(*args):
    """Resolve ``gamma`` with flags matching the installed sklearn version.

    Variance-based 'scale' applies from scikit-learn 0.20.3 on; the
    'auto_deprecated' deprecation path only before 0.22.
    """
    return __compute_gamma__(
        *args,
        use_var=sklearn_check_version("0.20.3"),
        deprecation=not sklearn_check_version("0.22"))
def fit(self, X, y, sample_weight=None):
    """Fit the SVM model according to the given training data.

    This replacement for ``sklearn.svm.SVC.fit`` dispatches to the
    daal4py (oneDAL) solver for linear/rbf kernels and falls back to
    stock scikit-learn otherwise.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
        For kernel="precomputed", the expected shape of X is
        (n_samples, n_samples).
    y : array-like, shape (n_samples,)
        Target values (class labels in classification, real numbers in
        regression)
    sample_weight : array-like, shape (n_samples,)
        Per-sample weights. Rescale C per sample. Higher weights
        force the classifier to put more emphasis on these points.

    Returns
    -------
    self : object

    Notes
    ------
    If X and y are not C-ordered and contiguous arrays of np.float64 and
    X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
    If X is a dense array, then the other methods will not support sparse
    matrices as input.
    """
    rnd = check_random_state(self.random_state)

    is_sparse = sp.isspmatrix(X)
    if is_sparse and self.kernel == "precomputed":
        raise TypeError("Sparse precomputed kernels are not supported.")
    self._sparse = is_sparse and not callable(self.kernel)

    if hasattr(self, 'decision_function_shape'):
        if self.decision_function_shape not in ('ovr', 'ovo'):
            raise ValueError(
                f"decision_function_shape must be either 'ovr' or 'ovo', "
                f"got {self.decision_function_shape}."
            )

    # Callable kernels skip data conversion (the user computes the Gram
    # matrix); otherwise validate/convert to C-ordered float64 or CSR.
    if callable(self.kernel):
        check_consistent_length(X, y)
    else:
        X, y = self._validate_data(X, y, dtype=np.float64,
                                   order='C', accept_sparse='csr',
                                   accept_large_sparse=False)
    y = self._validate_targets(y)

    # Normalize sample_weight to a float64 array (empty when absent).
    sample_weight = np.asarray([]
                               if sample_weight is None
                               else sample_weight, dtype=np.float64)
    solver_type = _get_libsvm_impl().index(self._impl)

    # input validation
    n_samples = _num_samples(X)
    if solver_type != 2 and n_samples != y.shape[0]:
        raise ValueError(
            "X and y have incompatible shapes.\n"
            "X has %s samples, but y has %s." % (n_samples, y.shape[0]))

    if self.kernel == "precomputed" and n_samples != X.shape[1]:
        raise ValueError("X.shape[0] should be equal to X.shape[1]")

    if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
        raise ValueError("sample_weight and X have incompatible shapes: "
                         "%r vs %r\n"
                         "Note: Sparse matrices cannot be indexed w/"
                         "boolean masks (use `indices=True` in CV)."
                         % (sample_weight.shape, X.shape))

    kernel = 'precomputed' if callable(self.kernel) else self.kernel

    if kernel == 'precomputed':
        self._gamma = 0.0
    else:
        self._gamma = _compute_gamma(self.gamma, kernel, X)

    fit = self._sparse_fit if self._sparse else self._dense_fit
    if self.verbose:  # pragma: no cover
        print('[LibSVM]', end='')

    # see comment on the other call to np.iinfo in this file
    seed = rnd.randint(np.iinfo('i').max)

    # Patch only for kernels oneDAL supports; log the decision either way.
    _patching_status = PatchingConditionsChain(
        "sklearn.svm.SVC.fit")
    _dal_ready = _patching_status.and_conditions([
        (kernel in ['linear', 'rbf'],
            f"'{kernel}' kernel is not supported. "
            "Only 'linear' and 'rbf' kernels are supported.")])
    _patching_status.write_log()
    if _dal_ready:
        sample_weight = _daal4py_check_weight(self, X, y, sample_weight)

        self._daal_fit = True
        _daal4py_fit(self, X, y, sample_weight, kernel, is_sparse=is_sparse)
        self.fit_status_ = 0

        if self.probability:
            # oneDAL does not produce Platt-scaling tables; emulate
            # probability support with a CV-calibrated clone of this SVC.
            params = self.get_params()
            params["probability"] = False
            params["decision_function_shape"] = 'ovr'
            clf_base = SVC(**params)
            try:
                n_splits = 5
                cv = StratifiedKFold(
                    n_splits=n_splits,
                    shuffle=True,
                    random_state=self.random_state)
                # ensemble=False (sklearn >= 0.24) matches libsvm's
                # "fit once, calibrate on folds" behavior more closely.
                if LooseVersion(sklearn_version) >= LooseVersion("0.24"):
                    self.clf_prob = CalibratedClassifierCV(
                        clf_base, ensemble=False, cv=cv, method='sigmoid',
                        n_jobs=n_splits)
                else:
                    self.clf_prob = CalibratedClassifierCV(
                        clf_base, cv=cv, method='sigmoid')
                self.clf_prob.fit(X, y, sample_weight)
            except ValueError:
                # Stratified CV can fail (e.g. too few samples per class);
                # fall back to calibrating a prefit model.
                clf_base = clf_base.fit(X, y, sample_weight)
                self.clf_prob = CalibratedClassifierCV(
                    clf_base, cv="prefit", method='sigmoid')
                self.clf_prob.fit(X, y, sample_weight)
    else:
        self._daal_fit = False
        fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)

    self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples, )

    # In binary case, we need to flip the sign of coef, intercept and
    # decision function. Use self._intercept_ and self._dual_coef_ internally.
    if not getattr(self, '_daal_fit', False):
        self._internal_intercept_ = self.intercept_.copy()
        self._internal_dual_coef_ = self.dual_coef_.copy()
    else:
        self._internal_intercept_ = self.intercept_.copy()
        self._internal_dual_coef_ = self.dual_coef_.copy()
        if len(self.classes_) == 2:
            self._internal_dual_coef_ *= -1
            self._internal_intercept_ *= -1

    # Stock sklearn stores the negated values for binary c_svc/nu_svc;
    # only apply the flip when the libsvm path was used.
    if not getattr(
            self,
            '_daal_fit',
            False) and len(
            self.classes_) == 2 and self._impl in [
            'c_svc',
            'nu_svc']:
        self.intercept_ *= -1
        self.dual_coef_ *= -1

    return self
def _daal4py_predict(self, X, is_decision_function=False):
    """Predict class labels (or decision values) with the trained oneDAL model.

    When ``is_decision_function`` is True the raw decision values are
    returned instead of labels (binary returns the same signed values for
    both; the 0/1 label conversion below is skipped by the caller contract).
    """
    X_fptype = getFPType(X)
    num_classes = len(self.classes_)
    # Rebuild the kernel object with the gamma resolved at fit time.
    kf = _daal4py_kf(self.kernel, X_fptype, gamma=self._gamma,
                     is_sparse=sp.isspmatrix(X))
    svm_predict = daal4py.svm_prediction(
        fptype=X_fptype,
        method='defaultDense',
        kernel=kf
    )
    if num_classes == 2:
        alg = svm_predict
    else:
        # Multi-class: wrap the binary predictor in one-vs-one voting.
        result_to_compute = 'computeDecisionFunction' \
            if is_decision_function else 'computeClassLabels'
        alg = daal4py.multi_class_classifier_prediction(
            nClasses=num_classes,
            fptype=X_fptype,
            pmethod="voteBased",
            tmethod='oneAgainstOne',
            resultsToEvaluate=result_to_compute,
            prediction=svm_predict
        )

    predictionRes = alg.compute(X, self.daal_model_)

    if not is_decision_function or num_classes == 2:
        res = predictionRes.prediction
        res = res.ravel()
    else:
        # Sign is flipped to match sklearn's decision-function convention.
        res = -predictionRes.decisionFunction

    if num_classes == 2 and not is_decision_function:
        # Convert from Intel(R) oneAPI Data Analytics Library format back to
        # original classes
        np.greater(res, 0, out=res)

    return res
def predict(self, X):
    """Perform classification on samples in X.

    Uses the oneDAL model when it was trained and the request qualifies;
    otherwise falls back to scikit-learn's dense/sparse predictors or to
    an argmax over the decision function.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        For kernel="precomputed", the expected shape of X is
        (n_samples_test, n_samples_train).

    Returns
    -------
    y_pred : array, shape (n_samples,)
    """
    check_is_fitted(self)
    _break_ties = getattr(self, 'break_ties', False)
    if _break_ties and self.decision_function_shape == 'ovo':
        raise ValueError("break_ties must be False when "
                         "decision_function_shape is 'ovo'")

    # The decision-function fallback is needed only when ALL of these hold:
    # break_ties requested, ovr shape, and more than two classes — hence
    # conditions_merging=any on the negated conditions.
    _patching_status = PatchingConditionsChain(
        "sklearn.svm.SVC.predict")
    _dal_ready = _patching_status.and_conditions([
        (not _break_ties, "Breaking ties is not supported."),
        (self.decision_function_shape != 'ovr',
            "'ovr' decision function shape is not supported."),
        (len(self.classes_) <= 2, "Number of classes > 2.")
    ], conditions_merging=any)
    _patching_status.write_log()
    if not _dal_ready:
        y = np.argmax(self.decision_function(X), axis=1)
    else:
        X = self._validate_for_predict(X)
        _dal_ready = _patching_status.and_conditions([
            (getattr(self, '_daal_fit', False) and hasattr(self, 'daal_model_'),
                "oneDAL model was not trained.")])
        if _dal_ready:
            if self.probability and self.clf_prob is not None:
                # Stay consistent with predict_proba: use the calibrated
                # classifier built at fit time.
                y = self.clf_prob.predict(X)
            else:
                y = _daal4py_predict(self, X)
        else:
            predict_func = self._sparse_predict if self._sparse else self._dense_predict
            y = predict_func(X)

    # Map internal label encoding back to the original class values.
    return self.classes_.take(np.asarray(y, dtype=np.intp))
def _daal4py_predict_proba(self, X):
    """Probability estimates via the CV-calibrated classifier built in fit().

    Raises NotFittedError when the estimator was fitted with
    probability=False (no calibrated classifier exists then).
    """
    X = self._validate_for_predict(X)
    clf_prob = getattr(self, 'clf_prob', None)
    if clf_prob is None:
        raise NotFittedError(
            "predict_proba is not available when fitted with probability=False")
    return clf_prob.predict_proba(X)
@property
def predict_proba(self):
    """Compute probabilities of possible outcomes for samples in X.

    The model need to have probability information computed at training
    time: fit with attribute `probability` set to True.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        For kernel="precomputed", the expected shape of X is
        [n_samples_test, n_samples_train]

    Returns
    -------
    T : ndarray of shape (n_samples, n_classes)
        Returns the probability of the sample for each class in
        the model. The columns correspond to the classes in sorted
        order, as they appear in the attribute :term:`classes_`.

    Notes
    -----
    The probability model is created using cross validation, so
    the results can be slightly different than those obtained by
    predict. Also, it will produce meaningless results on very small
    datasets.
    """
    # Property that returns the bound prediction callable; the choice
    # between the daal4py-backed and stock implementation is logged.
    self._check_proba()
    _patching_status = PatchingConditionsChain(
        "sklearn.svm.SVC.predict_proba")
    _dal_ready = _patching_status.and_conditions([
        (getattr(self, '_daal_fit', False), "oneDAL model was not trained.")])
    _patching_status.write_log()
    if _dal_ready:
        algo = self._daal4py_predict_proba
    else:
        algo = self._predict_proba
    return algo
def decision_function(self, X):
    """Evaluates the decision function for the samples in X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    Returns
    -------
    X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
        Returns the decision function of the sample for each class
        in the model.
        If decision_function_shape='ovr', the shape is (n_samples,
        n_classes).

    Notes
    -----
    If decision_function_shape='ovo', the function values are proportional
    to the distance of the samples X to the separating hyperplane. If the
    exact distances are required, divide the function values by the norm of
    the weight vector (``coef_``). See also `this question
    <https://stats.stackexchange.com/questions/14876/
    interpreting-distance-from-hyperplane-in-svm>`_ for further details.
    If decision_function_shape='ovr', the decision function is a monotonic
    transformation of ovo decision function.
    """
    _patching_status = PatchingConditionsChain(
        "sklearn.svm.SVC.decision_function")
    _dal_ready = _patching_status.and_conditions([
        (getattr(self, '_daal_fit', False), "oneDAL model was not trained.")])
    _patching_status.write_log()
    if _dal_ready:
        X = self._validate_for_predict(X)
        dec = _daal4py_predict(self, X, is_decision_function=True)
    else:
        dec = self._decision_function(X)
    # Convert the pairwise (ovo) values into one-vs-rest scores when asked.
    if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:
        return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
    return dec
# Introspect sklearn's BaseSVC.__init__ signature so patching code can tell
# whether this sklearn version already accepts the 'break_ties' keyword
# (new in scikit-learn 0.22).
__base_svc_init_arg_names__ = []

__base_svc_init_function__ = svm_base.BaseSVC.__init__

__base_svc_init_function_code__ = __base_svc_init_function__.__code__

try:
    # retrieve tuple of code argument names to check whether
    # new in 0.22 keyword 'break_ties' is in it
    __base_svc_init_arg_names__ = __base_svc_init_function_code__.co_varnames
except AttributeError:
    pass

# Drop the temporaries so they do not linger in the module namespace.
del __base_svc_init_function__
del __base_svc_init_function_code__
class SVC(svm_base.BaseSVC):
    # No class docstring here: sklearn's SVC.__doc__ is attached to this
    # class at module level after the monkey-patching below.
    _impl = 'c_svc'  # libsvm solver name; its index is the solver type

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='scale',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape='ovr',
                 break_ties=False, random_state=None):
        # Pure pass-through constructor mirroring sklearn.svm.SVC: all
        # hyper-parameters are forwarded to BaseSVC unchanged (nu is fixed
        # to 0. because it is unused by the c_svc formulation).
        super(SVC, self).__init__(
            kernel=kernel,
            degree=degree,
            gamma=gamma,
            coef0=coef0,
            tol=tol,
            C=C,
            nu=0.,
            shrinking=shrinking,
            probability=probability,
            cache_size=cache_size,
            class_weight=class_weight,
            verbose=verbose,
            max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            break_ties=break_ties,
            random_state=random_state)
# Attach the daal4py-accelerated implementations defined above as the SVC
# entry points, and route the internal (sign-flipped) coefficient/intercept
# attributes through the getter/setter pair so that manual assignment
# invalidates a trained oneDAL model.
SVC.fit = fit
SVC.predict = predict
SVC.predict_proba = predict_proba
SVC.decision_function = decision_function
SVC._daal4py_predict_proba = _daal4py_predict_proba
SVC._dual_coef_ = property(_dual_coef_getter, _dual_coef_setter)
SVC._intercept_ = property(_intercept_getter, _intercept_setter)
# Reuse stock sklearn's documentation for the patched class.
SVC.__doc__ = svm_classes.SVC.__doc__
| [
"daal4py.multi_class_classifier_training",
"numpy.sqrt",
"sklearn.exceptions.NotFittedError",
"daal4py.kernel_function_linear",
"numpy.hstack",
"sklearn.utils.validation._check_sample_weight",
"numpy.iinfo",
"numpy.argsort",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"sklearn.util... | [((2340, 2376), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'np.intp'}), '(num_classes, dtype=np.intp)\n', (2348, 2376), True, 'import numpy as np\n'), ((2966, 2990), 'collections.defaultdict', 'defaultdict', (['(lambda : -1)'], {}), '(lambda : -1)\n', (2977, 2990), False, 'from collections import defaultdict\n'), ((3740, 3777), 'numpy.fromiter', 'np.fromiter', (['perm_iter'], {'dtype': 'np.intp'}), '(perm_iter, dtype=np.intp)\n', (3751, 3777), True, 'import numpy as np\n'), ((4346, 4401), 'numpy.zeros', 'np.zeros', (['(num_classes - 1, num_unique_sv)'], {'dtype': 'dc_dt'}), '((num_classes - 1, num_unique_sv), dtype=dc_dt)\n', (4354, 4401), True, 'import numpy as np\n'), ((4417, 4459), 'numpy.empty', 'np.empty', (['(num_unique_sv,)'], {'dtype': 'np.int32'}), '((num_unique_sv,), dtype=np.int32)\n', (4425, 4459), True, 'import numpy as np\n'), ((6598, 6801), 'daal4py.svm_training', 'daal4py.svm_training', ([], {'method': '"""thunder"""', 'fptype': 'fptype', 'C': 'C', 'accuracyThreshold': 'accuracyThreshold', 'tau': 'tau', 'maxIterations': 'maxIterations', 'cacheSize': 'cacheSize', 'doShrinking': 'doShrinking', 'kernel': 'kernel'}), "(method='thunder', fptype=fptype, C=C,\n accuracyThreshold=accuracyThreshold, tau=tau, maxIterations=\n maxIterations, cacheSize=cacheSize, doShrinking=doShrinking, kernel=kernel)\n", (6618, 6801), False, 'import daal4py\n'), ((11216, 11227), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11224, 11227), True, 'import numpy as np\n'), ((11246, 11257), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11254, 11257), True, 'import numpy as np\n'), ((14945, 14982), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (14963, 14982), False, 'from sklearn.utils import check_random_state\n'), ((15000, 15016), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['X'], {}), '(X)\n', (15013, 15016), True, 'from scipy import sparse as sp\n'), ((15809, 15885), 
'numpy.asarray', 'np.asarray', (['([] if sample_weight is None else sample_weight)'], {'dtype': 'np.float64'}), '([] if sample_weight is None else sample_weight, dtype=np.float64)\n', (15819, 15885), True, 'import numpy as np\n'), ((16043, 16058), 'sklearn.utils.validation._num_samples', '_num_samples', (['X'], {}), '(X)\n', (16055, 16058), False, 'from sklearn.utils.validation import check_is_fitted, check_consistent_length, _num_samples, _check_sample_weight\n'), ((20171, 20244), 'daal4py.svm_prediction', 'daal4py.svm_prediction', ([], {'fptype': 'X_fptype', 'method': '"""defaultDense"""', 'kernel': 'kf'}), "(fptype=X_fptype, method='defaultDense', kernel=kf)\n", (20193, 20244), False, 'import daal4py\n'), ((21592, 21613), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (21607, 21613), False, 'from sklearn.utils.validation import check_is_fitted, check_consistent_length, _num_samples, _check_sample_weight\n'), ((2473, 2500), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.int32'}), '(n, dtype=np.int32)\n', (2481, 2500), True, 'import numpy as np\n'), ((5253, 5273), 'numpy.sqrt', 'np.sqrt', (['(0.5 / gamma)'], {}), '(0.5 / gamma)\n', (5260, 5273), True, 'import numpy as np\n'), ((5287, 5365), 'daal4py.kernel_function_rbf', 'daal4py.kernel_function_rbf', ([], {'fptype': 'X_fptype', 'method': 'method', 'sigma': 'sigma_value'}), '(fptype=X_fptype, method=method, sigma=sigma_value)\n', (5314, 5365), False, 'import daal4py\n'), ((5751, 5789), 'sklearn.utils.validation._check_sample_weight', '_check_sample_weight', (['sample_weight', 'X'], {}), '(sample_weight, X)\n', (5771, 5789), False, 'from sklearn.utils.validation import check_is_fitted, check_consistent_length, _num_samples, _check_sample_weight\n'), ((5801, 5827), 'numpy.all', 'np.all', (['(sample_weight <= 0)'], {}), '(sample_weight <= 0)\n', (5807, 5827), True, 'import numpy as np\n'), ((5948, 5974), 'numpy.any', 'np.any', (['(sample_weight <= 0)'], {}), 
'(sample_weight <= 0)\n', (5954, 5974), True, 'import numpy as np\n'), ((6943, 7064), 'daal4py.multi_class_classifier_training', 'daal4py.multi_class_classifier_training', ([], {'nClasses': 'nClasses', 'fptype': 'fptype', 'method': '"""oneAgainstOne"""', 'training': 'svm_train'}), "(nClasses=nClasses, fptype=fptype,\n method='oneAgainstOne', training=svm_train)\n", (6982, 7064), False, 'import daal4py\n'), ((8672, 8711), 'numpy.argsort', 'np.argsort', (['tmp'], {'order': "['label', 'ind']"}), "(tmp, order=['label', 'ind'])\n", (8682, 8711), True, 'import numpy as np\n'), ((9048, 9070), 'numpy.array', 'np.array', (['[model.Bias]'], {}), '([model.Bias])\n', (9056, 9070), True, 'import numpy as np\n'), ((11014, 11034), 'numpy.array', 'np.array', (['intercepts'], {}), '(intercepts)\n', (11022, 11034), True, 'import numpy as np\n'), ((15526, 15555), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X', 'y'], {}), '(X, y)\n', (15549, 15555), False, 'from sklearn.utils.validation import check_is_fitted, check_consistent_length, _num_samples, _check_sample_weight\n'), ((20468, 20664), 'daal4py.multi_class_classifier_prediction', 'daal4py.multi_class_classifier_prediction', ([], {'nClasses': 'num_classes', 'fptype': 'X_fptype', 'pmethod': '"""voteBased"""', 'tmethod': '"""oneAgainstOne"""', 'resultsToEvaluate': 'result_to_compute', 'prediction': 'svm_predict'}), "(nClasses=num_classes, fptype=\n X_fptype, pmethod='voteBased', tmethod='oneAgainstOne',\n resultsToEvaluate=result_to_compute, prediction=svm_predict)\n", (20509, 20664), False, 'import daal4py\n'), ((21133, 21160), 'numpy.greater', 'np.greater', (['res', '(0)'], {'out': 'res'}), '(res, 0, out=res)\n', (21143, 21160), True, 'import numpy as np\n'), ((22961, 22989), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': 'np.intp'}), '(y, dtype=np.intp)\n', (22971, 22989), True, 'import numpy as np\n'), ((23131, 23219), 'sklearn.exceptions.NotFittedError', 'NotFittedError', 
(['"""predict_proba is not available when fitted with probability=False"""'], {}), "(\n 'predict_proba is not available when fitted with probability=False')\n", (23145, 23219), False, 'from sklearn.exceptions import NotFittedError\n'), ((2423, 2447), 'numpy.hstack', 'np.hstack', (['sv_ind_by_clf'], {}), '(sv_ind_by_clf)\n', (2432, 2447), True, 'import numpy as np\n'), ((5421, 5483), 'daal4py.kernel_function_linear', 'daal4py.kernel_function_linear', ([], {'fptype': 'X_fptype', 'method': 'method'}), '(fptype=X_fptype, method=method)\n', (5451, 5483), False, 'import daal4py\n'), ((6282, 6319), 'numpy.ones', 'np.ones', (['X.shape[0]'], {'dtype': 'np.float64'}), '(X.shape[0], dtype=np.float64)\n', (6289, 6319), True, 'import numpy as np\n'), ((8940, 8970), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['self.dual_coef_'], {}), '(self.dual_coef_)\n', (8953, 8970), True, 'from scipy import sparse as sp\n'), ((10908, 10938), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['self.dual_coef_'], {}), '(self.dual_coef_)\n', (10921, 10938), True, 'from scipy import sparse as sp\n'), ((11121, 11141), 'numpy.sum', 'np.sum', (['(indices == i)'], {}), '(indices == i)\n', (11127, 11141), True, 'import numpy as np\n'), ((12083, 12099), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['X'], {}), '(X)\n', (12096, 12099), True, 'from scipy import sparse as sp\n'), ((17205, 17218), 'numpy.iinfo', 'np.iinfo', (['"""i"""'], {}), "('i')\n", (17213, 17218), True, 'import numpy as np\n'), ((20134, 20150), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['X'], {}), '(X)\n', (20147, 20150), True, 'from scipy import sparse as sp\n'), ((3641, 3649), 'itertools.count', 'count', (['(0)'], {}), '(0)\n', (3646, 3649), False, 'from itertools import combinations, count\n'), ((8481, 8545), 'numpy.dtype', 'np.dtype', (["[('label', y.dtype), ('ind', two_class_sv_ind_.dtype)]"], {}), "([('label', y.dtype), ('ind', two_class_sv_ind_.dtype)])\n", (8489, 8545), True, 'import numpy as np\n'), ((12303, 12316), 
'numpy.sqrt', 'np.sqrt', (['X_sc'], {}), '(X_sc)\n', (12310, 12316), True, 'import numpy as np\n'), ((12913, 13138), 'warnings.warn', 'warnings.warn', (['"""The default value of gamma will change from \'auto\' to \'scale\' in version 0.22 to account better for unscaled features. Set gamma explicitly to \'auto\' or \'scale\' to avoid this warning."""', 'FutureWarning'], {}), '(\n "The default value of gamma will change from \'auto\' to \'scale\' in version 0.22 to account better for unscaled features. Set gamma explicitly to \'auto\' or \'scale\' to avoid this warning."\n , FutureWarning)\n', (12926, 13138), False, 'import warnings\n'), ((18040, 18125), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': 'self.random_state'}), '(n_splits=n_splits, shuffle=True, random_state=self.random_state\n )\n', (18055, 18125), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5995, 6026), 'numpy.unique', 'np.unique', (['y[sample_weight > 0]'], {}), '(y[sample_weight > 0])\n', (6004, 6026), True, 'import numpy as np\n'), ((9294, 9311), 'numpy.where', 'np.where', (['(y == i1)'], {}), '(y == i1)\n', (9302, 9311), True, 'import numpy as np\n'), ((9729, 9778), 'numpy.hstack', 'np.hstack', (['(label_indexes[i1], label_indexes[i2])'], {}), '((label_indexes[i1], label_indexes[i2]))\n', (9738, 9778), True, 'import numpy as np\n'), ((12591, 12612), 'numpy.isclose', 'np.isclose', (['X_sc', '(1.0)'], {}), '(X_sc, 1.0)\n', (12601, 12612), True, 'import numpy as np\n'), ((18201, 18230), 'distutils.version.LooseVersion', 'LooseVersion', (['sklearn_version'], {}), '(sklearn_version)\n', (18213, 18230), False, 'from distutils.version import LooseVersion\n'), ((18234, 18254), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.24"""'], {}), "('0.24')\n", (18246, 18254), False, 'from distutils.version import LooseVersion\n'), ((18292, 18386), 'sklearn.calibration.CalibratedClassifierCV', 
'CalibratedClassifierCV', (['clf_base'], {'ensemble': '(False)', 'cv': 'cv', 'method': '"""sigmoid"""', 'n_jobs': 'n_splits'}), "(clf_base, ensemble=False, cv=cv, method='sigmoid',\n n_jobs=n_splits)\n", (18314, 18386), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((18490, 18547), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf_base'], {'cv': 'cv', 'method': '"""sigmoid"""'}), "(clf_base, cv=cv, method='sigmoid')\n", (18512, 18547), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((18752, 18815), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf_base'], {'cv': '"""prefit"""', 'method': '"""sigmoid"""'}), "(clf_base, cv='prefit', method='sigmoid')\n", (18774, 18815), False, 'from sklearn.calibration import CalibratedClassifierCV\n')] |
from EP_N_Env import *
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pdb import set_trace as bp
Tensor = torch.FloatTensor
def get_input_optimizer(input_array, lr=8e-2):
    """Create an Adam optimizer that optimizes *input_array* itself.

    Calling ``requires_grad_()`` turns the input tensor into the trainable
    parameter, so gradient steps move the input rather than any network
    weights (used here to search for a good routing action).

    Parameters
    ----------
    input_array : torch.Tensor
        Tensor to be optimized in place.
    lr : float, optional
        Learning rate; defaults to 8e-2, the previously hard-coded value.

    Returns
    -------
    torch.optim.Adam
        Optimizer whose single parameter group contains *input_array*.
    """
    return optim.Adam([input_array.requires_grad_()], lr=lr)
class Ave_D_Loss(nn.Module):
    """Differentiable surrogate for the (negative) average delay.

    Wraps a frozen critic network so that gradient descent on the raw
    action logits minimizes the critic's predicted delay.
    """

    def __init__(self, net, loads, N_node_in, N_node_out):
        super(Ave_D_Loss, self).__init__()
        # Freeze the critic: we only backpropagate into the action input.
        self.net = net.eval()
        self.N_node_in = N_node_in
        self.N_node_out = N_node_out
        # loads should be 1 X N_node; it is data, not a trainable parameter.
        self.load_mtx = loads
        self.load_mtx.requires_grad = False

    def forward(self, in_x):
        # Logits indexed as [batch, source x dest, mid]; softmax over the
        # last axis converts them to per-path traffic-split fractions.
        split_fractions = torch.nn.functional.softmax(in_x, dim=2)
        flat_loads = self.load_mtx.view(-1).unsqueeze(0)
        flat_splits = split_fractions.view(-1).unsqueeze(0)
        # Negate so that *minimizing* this loss maximizes the critic value.
        neg_value = -1 * self.net(flat_loads, flat_splits)
        return split_fractions, neg_value
class Critic(nn.Module):
    """Q-network: maps a (state, action) pair to a scalar value.

    Three fully connected layers; the hidden width halves after the
    first layer.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # Layers are created in this exact order so seeded initialization
        # stays reproducible.
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, int(hidden_size/2))
        self.linear3 = nn.Linear(int(hidden_size/2), output_size)

    def forward(self, stt, act):
        # Concatenate state and action along the feature axis.
        merged = torch.cat([stt, act], 1)
        hidden = F.relu(self.linear1(merged))
        hidden = F.relu(self.linear2(hidden))
        return self.linear3(hidden)
class Agent(object):
    """Critic-only agent: actions are found by gradient descent on the
    learned critic rather than by a separate actor network."""
    def __init__(self, **kwargs):
        # All hyper-parameters (env, gamma, critic_lr, tau, capacity,
        # batch_size, ...) arrive via kwargs and become attributes.
        for key, value in kwargs.items():
            setattr(self, key, value)
        s_dim = self.env.observation_shape
        a_dim = self.env.action_shape
        self.N_in = self.env.N_node_in
        self.N_out = self.env.N_node_out
        self.N_mid = self.env.N_node_mid
        # Number of random candidate actions sampled to seed the search.
        self.N_init = 1000
        self.critic = Critic(a_dim+s_dim, 256*2, 1)
        self.critic_target = Critic(a_dim+s_dim, 256*2, 1)
        self.critic_optim = optim.Adam(self.critic.parameters(), lr = self.critic_lr)
        self.buffer = []
        # Start the target network as an exact copy of the online critic.
        self.critic_target.load_state_dict(self.critic.state_dict())
    def act(self, s0):
        """Choose an action for state ``s0``.

        Samples N_init random candidate actions, keeps the one the critic
        scores best, then refines it with 100 Adam steps against the
        target critic.  Returns the refined action as a numpy array of
        per-path split fractions.
        """
        s0 = torch.tensor(s0, dtype=torch.float).unsqueeze(0)
        # NOTE(review): `Variable` is the legacy torch.autograd API; it is
        # presumably brought in via `from EP_N_Env import *` -- confirm.
        load_temp = Variable(Tensor(s0), requires_grad=False)
        # Random candidate logits, normalized to split fractions.
        x_init = Tensor(np.random.normal(0.0, 1.0, (self.N_init, self.N_in*self.N_out, self.N_mid)))
        x_init = torch.nn.functional.softmax(x_init, dim=2)
        # Score all candidates in one batched critic call (negated so the
        # best candidate has the smallest value).
        d_temp_a = -1*self.critic(load_temp.unsqueeze(0).repeat(self.N_init, 1, 1).view(self.N_init, -1), x_init.view(self.N_init, -1))
        D_loss_i = Ave_D_Loss(self.critic_target, load_temp, self.N_in, self.N_out)
        init_n_min = torch.argmin(d_temp_a, dim=0)
        x_chosen = x_init[init_n_min]
        # Refine the winning candidate by gradient descent on the input.
        x = Variable(x_chosen, requires_grad = True)
        optimizer = get_input_optimizer(x)
        opt_step = 0
        while opt_step < 100:
            opt_step = opt_step + 1
            def closure():
                optimizer.zero_grad()
                x_temp, d_temp = D_loss_i(x)
                d_temp.backward()
                # NOTE(review): the loss is not returned and `delay_temp`
                # is unused; Adam ignores the closure's return value, so
                # this still works, but LBFGS-style optimizers would not.
                delay_temp = d_temp.item()
            optimizer.step(closure)
        x2 = torch.nn.functional.softmax(x, dim=2)
        x2 = x2.detach().numpy()
        return x2
    def put(self, *transition):
        """Append a (s0, a0, r1, s1) transition, evicting the oldest
        entry once the replay buffer reaches `capacity`."""
        if len(self.buffer)== self.capacity:
            self.buffer.pop(0)
        self.buffer.append(transition)
    def clear(self):
        """Empty the replay buffer."""
        self.buffer.clear()
    def learn(self):
        """One critic update from a random minibatch, followed by a soft
        (Polyak) update of the target network.  No-op until the buffer
        holds at least `batch_size` transitions."""
        if len(self.buffer) < self.batch_size:
            return
        samples = random.sample(self.buffer, self.batch_size)
        s0, a0, r1, s1 = zip(*samples)
        s0 = torch.tensor(s0, dtype=torch.float)
        s0 = s0.unsqueeze(1)
        s0 = s0.view(self.batch_size, -1)
        a0 = torch.tensor(a0, dtype=torch.float).view(self.batch_size,-1)
        r1 = torch.tensor(r1, dtype=torch.float).view(self.batch_size,-1)
        s1 = torch.tensor(s1, dtype=torch.float)
        def critic_learn():
            # Regress the critic directly onto the observed reward
            # (no bootstrapped target -- s1 and gamma are unused here).
            y_pred = self.critic(s0, a0)
            loss_fn = nn.MSELoss()
            loss = loss_fn(y_pred, r1)
            self.critic_optim.zero_grad()
            loss.backward()
            self.critic_optim.step()
        def soft_update(net_target, net, tau):
            # Polyak averaging: target <- (1 - tau) * target + tau * online.
            for target_param, param in zip(net_target.parameters(), net.parameters()):
                target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
        critic_learn()
        soft_update(self.critic_target, self.critic, self.tau)
def Run_Simulation(rand_seed_n):
    """Run one full training simulation.

    Trains one Agent per traffic pair against the EP_Env environment,
    logging per-step delays to ``./CORL_record_<seed>.txt`` and dumping
    all delay/gain traces to ``./RL_data_<seed>.mat`` at the end.

    Parameters
    ----------
    rand_seed_n : int
        Seed index; scaled by 10 for the environment and used in the
        output file names.
    """
    rep_time = 1
    N_in = 4
    N_out = 16
    N_mid = 8
    N_pair = 4
    env = EP_Env(N_in, N_out, N_mid, N_pair, rep_time, rand_seed_n*10)
    s0 = env.initial_state()
    # NOTE(review): s_mean/s_std/s_max are computed but never used below.
    s_mean = np.mean(s0)
    s_std = np.std(s0)
    s_max = np.max(s0)
    # Per-step, per-pair delay traces for the RL policy, the original
    # (baseline) policy, and the equal-split policy.
    RL_delays = np.zeros((10000, N_pair))
    O_delays = np.zeros((10000, N_pair))
    E_delays = np.zeros((10000, N_pair))
    RL_delays_total = np.zeros(10000)
    RL_gains_total = np.zeros(10000)
    O_delays_total = np.zeros(10000)
    E_delays_total = np.zeros(10000)
    txt_file = './CORL_record_%d.txt'%(rand_seed_n)
    with open(txt_file, 'w') as filep:
        filep.write('Sample equal_delay orig_delay nn_delay gain\n')
    params = {
        'env': env,
        'gamma': 0.99,
        'actor_lr': 0.005,
        'critic_lr': 0.005,
        'tau': 0.02,
        'capacity': 1000,
        'batch_size': 32,
    }
    # One independent agent per traffic pair.
    agent_list = []
    for i in range(N_pair):
        agent_list.append(Agent(**params))
    for episode in range(1):
        s0_list = env.initial_state()
        # Normalize each pair's state by its own initial maximum.
        s_max_list = []
        for act_i in range(N_pair):
            s_max_list.append(np.max(s0_list[act_i]))
            s0_list[act_i] = s0_list[act_i]/np.max(s0_list[act_i])
        for step in range(10000*rep_time):
            act_list = []
            for act_i in range(N_pair):
                act_list.append( np.reshape(agent_list[act_i].act(s0_list[act_i]), (N_in, N_out, N_mid)) )
            # env_step presumably returns (volume, delay) pairs for the
            # original, equal-split, and NN policies plus the next state
            # -- confirm against EP_Env.
            o_v_l, o_d_l, e_v_l, e_d_l, n_v_l, n_d_l, s_l = env.env_step(act_list)
            for act_i in range(N_pair):
                s_l[act_i] = s_l[act_i]/s_max_list[act_i]
                # Reward is the negative volume-weighted delay.
                r_t = -1*n_d_l[act_i]/n_v_l[act_i]
                agent_list[act_i].put(s0_list[act_i], act_list[act_i].flatten(), r_t, s_l[act_i])
                r_t_o = -1*o_d_l[act_i]/o_v_l[act_i]
                r_t_e = -1*e_d_l[act_i]/e_v_l[act_i]
                RL_delays[step, act_i] = -1*r_t
                O_delays[step, act_i] = -1*r_t_o
                E_delays[step, act_i] = -1*r_t_e
            s0_list = s_l
            if step % rep_time ==0:
                # Aggregate (volume-weighted) delay over all pairs.
                n_d_total = sum(n_d_l)/sum(n_v_l)
                o_d_total = sum(o_d_l)/sum(o_v_l)
                e_d_total = sum(e_d_l)/sum(e_v_l)
                print('step:%d, eq_delay:%e, orig_delay:%e, nn_delay:%e, gain:%e'%(step, e_d_total, o_d_total, n_d_total, (o_d_total-n_d_total)/o_d_total))
                record_file = open(txt_file, 'a')
                record_file.write('%d %e %e %e %e\n'%(step, e_d_total, o_d_total, n_d_total, (o_d_total-n_d_total)/o_d_total))
                record_file.close()
                RL_delays_total[step] = n_d_total
                O_delays_total[step] = o_d_total
                E_delays_total[step] = e_d_total
                RL_gains_total[step] = (o_d_total-n_d_total)/o_d_total
            for act_i in range(N_pair):
                agent_list[act_i].learn()
    # NOTE(review): `scipy` is not imported in this file directly; it is
    # presumably re-exported by `from EP_N_Env import *` -- confirm.
    scipy.io.savemat('./RL_data_%d.mat'%(rand_seed_n), dict(RL_delays=RL_delays, O_delays=O_delays, E_delays=E_delays,\
        RL_delays_total=RL_delays_total, O_delays_total=O_delays_total, E_delays_total=E_delays_total, RL_gains_total=RL_gains_total))
if __name__ == '__main__':
    # CLI entry point: --seed_n selects the random seed and output file names.
    # NOTE(review): `argparse` is not imported here directly; presumably it
    # arrives via `from EP_N_Env import *` -- confirm or import explicitly.
    parser = argparse.ArgumentParser('')
    parser.add_argument('--seed_n', type=int)
    args = parser.parse_args()
    Run_Simulation(args.seed_n)
"numpy.random.normal",
"numpy.mean",
"random.sample",
"numpy.max",
"torch.tensor",
"numpy.zeros",
"torch.nn.MSELoss",
"torch.nn.Linear",
"numpy.std",
"torch.argmin",
"torch.nn.functional.softmax",
"torch.cat"
] | [((5156, 5167), 'numpy.mean', 'np.mean', (['s0'], {}), '(s0)\n', (5163, 5167), True, 'import numpy as np\n'), ((5181, 5191), 'numpy.std', 'np.std', (['s0'], {}), '(s0)\n', (5187, 5191), True, 'import numpy as np\n'), ((5205, 5215), 'numpy.max', 'np.max', (['s0'], {}), '(s0)\n', (5211, 5215), True, 'import numpy as np\n'), ((5233, 5258), 'numpy.zeros', 'np.zeros', (['(10000, N_pair)'], {}), '((10000, N_pair))\n', (5241, 5258), True, 'import numpy as np\n'), ((5275, 5300), 'numpy.zeros', 'np.zeros', (['(10000, N_pair)'], {}), '((10000, N_pair))\n', (5283, 5300), True, 'import numpy as np\n'), ((5317, 5342), 'numpy.zeros', 'np.zeros', (['(10000, N_pair)'], {}), '((10000, N_pair))\n', (5325, 5342), True, 'import numpy as np\n'), ((5366, 5381), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (5374, 5381), True, 'import numpy as np\n'), ((5404, 5419), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (5412, 5419), True, 'import numpy as np\n'), ((5441, 5456), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (5449, 5456), True, 'import numpy as np\n'), ((5478, 5493), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (5486, 5493), True, 'import numpy as np\n'), ((904, 944), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['in_x'], {'dim': '(2)'}), '(in_x, dim=2)\n', (931, 944), False, 'import torch\n'), ((1253, 1287), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1262, 1287), True, 'import torch.nn as nn\n'), ((1467, 1491), 'torch.cat', 'torch.cat', (['[stt, act]', '(1)'], {}), '([stt, act], 1)\n', (1476, 1491), False, 'import torch\n'), ((2559, 2601), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['x_init'], {'dim': '(2)'}), '(x_init, dim=2)\n', (2586, 2601), False, 'import torch\n'), ((2846, 2875), 'torch.argmin', 'torch.argmin', (['d_temp_a'], {'dim': '(0)'}), '(d_temp_a, dim=0)\n', (2858, 2875), False, 'import torch\n'), ((3357, 3394), 
'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (3384, 3394), False, 'import torch\n'), ((3772, 3815), 'random.sample', 'random.sample', (['self.buffer', 'self.batch_size'], {}), '(self.buffer, self.batch_size)\n', (3785, 3815), False, 'import random\n'), ((3887, 3922), 'torch.tensor', 'torch.tensor', (['s0'], {'dtype': 'torch.float'}), '(s0, dtype=torch.float)\n', (3899, 3922), False, 'import torch\n'), ((4157, 4192), 'torch.tensor', 'torch.tensor', (['s1'], {'dtype': 'torch.float'}), '(s1, dtype=torch.float)\n', (4169, 4192), False, 'import torch\n'), ((2465, 2542), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(self.N_init, self.N_in * self.N_out, self.N_mid)'], {}), '(0.0, 1.0, (self.N_init, self.N_in * self.N_out, self.N_mid))\n', (2481, 2542), True, 'import numpy as np\n'), ((4321, 4333), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4331, 4333), True, 'import torch.nn as nn\n'), ((2329, 2364), 'torch.tensor', 'torch.tensor', (['s0'], {'dtype': 'torch.float'}), '(s0, dtype=torch.float)\n', (2341, 2364), False, 'import torch\n'), ((4008, 4043), 'torch.tensor', 'torch.tensor', (['a0'], {'dtype': 'torch.float'}), '(a0, dtype=torch.float)\n', (4020, 4043), False, 'import torch\n'), ((4083, 4118), 'torch.tensor', 'torch.tensor', (['r1'], {'dtype': 'torch.float'}), '(r1, dtype=torch.float)\n', (4095, 4118), False, 'import torch\n'), ((6109, 6131), 'numpy.max', 'np.max', (['s0_list[act_i]'], {}), '(s0_list[act_i])\n', (6115, 6131), True, 'import numpy as np\n'), ((6177, 6199), 'numpy.max', 'np.max', (['s0_list[act_i]'], {}), '(s0_list[act_i])\n', (6183, 6199), True, 'import numpy as np\n')] |
"""
I/O for FLAC3D format.
"""
import logging
import struct
import time
import numpy
from ..__about__ import __version__ as version
from .._common import _pick_first_int_data
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import Mesh
# Cell types this writer can map onto FLAC3D primitives, per record kind
# ("zone" = 3D cells, "face" = 2D cells).  Higher-order meshio elements are
# degraded to their linear counterpart (extra mid-side nodes are dropped).
meshio_only = {
    "zone": {
        "tetra": "tetra",
        "tetra10": "tetra",
        "pyramid": "pyramid",
        "pyramid13": "pyramid",
        "wedge": "wedge",
        "wedge12": "wedge",
        "wedge15": "wedge",
        "wedge18": "wedge",
        "hexahedron": "hexahedron",
        "hexahedron20": "hexahedron",
        "hexahedron24": "hexahedron",
        "hexahedron27": "hexahedron",
    },
    "face": {
        "triangle": "triangle",
        "triangle6": "triangle",
        "triangle7": "triangle",
        "quad": "quad",
        "quad8": "quad",
        "quad9": "quad",
    },
}
# Infer the meshio cell type from a record's vertex count.
numnodes_to_meshio_type = {
    "zone": {4: "tetra", 5: "pyramid", 6: "wedge", 8: "hexahedron"},
    "face": {3: "triangle", 4: "quad"},
}
# meshio cell type -> FLAC3D type code written in ASCII records.
meshio_to_flac3d_type = {
    "triangle": "T3",
    "quad": "Q4",
    "tetra": "T4",
    "pyramid": "P5",
    "wedge": "W6",
    "hexahedron": "B8",
}
# Vertex permutation applied when converting FLAC3D ordering to meshio.
flac3d_to_meshio_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 4, 2, 3],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 4, 2, 3, 6, 7, 5],
}
# Vertex permutation applied when converting meshio ordering to FLAC3D.
meshio_to_flac3d_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 3, 4, 2],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 3, 4, 2, 7, 5, 6],
}
# Alternative ordering used when a zone's scalar triple product is
# non-positive (see _translate_zones).
meshio_to_flac3d_order_2 = {
    "tetra": [0, 2, 1, 3],
    "pyramid": [0, 3, 1, 4, 2],
    "wedge": [0, 2, 3, 1, 5, 4],
    "hexahedron": [0, 3, 1, 4, 2, 5, 7, 6],
}
# Topological dimension recorded in field_data for each record kind.
flag_to_numdim = {
    "zone": 3,
    "face": 2,
}
def read(filename):
    """Read a FLAC3D f3grid grid file (ASCII or binary)."""
    # Sniff the first few bytes: a NUL byte marks a binary grid.
    # See <http://code.activestate.com/recipes/173220/>
    with open_file(filename, "rb") as probe:
        is_binary = b"\x00" in probe.read(8)
    with open_file(filename, "rb" if is_binary else "r") as handle:
        return read_buffer(handle, is_binary)
def read_buffer(f, binary):
    """Read binary or ASCII file.

    Parses grid points, zones/faces, and group records from *f* and
    assembles a meshio Mesh.  *binary* selects between the binary record
    layout and the whitespace-separated ASCII layout.
    """
    # Leading keyword of an ASCII record -> record kind.
    flags = {
        "Z": "zone",
        "F": "face",
        "ZGROUP": "zone",
        "FGROUP": "face",
    }
    points = []
    point_ids = {}  # file point ID -> index into `points`
    cells = []
    field_data = {}
    # Zones and faces do not share the same cell ID pool in FLAC3D
    # i.e. a given cell ID can be assigned to a zone and a face concurrently
    mapper = {"zone": {}, "face": {}}
    slots = {"zone": set(), "face": set()}
    pidx = 0  # running point index
    cidx = 0  # running cell index (shared across zones and faces)
    gidx = 0  # running group index
    if binary:
        # Not sure what the first bytes represent, the format might be wrong
        # It does not seem to be useful anyway
        _ = struct.unpack("<2I", f.read(8))
        (num_nodes,) = struct.unpack("<I", f.read(4))
        for pidx in range(num_nodes):
            pid, point = _read_point(f, binary)
            points.append(point)
            point_ids[pid] = pidx
        # Binary layout: all zones + zone groups, then faces + face groups.
        for flag in ["zone", "face"]:
            (num_cells,) = struct.unpack("<I", f.read(4))
            for _ in range(num_cells):
                cid, cell = _read_cell(f, point_ids, binary)
                cells = _update_cells(cells, cell, flag)
                mapper[flag][cid] = [cidx]
                cidx += 1
            (num_groups,) = struct.unpack("<I", f.read(4))
            for _ in range(num_groups):
                name, slot, data = _read_group(f, binary)
                field_data, mapper[flag] = _update_field_data(
                    field_data,
                    mapper[flag],
                    data,
                    name,
                    gidx + 1,
                    flag,
                )
                slots[flag] = _update_slots(slots[flag], slot)
                gidx += 1
    else:
        # ASCII layout: record kind is determined line by line.
        line = f.readline().rstrip().split()
        while line:
            if line[0] == "G":
                pid, point = _read_point(line, binary)
                points.append(point)
                point_ids[pid] = pidx
                pidx += 1
            elif line[0] in {"Z", "F"}:
                flag = flags[line[0]]
                cid, cell = _read_cell(line, point_ids, binary)
                cells = _update_cells(cells, cell, flag)
                mapper[flag][cid] = [cidx]
                cidx += 1
            elif line[0] in {"ZGROUP", "FGROUP"}:
                flag = flags[line[0]]
                name, slot, data = _read_group(f, binary, line)
                field_data, mapper[flag] = _update_field_data(
                    field_data,
                    mapper[flag],
                    data,
                    name,
                    gidx + 1,
                    flag,
                )
                slots[flag] = _update_slots(slots[flag], slot)
                gidx += 1
            line = f.readline().rstrip().split()
    if field_data:
        # Flatten the per-block cell counts to map running cell indices
        # onto a single group-ID array, then split it back per block.
        num_cells = numpy.cumsum([len(c[1]) for c in cells])
        cell_data = numpy.zeros(num_cells[-1], dtype=int)
        for k, v in mapper.items():
            if not slots[k]:
                continue
            for cid, zid in v.values():
                cell_data[cid] = zid
        cell_data = {"flac3d:group": numpy.split(cell_data, num_cells[:-1])}
    else:
        cell_data = {}
    return Mesh(
        points=numpy.array(points),
        cells=[(k, numpy.array(v)[:, flac3d_to_meshio_order[k]]) for k, v in cells],
        cell_data=cell_data,
        field_data=field_data,
    )
def _read_point(buf_or_line, binary):
"""Read point coordinates."""
if binary:
pid, x, y, z = struct.unpack("<I3d", buf_or_line.read(28))
point = [x, y, z]
else:
pid = int(buf_or_line[1])
point = [float(l) for l in buf_or_line[2:]]
return pid, point
def _read_cell(buf_or_line, point_ids, binary):
"""Read cell connectivity."""
if binary:
cid, num_verts = struct.unpack("<2I", buf_or_line.read(8))
cell = struct.unpack("<{}I".format(num_verts), buf_or_line.read(4 * num_verts))
is_b7 = num_verts == 7
else:
cid = int(buf_or_line[2])
cell = buf_or_line[3:]
is_b7 = buf_or_line[1] == "B7"
cell = [point_ids[int(l)] for l in cell]
if is_b7:
cell.append(cell[-1])
return cid, cell
def _read_group(buf_or_line, binary, line=None):
    """Read cell group.

    Returns ``(name, slot, data)`` where *data* is the list of member cell
    IDs.  In ASCII mode *line* is the already-split ZGROUP/FGROUP header
    line and *buf_or_line* is the file object positioned just after it.
    """
    if binary:
        # Group name
        (num_chars,) = struct.unpack("<H", buf_or_line.read(2))
        (name,) = struct.unpack("<{}s".format(num_chars), buf_or_line.read(num_chars))
        name = name.decode("utf-8")
        # Slot name
        (num_chars,) = struct.unpack("<H", buf_or_line.read(2))
        (slot,) = struct.unpack("<{}s".format(num_chars), buf_or_line.read(num_chars))
        slot = slot.decode("utf-8")
        # Zones
        (num_zones,) = struct.unpack("<I", buf_or_line.read(4))
        data = struct.unpack("<{}I".format(num_zones), buf_or_line.read(4 * num_zones))
    else:
        name = line[1].replace('"', "")
        data = []
        slot = "" if "SLOT" not in line else line[-1]
        # Accumulate member IDs from continuation lines until the next
        # record keyword; tell/seek lets us push the lookahead line back.
        i = buf_or_line.tell()
        line = buf_or_line.readline()
        while True:
            line = line.rstrip().split()
            if line and (line[0] not in {"*", "ZGROUP"}):
                data += [int(l) for l in line]
            else:
                # Not part of this group: rewind so the caller re-reads it.
                buf_or_line.seek(i)
                break
            i = buf_or_line.tell()
            line = buf_or_line.readline()
    return name, slot, data
def _update_cells(cells, cell, flag):
    """Append *cell*, extending the trailing block when the types match."""
    cell_type = numnodes_to_meshio_type[flag][len(cell)]
    if cells and cells[-1][0] == cell_type:
        # Same type as the current block: grow it in place.
        cells[-1][1].append(cell)
    else:
        # Type changed (or first cell): start a new block.
        cells.append((cell_type, [cell]))
    return cells
def _update_field_data(field_data, mapper, data, name, gidx, flag):
    """Register group *name* and tag its member cells with index *gidx*."""
    for cell_id in data:
        mapper[cell_id].append(gidx)
    # meshio field_data convention: [tag, topological dimension].
    field_data[name] = numpy.array([gidx, flag_to_numdim[flag]])
    return field_data, mapper
def _update_slots(slots, slot):
"""Update slot set. Only one slot is supported."""
slots.add(slot)
if len(slots) > 1:
raise ReadError("Multiple slots are not supported")
return slots
def write(filename, mesh, float_fmt=".16e", binary=False):
    """Write FLAC3D f3grid grid file.

    Requires at least one 3D cell block.  If cell data is present, the
    first integer array is written out as zone/face groups; any further
    arrays are skipped with a warning.
    """
    if not any(c.type in meshio_only["zone"].keys() for c in mesh.cells):
        raise WriteError("FLAC3D format only supports 3D cells")
    # Pick out material
    material = None
    if mesh.cell_data:
        key, other = _pick_first_int_data(mesh.cell_data)
        if key:
            material = numpy.concatenate(mesh.cell_data[key])
        if other:
            logging.warning(
                "FLAC3D can only write one cell data array. "
                "Picking {}, skipping {}.".format(key, ", ".join(other))
            )
    mode = "wb" if binary else "w"
    with open_file(filename, mode) as f:
        if binary:
            f.write(
                struct.pack("<2I", 1375135718, 3)
            )  # Don't know what these values represent
        else:
            f.write("* FLAC3D grid produced by meshio v{}\n".format(version))
            f.write("* {}\n".format(time.ctime()))
        _write_points(f, mesh.points, binary, float_fmt)
        # Zones first, then faces -- matching the layout read_buffer expects.
        for flag in ["zone", "face"]:
            _write_cells(f, mesh.points, mesh.cells, flag, binary)
            _write_groups(f, mesh.cells, material, mesh.field_data, flag, binary)
def _write_points(f, points, binary, float_fmt=None):
"""Write points coordinates."""
if binary:
f.write(struct.pack("<I", len(points)))
for i, point in enumerate(points):
f.write(struct.pack("<I3d", i + 1, *point))
else:
f.write("* GRIDPOINTS\n")
for i, point in enumerate(points):
fmt = "G\t{:8}\t" + "\t".join(3 * ["{:" + float_fmt + "}"]) + "\n"
f.write(fmt.format(i + 1, *point))
def _write_cells(f, points, cells, flag, binary):
    """Write cells.

    For zones, IDs start at 1; for faces, IDs continue after the last
    zone ID (zones and faces share the running counter in this writer).
    """
    if flag == "zone":
        count = 0
        cells = _translate_zones(points, cells)
    else:
        # Offset face IDs past all zone cells.
        count = sum(len(c[1]) for c in cells if c.type in meshio_only["zone"])
        cells = _translate_faces(cells)
    if binary:
        # Cell count header.
        f.write(
            struct.pack(
                "<I", sum(len(c[1]) for c in cells if c[0] in meshio_only[flag])
            )
        )
        for _, cdata in cells:
            num_cells, num_verts = cdata.shape
            # Each record: cell ID, vertex count, then 1-based vertex IDs.
            tmp = numpy.column_stack(
                (
                    numpy.arange(1, num_cells + 1) + count,
                    numpy.full(num_cells, num_verts),
                    cdata + 1,
                )
            )
            f.write(
                struct.pack("<{}I".format((num_verts + 2) * num_cells), *tmp.ravel())
            )
            count += num_cells
    else:
        flag_to_text = {
            "zone": ("ZONES", "Z"),
            "face": ("FACES", "F"),
        }
        f.write("* {}\n".format(flag_to_text[flag][0]))
        for ctype, cdata in cells:
            # e.g. "Z B8 <id> <v1> ... <v8>\n"
            fmt = (
                "{} {{}} {{}} ".format(flag_to_text[flag][1])
                + " ".join(["{}"] * cdata.shape[1])
                + "\n"
            )
            for entry in cdata + 1:
                count += 1
                f.write(fmt.format(meshio_to_flac3d_type[ctype], count, *entry))
def _write_groups(f, cells, cell_data, field_data, flag, binary):
    """Write groups.

    Emits one ZGROUP/FGROUP record per distinct value in *cell_data*
    (group labels resolved through *field_data* when available).  In
    binary mode the slot name is hard-wired to "Default".
    """
    if cell_data is not None:
        groups, labels = _translate_groups(cells, cell_data, field_data, flag)
        if binary:
            slot = "Default".encode("utf-8")
            # Group count header, then one length-prefixed record per group.
            f.write(struct.pack("<I", len(groups)))
            for k in sorted(groups.keys()):
                num_chars, num_zones = len(labels[k]), len(groups[k])
                # Layout: name length + name, slot length (7) + slot,
                # member count + member IDs.
                fmt = "<H{}sH7sI{}I".format(num_chars, num_zones)
                tmp = [
                    num_chars,
                    labels[k].encode("utf-8"),
                    7,
                    slot,
                    num_zones,
                    *groups[k],
                ]
                f.write(struct.pack(fmt, *tmp))
        else:
            flag_to_text = {
                "zone": "ZGROUP",
                "face": "FGROUP",
            }
            f.write("* {} GROUPS\n".format(flag.upper()))
            for k in sorted(groups.keys()):
                f.write('{} "{}"\n'.format(flag_to_text[flag], labels[k]))
                _write_table(f, groups[k])
    else:
        if binary:
            # No cell data: binary files still need the zero group count.
            f.write(struct.pack("<I", 0))
def _translate_zones(points, cells):
    """Reorder meshio cells to FLAC3D zones.
    Four first points must form a right-handed coordinate system (outward normal vectors).
    Reorder corner points according to sign of scalar triple products.
    """
    # See <https://stackoverflow.com/a/42386330/353337>
    def slicing_summing(a, b, c):
        # Vectorized scalar triple product a . (b x c) for row vectors.
        c0 = b[:, 1] * c[:, 2] - b[:, 2] * c[:, 1]
        c1 = b[:, 2] * c[:, 0] - b[:, 0] * c[:, 2]
        c2 = b[:, 0] * c[:, 1] - b[:, 1] * c[:, 0]
        return a[:, 0] * c0 + a[:, 1] * c1 + a[:, 2] * c2

    zones = []
    for key, idx in cells:
        if key not in meshio_only["zone"].keys():
            continue
        # Compute scalar triple products
        key = meshio_only["zone"][key]
        # Coordinates of each zone's first four (reordered) corner points.
        tmp = points[idx[:, meshio_to_flac3d_order[key][:4]].T]
        det = slicing_summing(tmp[1] - tmp[0], tmp[2] - tmp[0], tmp[3] - tmp[0])
        # Reorder corner points
        # Positive determinant -> primary ordering; otherwise the mirrored
        # ordering in meshio_to_flac3d_order_2.
        data = numpy.where(
            (det > 0)[:, None],
            idx[:, meshio_to_flac3d_order[key]],
            idx[:, meshio_to_flac3d_order_2[key]],
        )
        zones.append((key, data))
    return zones
def _translate_faces(cells):
    """Reorder meshio cells to FLAC3D faces, skipping non-face blocks."""
    faces = []
    for cell_type, conn in cells:
        if cell_type in meshio_only["face"]:
            # Degrade higher-order elements and permute into FLAC3D order.
            flac_type = meshio_only["face"][cell_type]
            faces.append((flac_type, conn[:, meshio_to_flac3d_order[flac_type]]))
    return faces
def _translate_groups(cells, cell_data, field_data, flag):
    """Convert meshio cell_data to FLAC3D groups.

    Returns ``(groups, labels)``: *groups* maps each data value to the
    1-based IDs of the cells carrying it (restricted to cells whose
    dimension matches *flag*); *labels* maps each value to its display
    name, taken from *field_data* when available.
    """
    # Topological dimension (2 or 3) of every cell, in file order.
    num_dims = numpy.concatenate(
        [numpy.full(len(c[1]), 2 if c[0] in meshio_only["face"] else 3) for c in cells]
    )
    groups = {
        k: numpy.nonzero(
            numpy.logical_and(cell_data == k, num_dims == flag_to_numdim[flag])
        )[0]
        + 1
        for k in numpy.unique(cell_data)
    }
    # Drop values with no member cells of this dimension.
    groups = {k: v for k, v in groups.items() if v.size}
    labels = {k: str(k) for k in groups.keys()}
    labels[0] = "None"
    if field_data:
        # Prefer human-readable names from field_data (tag -> name).
        labels.update(
            {v[0]: k for k, v in field_data.items() if v[1] == flag_to_numdim[flag]}
        )
    return groups, labels
def _write_table(f, data, ncol=20):
"""Write group data table."""
nrow = len(data) // ncol
lines = numpy.split(data, numpy.full(nrow, ncol).cumsum())
for line in lines:
if len(line):
f.write(" {}\n".format(" ".join([str(l) for l in line])))
register("flac3d", [".f3grid"], read, {"flac3d": write})
| [
"time.ctime",
"numpy.unique",
"numpy.logical_and",
"numpy.where",
"struct.pack",
"numpy.array",
"numpy.zeros",
"numpy.split",
"numpy.concatenate",
"numpy.full",
"numpy.arange"
] | [((8208, 8249), 'numpy.array', 'numpy.array', (['[gidx, flag_to_numdim[flag]]'], {}), '([gidx, flag_to_numdim[flag]])\n', (8219, 8249), False, 'import numpy\n'), ((5194, 5231), 'numpy.zeros', 'numpy.zeros', (['num_cells[-1]'], {'dtype': 'int'}), '(num_cells[-1], dtype=int)\n', (5205, 5231), False, 'import numpy\n'), ((13813, 13924), 'numpy.where', 'numpy.where', (['(det > 0)[:, None]', 'idx[:, meshio_to_flac3d_order[key]]', 'idx[:, meshio_to_flac3d_order_2[key]]'], {}), '((det > 0)[:, None], idx[:, meshio_to_flac3d_order[key]], idx[:,\n meshio_to_flac3d_order_2[key]])\n', (13824, 13924), False, 'import numpy\n'), ((5437, 5475), 'numpy.split', 'numpy.split', (['cell_data', 'num_cells[:-1]'], {}), '(cell_data, num_cells[:-1])\n', (5448, 5475), False, 'import numpy\n'), ((5543, 5562), 'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (5554, 5562), False, 'import numpy\n'), ((8897, 8935), 'numpy.concatenate', 'numpy.concatenate', (['mesh.cell_data[key]'], {}), '(mesh.cell_data[key])\n', (8914, 8935), False, 'import numpy\n'), ((14760, 14783), 'numpy.unique', 'numpy.unique', (['cell_data'], {}), '(cell_data)\n', (14772, 14783), False, 'import numpy\n'), ((9285, 9318), 'struct.pack', 'struct.pack', (['"""<2I"""', '(1375135718)', '(3)'], {}), "('<2I', 1375135718, 3)\n", (9296, 9318), False, 'import struct\n'), ((9981, 10015), 'struct.pack', 'struct.pack', (['"""<I3d"""', '(i + 1)', '*point'], {}), "('<I3d', i + 1, *point)\n", (9992, 10015), False, 'import struct\n'), ((12847, 12867), 'struct.pack', 'struct.pack', (['"""<I"""', '(0)'], {}), "('<I', 0)\n", (12858, 12867), False, 'import struct\n'), ((15214, 15236), 'numpy.full', 'numpy.full', (['nrow', 'ncol'], {}), '(nrow, ncol)\n', (15224, 15236), False, 'import numpy\n'), ((9503, 9515), 'time.ctime', 'time.ctime', ([], {}), '()\n', (9513, 9515), False, 'import time\n'), ((10900, 10932), 'numpy.full', 'numpy.full', (['num_cells', 'num_verts'], {}), '(num_cells, num_verts)\n', (10910, 10932), False, 'import 
numpy\n'), ((12428, 12450), 'struct.pack', 'struct.pack', (['fmt', '*tmp'], {}), '(fmt, *tmp)\n', (12439, 12450), False, 'import struct\n'), ((14650, 14717), 'numpy.logical_and', 'numpy.logical_and', (['(cell_data == k)', '(num_dims == flag_to_numdim[flag])'], {}), '(cell_data == k, num_dims == flag_to_numdim[flag])\n', (14667, 14717), False, 'import numpy\n'), ((5583, 5597), 'numpy.array', 'numpy.array', (['v'], {}), '(v)\n', (5594, 5597), False, 'import numpy\n'), ((10840, 10870), 'numpy.arange', 'numpy.arange', (['(1)', '(num_cells + 1)'], {}), '(1, num_cells + 1)\n', (10852, 10870), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
n_pts = 500  # points sampled per class
np.random.seed(0)  # fixed seed so the sampled clusters are reproducible
# Two Gaussian clusters in 2-D: class A centered near (13, 12), class B near (8, 6).
Xa = np.array([np.random.normal(13, 2, n_pts), np.random.normal(12, 2, n_pts)]).T
Xb = np.array([np.random.normal(8, 2, n_pts), np.random.normal(6, 2, n_pts)]).T
X = np.vstack((Xa, Xb))  # stacked features: first n_pts rows are class 0, rest class 1
y = np.matrix(np.append(np.zeros(n_pts), np.ones(n_pts))).T  # column vector of labels
plt.scatter(X[:n_pts, 0], X[:n_pts, 1])  # class-0 cluster
plt.scatter(X[n_pts:, 0], X[n_pts:, 1])  # class-1 cluster
#plt.ion()
plt.show()
# A single sigmoid unit on 2 inputs: equivalent to logistic regression.
model = Sequential()
model.add(Dense(units = 1, input_shape = (2,), activation = 'sigmoid'))
adam = Adam(lr = 0.1)
model.compile(adam, loss='binary_crossentropy', metrics = ['accuracy'])
h = model.fit(x=X, y=y, verbose=1, batch_size=50, epochs=500, shuffle='true')
# NOTE(review): newer Keras records this metric under the key 'accuracy', not
# 'acc' -- confirm against the installed Keras version.
plt.plot(h.history['acc'])
plt.title('accuracy')
plt.xlabel('epoch')
plt.legend(['accuracy'])
plt.show()
# plt.plot(h.history['loss'])
# plt.title('loss')
# plt.xlabel('epoch')
# plt.legend(['loss'])
# plt.show()
# print(X.shape)
def plot_decision_boundary(X, y, model):
    """Shade the model's predicted class scores over the span of X.

    X     : (n, 2) array; used only to size the plotting window.
    y     : unused; kept for call-site compatibility.
    model : any object exposing a .predict(points) method.
    """
    # Pad the data range by 1 unit on each side; np.linspace's default of
    # 50 samples per axis sets the contour resolution.
    xs = np.linspace(min(X[:, 0]) - 1, max(X[:, 0]) + 1)
    ys = np.linspace(min(X[:, 1]) - 1, max(X[:, 1]) + 1)
    grid_x, grid_y = np.meshgrid(xs, ys)
    # Flatten the lattice into (50*50, 2) coordinate pairs for prediction.
    samples = np.c_[grid_x.ravel(), grid_y.ravel()]
    scores = model.predict(samples).reshape(grid_x.shape)
    plt.contourf(grid_x, grid_y, scores)
# Visualize the learned decision boundary with the training data overlaid.
plot_decision_boundary(X, y, model)
plt.scatter(X[:n_pts, 0], X[:n_pts, 1])
plt.scatter(X[n_pts:, 0], X[n_pts:, 1])
#plt.show()
# Classify a single probe point and mark it on the plot.
x = 7.5
y = 5  # NOTE(review): shadows the label matrix `y` above; harmless here since training is done
point = np.array([[x, y]])
prediction = model.predict(point)
plt.plot([x], [y], marker='o', markersize=10, color='red')
plt.show()
print('Prediction is:', prediction) | [
"keras.optimizers.Adam",
"matplotlib.pyplot.contourf",
"numpy.random.normal",
"numpy.ones",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"numpy.array",
"numpy.zeros",
"numpy.meshgrid",
"numpy.vstack",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"ke... | [((178, 195), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (192, 195), True, 'import numpy as np\n'), ((363, 382), 'numpy.vstack', 'np.vstack', (['(Xa, Xb)'], {}), '((Xa, Xb))\n', (372, 382), True, 'import numpy as np\n'), ((443, 482), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:n_pts, 0]', 'X[:n_pts, 1]'], {}), '(X[:n_pts, 0], X[:n_pts, 1])\n', (454, 482), True, 'import matplotlib.pyplot as plt\n'), ((483, 522), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[n_pts:, 0]', 'X[n_pts:, 1]'], {}), '(X[n_pts:, 0], X[n_pts:, 1])\n', (494, 522), True, 'import matplotlib.pyplot as plt\n'), ((534, 544), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (542, 544), True, 'import matplotlib.pyplot as plt\n'), ((555, 567), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (565, 567), False, 'from keras.models import Sequential\n'), ((647, 659), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (651, 659), False, 'from keras.optimizers import Adam\n'), ((814, 840), 'matplotlib.pyplot.plot', 'plt.plot', (["h.history['acc']"], {}), "(h.history['acc'])\n", (822, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 862), 'matplotlib.pyplot.title', 'plt.title', (['"""accuracy"""'], {}), "('accuracy')\n", (850, 862), True, 'import matplotlib.pyplot as plt\n'), ((863, 882), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (873, 882), True, 'import matplotlib.pyplot as plt\n'), ((883, 907), 'matplotlib.pyplot.legend', 'plt.legend', (["['accuracy']"], {}), "(['accuracy'])\n", (893, 907), True, 'import matplotlib.pyplot as plt\n'), ((908, 918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (916, 918), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1659), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:n_pts, 0]', 'X[:n_pts, 1]'], {}), '(X[:n_pts, 0], X[:n_pts, 1])\n', (1631, 1659), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1699), 'matplotlib.pyplot.scatter', 
'plt.scatter', (['X[n_pts:, 0]', 'X[n_pts:, 1]'], {}), '(X[n_pts:, 0], X[n_pts:, 1])\n', (1671, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1753), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (1743, 1753), True, 'import numpy as np\n'), ((1788, 1846), 'matplotlib.pyplot.plot', 'plt.plot', (['[x]', '[y]'], {'marker': '"""o"""', 'markersize': '(10)', 'color': '"""red"""'}), "([x], [y], marker='o', markersize=10, color='red')\n", (1796, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1855, 1857), True, 'import matplotlib.pyplot as plt\n'), ((578, 632), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'input_shape': '(2,)', 'activation': '"""sigmoid"""'}), "(units=1, input_shape=(2,), activation='sigmoid')\n", (583, 632), False, 'from keras.layers import Dense\n'), ((1263, 1290), 'numpy.meshgrid', 'np.meshgrid', (['x_span', 'y_span'], {}), '(x_span, y_span)\n', (1274, 1290), True, 'import numpy as np\n'), ((1522, 1545), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'z'], {}), '(xx, yy, z)\n', (1534, 1545), True, 'import matplotlib.pyplot as plt\n'), ((211, 241), 'numpy.random.normal', 'np.random.normal', (['(13)', '(2)', 'n_pts'], {}), '(13, 2, n_pts)\n', (227, 241), True, 'import numpy as np\n'), ((243, 273), 'numpy.random.normal', 'np.random.normal', (['(12)', '(2)', 'n_pts'], {}), '(12, 2, n_pts)\n', (259, 273), True, 'import numpy as np\n'), ((293, 322), 'numpy.random.normal', 'np.random.normal', (['(8)', '(2)', 'n_pts'], {}), '(8, 2, n_pts)\n', (309, 322), True, 'import numpy as np\n'), ((324, 353), 'numpy.random.normal', 'np.random.normal', (['(6)', '(2)', 'n_pts'], {}), '(6, 2, n_pts)\n', (340, 353), True, 'import numpy as np\n'), ((407, 422), 'numpy.zeros', 'np.zeros', (['n_pts'], {}), '(n_pts)\n', (415, 422), True, 'import numpy as np\n'), ((424, 438), 'numpy.ones', 'np.ones', (['n_pts'], {}), '(n_pts)\n', (431, 438), True, 'import 
numpy as np\n')] |
""" This file defines the sample class. """
import numpy as np
from opentamp.src.policy_hooks.utils.policy_solver_utils import ACTION_ENUM
class Sample(object):
    """
    Class that handles the representation of a trajectory and stores a
    single trajectory.

    Per-sensor raw readings live in self._data; the flat buffers
    (_X, _obs, _prim_obs, ...) are lazy caches assembled from it.
    NaN entries mark a cache slice as stale.
    Note: must be serializable for easy saving, no C++ references!
    """

    def __init__(self, agent):
        # The agent supplies every dimensionality constant and the
        # pack_data_* helpers used to assemble the flat buffers.
        self.agent = agent
        self.T = agent.T          # trajectory length in timesteps
        self.step = 0
        self.task_end = False
        self._data = {}           # sensor name -> per-timestep data array
        self.reinit()
        self.draw = True
        self.opt_wt = None

    def reinit(self):
        """(Re)allocate every cached buffer from the agent's current dimensions."""
        self.dX = self.agent.dX
        self.dU = self.agent.dU
        self.dO = self.agent.dO
        self.dM = self.agent.dM
        self.dPrim = self.agent.dPrim
        self.dPrimOut = self.agent.dPrimOut
        self.dContOut = self.agent.dContOut
        self.dCont = self.agent.dCont
        self.dVal = self.agent.dVal

        self.success = 0
        self.opt_suc = 0
        self._postsuc = False
        self.source_label = 'rollout'
        self.wt = 1.
        self.base_x = None
        self.condition = 0

        # NaN == "not yet assembled from self._data".
        self._X = np.empty((self.T, self.dX))
        self._X.fill(np.nan)
        self.env_state = {}
        self._obs = np.empty((self.T, self.dO))
        self._obs.fill(np.nan)
        self._prim_out = np.empty((self.T, self.dPrimOut))
        self._prim_out.fill(np.nan)
        self._cont_out = np.empty((self.T, self.dContOut))
        self._cont_out.fill(np.nan)
        self._prim_obs = np.empty((self.T, self.dPrim))
        self._prim_obs.fill(np.nan)
        self._cont_obs = np.empty((self.T, self.dCont))
        self._cont_obs.fill(np.nan)
        self._val_obs = np.empty((self.T, self.dVal))
        self._val_obs.fill(np.nan)
        self._meta = np.empty(self.dM)
        self._meta.fill(np.nan)

        # Reference trajectories (e.g. from an optimizer), stored separately
        # from the rolled-out data.
        self._ref_U = np.zeros((self.T, self.dU), dtype='float32')
        self._ref_X = np.zeros((self.T, self.agent.symbolic_bound), dtype='float32')
        self.task_cost = np.nan
        self.task_start = False
        self.removable = True
        self.use_ts = np.ones(self.T)
        self.prim_use_ts = np.ones(self.T)
        self.opt_strength = 0.

    def set(self, sensor_name, sensor_data, t=None):
        """ Set trajectory data for a particular sensor; invalidates caches. """
        if t is None:
            self._data[sensor_name] = sensor_data
            # Whole-trajectory write: every assembled buffer is now stale.
            for buf in (self._X, self._obs, self._val_obs, self._prim_obs,
                        self._prim_out, self._cont_out, self._cont_obs,
                        self._meta):
                buf.fill(np.nan)
        else:
            if sensor_name not in self._data:
                # Lazily allocate a (T, ...) array on the first write.
                self._data[sensor_name] = \
                        np.empty((self.T,) + sensor_data.shape)
                self._data[sensor_name].fill(np.nan)
            self._data[sensor_name][t, :] = sensor_data
            # Only timestep t of each cache is stale (meta has no time axis
            # and is left untouched, matching historical behavior).
            for buf in (self._X, self._obs, self._val_obs, self._prim_obs,
                        self._prim_out, self._cont_obs, self._cont_out):
                buf[t, :].fill(np.nan)

    def get(self, sensor_name, t=None):
        """ Get trajectory data for a particular sensor. """
        return (self._data[sensor_name] if t is None
                else self._data[sensor_name][t, :])

    def _assemble(self, buf, valid_types, pack_fn, t=None, skip_meta=True):
        """Shared lazy assembly for the cached flat buffers.

        If the requested slice of *buf* still contains NaN, pack every
        matching sensor from self._data into it via *pack_fn*, then return
        a copy so callers cannot mutate the cache.
        """
        out = buf if t is None else buf[t, :]
        if np.any(np.isnan(out)):
            for data_type in self._data:
                if data_type not in valid_types:
                    continue
                if skip_meta and data_type in self.agent.meta_data_types:
                    continue
                data = (self._data[data_type] if t is None
                        else self._data[data_type][t, :])
                pack_fn(out, data, data_types=[data_type])
        return out.copy()

    def get_X(self, t=None):
        """ Get the state. Put it together if not precomputed. """
        # Unlike the observation getters, state assembly never skipped meta
        # data types historically; skip_meta=False preserves that.
        return self._assemble(self._X, self.agent.x_data_types,
                              self.agent.pack_data_x, t=t, skip_meta=False)

    def set_X(self, X, t=None):
        """ Scatter a flat state vector back into per-sensor entries. """
        for data_type in self.agent._x_data_idx:
            self.set(data_type, X[self.agent._x_data_idx[data_type]], t=t)

    def set_obs(self, obs, t=None):
        """ Scatter a flat observation vector back into per-sensor entries. """
        for data_type in self.agent._obs_data_idx:
            self.set(data_type, obs[self.agent._obs_data_idx[data_type]], t=t)

    def set_prim_obs(self, prim_obs, t=None):
        """ Scatter a flat prim-observation vector back into per-sensor entries. """
        for data_type in self.agent._prim_obs_data_idx:
            self.set(data_type, prim_obs[self.agent._prim_obs_data_idx[data_type]], t=t)

    def set_val_obs(self, val_obs, t=None):
        """ Scatter a flat val-observation vector back into per-sensor entries. """
        for data_type in self.agent._val_obs_data_idx:
            self.set(data_type, val_obs[self.agent._val_obs_data_idx[data_type]], t=t)

    def get_U(self, t=None):
        """ Get the action. """
        return self._data[ACTION_ENUM] if t is None else self._data[ACTION_ENUM][t, :]

    def get_obs(self, t=None):
        """ Get the observation. Put it together if not precomputed. """
        return self._assemble(self._obs, self.agent.obs_data_types,
                              self.agent.pack_data_obs, t=t)

    def get_prim_obs(self, t=None):
        """ Get the prim observation. Put it together if not precomputed. """
        return self._assemble(self._prim_obs, self.agent.prim_obs_data_types,
                              self.agent.pack_data_prim_obs, t=t)

    def get_prim_out(self, t=None):
        """ Get the prim output. Put it together if not precomputed. """
        return self._assemble(self._prim_out, self.agent.prim_out_data_types,
                              self.agent.pack_data_prim_out, t=t)

    def get_cont_obs(self, t=None):
        """ Get the cont observation. Put it together if not precomputed. """
        return self._assemble(self._cont_obs, self.agent.cont_obs_data_types,
                              self.agent.pack_data_cont_obs, t=t)

    def get_cont_out(self, t=None):
        """ Get the cont output. Put it together if not precomputed. """
        return self._assemble(self._cont_out, self.agent.cont_out_data_types,
                              self.agent.pack_data_cont_out, t=t)

    def get_val_obs(self, t=None):
        """ Get the val observation. Put it together if not precomputed. """
        return self._assemble(self._val_obs, self.agent.val_obs_data_types,
                              self.agent.pack_data_val_obs, t=t)

    def get_meta(self):
        """ Get the meta data. Put it together if not precomputed. """
        # Meta has no time axis and is returned without copying, matching
        # the historical behavior.
        meta = self._meta
        if np.any(np.isnan(meta)):
            for data_type in self._data:
                if data_type not in self.agent.meta_data_types:
                    continue
                data = self._data[data_type]
                self.agent.pack_data_meta(meta, data, data_types=[data_type])
        return meta

    def set_ref_X(self, X):
        self._ref_X[:, :] = X[:, :]

    def set_ref_U(self, U):
        self._ref_U[:, :] = U[:, :]

    def get_ref_X(self, t=-1):
        # NOTE(review): t == 0 falls through to the full-trajectory copy --
        # confirm this is intended.
        if t > 0:
            return self._ref_X[t].copy()
        return self._ref_X.copy()

    def get_ref_U(self, t=-1):
        if t > 0:
            return self._ref_U[t].copy()
        return self._ref_U.copy()

    # For pickling: the agent holds unserializable references, so drop it.
    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('agent')
        return state

    # For unpickling: the agent must be re-attached by the caller.
    def __setstate__(self, state):
        self.__dict__ = state
        self.__dict__['agent'] = None
| [
"numpy.zeros",
"numpy.empty",
"numpy.ones",
"numpy.isnan"
] | [((1137, 1164), 'numpy.empty', 'np.empty', (['(self.T, self.dX)'], {}), '((self.T, self.dX))\n', (1145, 1164), True, 'import numpy as np\n'), ((1242, 1269), 'numpy.empty', 'np.empty', (['(self.T, self.dO)'], {}), '((self.T, self.dO))\n', (1250, 1269), True, 'import numpy as np\n'), ((1326, 1359), 'numpy.empty', 'np.empty', (['(self.T, self.dPrimOut)'], {}), '((self.T, self.dPrimOut))\n', (1334, 1359), True, 'import numpy as np\n'), ((1421, 1454), 'numpy.empty', 'np.empty', (['(self.T, self.dContOut)'], {}), '((self.T, self.dContOut))\n', (1429, 1454), True, 'import numpy as np\n'), ((1516, 1546), 'numpy.empty', 'np.empty', (['(self.T, self.dPrim)'], {}), '((self.T, self.dPrim))\n', (1524, 1546), True, 'import numpy as np\n'), ((1608, 1638), 'numpy.empty', 'np.empty', (['(self.T, self.dCont)'], {}), '((self.T, self.dCont))\n', (1616, 1638), True, 'import numpy as np\n'), ((1699, 1728), 'numpy.empty', 'np.empty', (['(self.T, self.dVal)'], {}), '((self.T, self.dVal))\n', (1707, 1728), True, 'import numpy as np\n'), ((1785, 1802), 'numpy.empty', 'np.empty', (['self.dM'], {}), '(self.dM)\n', (1793, 1802), True, 'import numpy as np\n'), ((1857, 1901), 'numpy.zeros', 'np.zeros', (['(self.T, self.dU)'], {'dtype': '"""float32"""'}), "((self.T, self.dU), dtype='float32')\n", (1865, 1901), True, 'import numpy as np\n'), ((1924, 1986), 'numpy.zeros', 'np.zeros', (['(self.T, self.agent.symbolic_bound)'], {'dtype': '"""float32"""'}), "((self.T, self.agent.symbolic_bound), dtype='float32')\n", (1932, 1986), True, 'import numpy as np\n'), ((2104, 2119), 'numpy.ones', 'np.ones', (['self.T'], {}), '(self.T)\n', (2111, 2119), True, 'import numpy as np\n'), ((2147, 2162), 'numpy.ones', 'np.ones', (['self.T'], {}), '(self.T)\n', (2154, 2162), True, 'import numpy as np\n'), ((3872, 3883), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (3880, 3883), True, 'import numpy as np\n'), ((5342, 5355), 'numpy.isnan', 'np.isnan', (['obs'], {}), '(obs)\n', (5350, 5355), True, 'import numpy 
as np\n'), ((5995, 6008), 'numpy.isnan', 'np.isnan', (['obs'], {}), '(obs)\n', (6003, 6008), True, 'import numpy as np\n'), ((6658, 6671), 'numpy.isnan', 'np.isnan', (['out'], {}), '(out)\n', (6666, 6671), True, 'import numpy as np\n'), ((7321, 7334), 'numpy.isnan', 'np.isnan', (['obs'], {}), '(obs)\n', (7329, 7334), True, 'import numpy as np\n'), ((7984, 7997), 'numpy.isnan', 'np.isnan', (['out'], {}), '(out)\n', (7992, 7997), True, 'import numpy as np\n'), ((8644, 8657), 'numpy.isnan', 'np.isnan', (['obs'], {}), '(obs)\n', (8652, 8657), True, 'import numpy as np\n'), ((9249, 9263), 'numpy.isnan', 'np.isnan', (['meta'], {}), '(meta)\n', (9257, 9263), True, 'import numpy as np\n'), ((3040, 3079), 'numpy.empty', 'np.empty', (['((self.T,) + sensor_data.shape)'], {}), '((self.T,) + sensor_data.shape)\n', (3048, 3079), True, 'import numpy as np\n')] |
import os, glob, config, logging
from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell
'''
Here, you must use the class "Cell" to refer to a cell you want processed.
This allows the program to process your data into functioning wxPython code.
The class Cell has the following default keyword arguments:
my_cell = Cell(
row = None,
col = None,
text = None,
background_color = None,
text_color = None,
font_size = None,
bold = False,
function = None,
)
Import your own custom functions from the "functions_for_custom_analysis_go_in_here" folder.
Your import statements should look like this:
def my_custom_spreadsheet():
from functions_for_custom_analysis_go_in_here import your_file
data = your_file.your_function()
'''
# Note, if you are adding a double click event into your analysis spreadsheets (you programming champ), you may run into errors, here is why:
# There is a preset function in wxStocks_modules/wxStocks_gui.py in the CustomAnalysisPage object, in the create_custom_analysis_spread_sheet method
# This preset event function on double click (search for: self.screen_grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK)
# takes the text in that cell, and passes it to the add_ticker_to_research_page in wxStocks_modules/wxStocks_utilities.py,
# which passes it to the research page for processing. If the text is a valid ticker it'll add that stock to the research page.
# I'm not sure how to ensure that only valid ticker columns are allowed to be passed for custom spreadsheets, but for now i'll allow any cell to be double clicked.
def rainbow_spreadsheet(stock_list):
    """Build spreadsheet cells for stock_list, colored by ticker length.

    Each stock contributes three cells (ticker, firm name, ticker length)
    sharing one background color, and rows are interleaved across the
    length buckets so the colors cycle like a rainbow.  A blank sentinel
    cell marks the end of the sheet.  Returns a list of Cell objects, or
    None for an empty input (matching the original early-out).
    """
    class Row(object):
        "lightweight holder pairing a stock with its display attributes"
        def __init__(self, stock):
            self.stock = stock

    if not stock_list:  # no empty list drama
        return

    # One color per ticker length; anything longer than 7 chars is white.
    color_by_len = {1: "#FFCCFF", 2: "#FFCCCC", 3: "#FFFFCC", 4: "#CCFFCC",
                    5: "#CCFFFF", 6: "#CCCCFF", 7: "#CC99FF"}
    row_list = []
    for stock in stock_list:
        row = Row(stock=stock)
        row.ticker = stock.ticker
        row.ticker_len = len(stock.ticker)
        row.firm_name = stock.firm_name
        row.row_color = color_by_len.get(row.ticker_len, "#FFFFFF")
        row_list.append(row)

    # Stable sort keeps each bucket in input order.
    row_list.sort(key=lambda row: row.ticker_len)

    def make_rainbow_row_list(rows):
        "interleave rows bucketed by ticker length so the colors cycle"
        buckets = [[r for r in rows if r.ticker_len == n] for n in range(1, 8)]
        # BUG FIX: tickers longer than 7 characters were assigned a color
        # but then silently dropped from the sheet; keep them in a final
        # bucket so every input stock appears in the output.
        buckets.append([r for r in rows if r.ticker_len > 7])
        longest = max(len(bucket) for bucket in buckets)
        rainbow = []
        for i in range(longest):
            for bucket in buckets:
                if i < len(bucket):
                    rainbow.append(bucket[i])
        return rainbow

    row_list = make_rainbow_row_list(row_list)

    cell_list = []
    # enumerate avoids the O(n^2) row_list.index(row) lookups of the
    # original per-row loop.
    for row_num, row in enumerate(row_list):
        cell_list.append(Cell(text=row.ticker, row=row_num, col=0,
                              col_title="ticker", background_color=row.row_color))
        cell_list.append(Cell(text=row.firm_name, row=row_num, col=1,
                              col_title="name", background_color=row.row_color))
        cell_list.append(Cell(text=row.ticker_len, row=row_num, col=2,
                              col_title="ticker length", background_color=row.row_color))

    # Sentinel row marking the end of the data.
    cell_list.append(Cell(text="", row=len(row_list), col=0, row_title="end"))
    return cell_list
def jas_stock_analysis(stock_list):
"very in-depth analysis example"
from functions_for_custom_analysis_go_in_here import aaii_formulas as aaii
import numpy
import pprint as pp
import locale
portfolios = config.PORTFOLIO_OBJECTS_DICT.values()
#background_color = {"green": "#D1FFD1", "red": "#8A0002", "orange": "#FFE0B2"}
class Attribute(object):
def __init__(self, name, function, weight, maximum, minimum, display, size_factor, col, align_right):
self.name = name
self.function = function
self.weight = weight
self.maximum = maximum
self.minimum = minimum
self.size_factor = size_factor
self.display = display
self.col = col
self.align_right = align_right
self.avg = None
self.std = None
class Row(object):
"used to look at a whole row of stock' data at the same time"
def __init__(self, stock):
self.stock = stock
class Function_Globals(object):
def __init__(self,
default_row_buffer_before_stocks = None,
all_cells_list = [],
row_list = [],
attrbute_name_avg_and_std_triple_list = [],
avg_row_cell = None,
score_avg = None,
score_std = None,
):
self.default_row_buffer_before_stocks = default_row_buffer_before_stocks
self.all_cells_list = all_cells_list
self.row_list = row_list
self.attrbute_name_avg_and_std_triple_list = attrbute_name_avg_and_std_triple_list
self.avg_row_cell = avg_row_cell
self.score_avg = score_avg
self.score_std = score_std
function_globals = Function_Globals(default_row_buffer_before_stocks = 2)
def return_Row_obj_else_create(stock):
correct_row = None
for row in function_globals.row_list:
if row.stock is stock:
correct_row = row
if not correct_row:
correct_row = Row(stock)
function_globals.row_list.append(correct_row)
return correct_row
def top_row():
text = 'BUY CANDIDATES'
top_row_cell = Cell(row = 0, col=0, text=text)
function_globals.all_cells_list.append(top_row_cell)
def gen_attribute_list(row = 1):
cell_list_to_return = []
for attribute_obj in attribute_list:
attribute_cell = Cell(row = row, col = attribute_obj.col, text = attribute_obj.name, col_title = attribute_obj.name)
function_globals.all_cells_list.append(attribute_cell)
def create_rows_of_stock_data(stock_list = stock_list):
rows_before_stock_data = 2
rows_list = []
for stock in stock_list:
stock_row = return_Row_obj_else_create(stock)
for attribute_obj in attribute_list:
try:
data = attribute_obj.function(stock)
except: # in case it throws an attribute error
data = None
data_cell = Cell(text = data) # this will be thrown out later
setattr(stock_row, attribute_obj.name, data_cell)
def create_data_scores():
for attribute_obj in attribute_list:
if attribute_obj.size_factor:
# first iterate over each attribute, to find the mean and standard dev
unadjusted_attribute_data_list_valid_values_only = []
adjusted_attribute_data_list = []
nonetype_attribute_list = []
# seperate nonconforming data
for row in function_globals.row_list:
data_cell = getattr(row, attribute_obj.name)
data = data_cell.text
try:
data = float(data)
data = round(data, 2)
unadjusted_attribute_data_list_valid_values_only.append(data)
except:
nonetype_attribute_list.append(data)
if not unadjusted_attribute_data_list_valid_values_only:
# no reason to look at data, all is None
continue
else:
pass
# logging.info(unadjusted_attribute_data_list_valid_values_only)
# unadjusted avg will be rounded to 3 instead of 2 for identification purposes
unadjusted_avg = round(numpy.mean(unadjusted_attribute_data_list_valid_values_only), 3)
unadjusted_avg_len = len(unadjusted_attribute_data_list_valid_values_only)
# set degree of freedom to subtract from N to 0 if only one date unit
if len(unadjusted_attribute_data_list_valid_values_only) > 1:
number_to_minus_degrees_of_freedom = 1
else:
number_to_minus_degrees_of_freedom = 0
unadjusted_std = numpy.std(unadjusted_attribute_data_list_valid_values_only, ddof=number_to_minus_degrees_of_freedom)
if not (isinstance(unadjusted_avg, float) and isinstance(unadjusted_avg_len, float) and isinstance(unadjusted_std, float) ):
logging.info(("unadjusted_avg:", unadjusted_avg))
logging.info(("unadjusted_avg_len:", unadjusted_avg_len))
logging.info(("unadjusted_std:", unadjusted_std))
# set nonconforming data to avg of conforming data
nonetype_to_avg_attribute_list = []
for nonconforming_data in nonetype_attribute_list:
nonetype_to_avg_attribute_list.append(unadjusted_avg)
# set conforming data and replaced data into the min/max checks
for row in function_globals.row_list:
data_cell = getattr(row, attribute_obj.name)
data = data_cell.text
try:
data = float(data)
data = round(data, 2)
except:
# replace nonconforming data with avg again
data = unadjusted_avg
if type(data) is float:
if attribute_obj.minimum or attribute_obj.maximum:
# normalize data
adjusted_data = data
if attribute_obj.minimum:
if data < attribute_obj.minimum:
adjusted_data = attribute_obj.minimum
data_cell.text_color = "#B8B8B8"
if attribute_obj.maximum:
if data > attribute_obj.maximum:
adjusted_data = attribute_obj.maximum
data_cell.text_color = "#B8B8B8"
adjusted_attribute_data_list.append(adjusted_data)
elif data and attribute_obj.name == "Cur Ratio": # strange variation in rms code
if data > 10.:
adjusted_data = 1.7
data_cell.text_color = "#B8B8B8"
elif data > 4.:
adjusted_data = 4.
data_cell.text_color = "#B8B8B8"
else:
adjusted_data = data
adjusted_attribute_data_list.append(adjusted_data)
elif data and attribute_obj.name == "ROE %"+"Dev": # strange variation in rms code
if data < 0.:
adjusted_data = 1.5
data_cell.text_color = "#B8B8B8"
else:
adjusted_data = data
adjusted_attribute_data_list.append(adjusted_data)
elif data and attribute_obj.name == "Inv2sales Grwth": # strange variation in rms code
adjusted_data = -abs(data-1.)
data_cell.text_color = "#B8B8B8"
adjusted_attribute_data_list.append(adjusted_data)
else:
adjusted_attribute_data_list.append(data)
adjusted_data_avg = numpy.mean(adjusted_attribute_data_list)
adjusted_data_avg_len = len(adjusted_attribute_data_list)
# set degree of freedom to subtract from N to 0 if only one date unit
if len(adjusted_attribute_data_list) > 1:
number_to_minus_degrees_of_freedom = 1
else:
number_to_minus_degrees_of_freedom = 0
adjusted_data_std = numpy.std(adjusted_attribute_data_list, ddof=number_to_minus_degrees_of_freedom) # 1 degrees of freedom for std
if not (isinstance(adjusted_data_avg, float) and isinstance(adjusted_data_avg_len, float) and isinstance(adjusted_data_std, float) ):
logging.info(("adjusted_avg:", adjusted_data_avg))
logging.info(("adjusted_avg_len:", adjusted_data_avg_len))
logging.info(("adjusted_std:", adjusted_data_std))
# set attribute average standard deviation for colors later
attribute_obj.avg = float(round(adjusted_data_avg, 2))
attribute_obj.std = float(round(adjusted_data_std, 2))
#logging.info(("\n"*2))
#logging.info((sorted(adjusted_attribute_data_list)))
#logging.info((attribute_obj.name, "unadjusted avg", unadjusted_avg, "length=", unadjusted_avg_len))
#logging.info((attribute_obj.name, "unadjusted std", unadjusted_std))
#logging.info((attribute_obj.name, "adjusted avg", adjusted_data_avg, "length=", adjusted_data_avg_len))
#logging.info((attribute_obj.name, "adjusted std", adjusted_data_std))
#logging.info(("\n"*2))
function_globals.attrbute_name_avg_and_std_triple_list.append([attribute_obj.name, unadjusted_avg, unadjusted_std])
for row in function_globals.row_list:
if attribute_obj.size_factor:
#b = attribute_obj.function(row.stock)
try:
b = float(attribute_obj.function(row.stock))
except:
if attribute_obj.avg:
b = attribute_obj.avg
else:
logging.info(("No avg for", attribute_obj.name, "set avg to 0"))
b = 0.
if not (type(b) is float):
logging.error("Error: b should always be float")
logging.info(type(b))
logging.info(attribute_obj.name)
logging.info(attribute_obj.avg)
if attribute_obj.minimum or attribute_obj.maximum:
# normalize data
if attribute_obj.minimum and attribute_obj.maximum:
if b < attribute_obj.minimum:
b = attribute_obj.minimum
elif b > attribute_obj.maximum:
b = attribute_obj.maximum
else:
if attribute_obj.minimum:
if b < attribute_obj.minimum:
b = attribute_obj.minimum
else:
if b > attribute_obj.maximum:
b = attribute_obj.maximum
elif b and attribute_obj.name == "Cur Ratio": # strange variation in rms code
if b > 10.:
b = 1.7
elif b > 4.:
b = 4.
elif b and attribute_obj.name == "ROE %"+"Dev": # strange variation in rms code
if b < 0.:
b = 1.5
elif b and attribute_obj.name == "Inv2sales Grwth": # strange variation in rms code
b = -abs(b-1.)
b = round(b, 2)
try:
mu = float(adjusted_data_avg)
except:
mu = None
try:
sigma = float(adjusted_data_std)
except:
sigma = None
if type(b) is float and type(mu) is float and sigma:
z_score = (b-mu)/sigma
z_score = round(z_score, 2)
else:
z_score = None
z_score_cell = Cell(text = z_score)
setattr(row, str(attribute_obj.name) + "__z_score", z_score_cell)
for stock_row in function_globals.row_list:
score = 0.0
for attribute_obj in attribute_list:
weight = attribute_obj.weight
if weight:
try:
z_score_data_cell = getattr(stock_row, attribute_obj.name + "__z_score")
z_score_data = z_score_data_cell.text
if z_score_data is not None:
modified_score_value = z_score_data * weight
if attribute_obj.size_factor:
if attribute_obj.size_factor == "big":
score += modified_score_value
elif attribute_obj.size_factor == "small":
score -= modified_score_value
else:
logging.info("Error: something went wrong here")
z_score_data_cell.text = score
except Exception as e:
logging.info(e)
pass
stock_row.Score.text = score
def find_score_standard_deviations():
score_list = []
for stock_row in function_globals.row_list:
score = stock_row.Score.text
if score is not None:
if score > 1000:
score = 1000
if score < -1000:
score = -1000
score_list.append(score)
score_avg = numpy.average(score_list)
# set degree of freedom to subtract from N to 0 if only one date unit
if len(score_list) > 1:
number_to_minus_degrees_of_freedom = 1
else:
number_to_minus_degrees_of_freedom = 0
score_std = numpy.std(score_list, ddof=number_to_minus_degrees_of_freedom)
if not (isinstance(score_avg, float) and isinstance(score_std, float) ):
logging.info(("score_avg:", score_avg))
logging.info(("score_std:", score_std))
function_globals.score_avg = score_avg
function_globals.score_std = score_std
for attribute_obj in attribute_list:
if attribute_obj.name == "Score":
attribute_obj.avg = score_avg
attribute_obj.std = score_std
def sort_row_list_and_convert_into_cells():
    ''' this is a complex function,
    it sorts the stock rows by Score (descending),
    inserts a labelled separator row each time the walk crosses a
    standard-deviation boundary (3/2/1 sigma above the mean, the average
    band itself, then -1/-2 sigma below),
    converts every row's attribute values into positioned Cell objects,
    colors each data cell green/orange/red depending on how far it lies
    from that attribute's own mean,
    and appends everything to function_globals.all_cells_list
    '''
    # extra_rows counts the non-data rows inserted so far; a stock's final
    # spreadsheet row is its position in row_list plus this offset.
    extra_rows = 0
    extra_rows += function_globals.default_row_buffer_before_stocks
    score_avg = None
    score_std = None
    # first, get score avg and std
    for attribute_obj in attribute_list:
        if attribute_obj.name == "Score":
            score_avg = attribute_obj.avg
            score_std = attribute_obj.std
    # last_sigma_stage records which sigma band the previous row fell in,
    # so a separator is only inserted when the band changes.
    # NOTE(review): assumes a "Score" attribute exists; otherwise
    # score_avg/score_std stay None and the comparisons below would raise.
    first_iteration = True
    last_sigma_stage = None
    function_globals.row_list.sort(key = lambda x: x.Score.text, reverse=True)
    for stock_row in function_globals.row_list:
        # Now, check if we need a blank row between sigma's
        if stock_row.Score.text > (score_avg + (score_std * 3)):
            # greater than 3 sigmas (top band: no separator row is inserted)
            if first_iteration:
                first_iteration = False
                last_sigma_stage = 3
        elif stock_row.Score.text > (score_avg + (score_std * 2)):
            # greater than 2 sigmas
            if first_iteration:
                first_iteration = False
                last_sigma_stage = 2
            if last_sigma_stage > 2:
                last_sigma_stage = 2
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "3 sigma")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        elif stock_row.Score.text > (score_avg + (score_std * 1)):
            # greater than 1 sigma
            if first_iteration:
                first_iteration = False
                last_sigma_stage = 1
            if last_sigma_stage > 1:
                last_sigma_stage = 1
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "2 sigma")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        elif stock_row.Score.text > (score_avg + (score_std * 0)):
            # greater than avg
            if first_iteration:
                first_iteration = False
                last_sigma_stage = 0
            if last_sigma_stage > 0:
                last_sigma_stage = 0
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "1 sigma")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        elif stock_row.Score.text >= (score_avg - (score_std * 1)):
            # between the average and -1 sigma: also inserts the "Average"
            # marker row (plus blank rows around it) on first entry.
            if first_iteration:
                first_iteration = False
                last_sigma_stage = 0
            if last_sigma_stage > -1:
                last_sigma_stage = -1
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = " ")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
                # this is the average cell
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                avg_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "Average")
                function_globals.all_cells_list.append(avg_cell)
                function_globals.avg_row_cell = avg_cell
                extra_rows += 1
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = " ")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        elif stock_row.Score.text > (score_avg - (score_std * 2)):
            # between -2 and -1 sigma
            if first_iteration:
                first_iteration = False
                last_sigma_stage = -1
            if last_sigma_stage > -2:
                last_sigma_stage = -2
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "-1 sigma")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        elif stock_row.Score.text > (score_avg - (score_std * 3)):
            # between -3 and -2 sigma
            if first_iteration:
                first_iteration = False
                last_sigma_stage = -2
            if last_sigma_stage > -3:
                last_sigma_stage = -3
                empty_row_num = function_globals.row_list.index(stock_row) + extra_rows
                empty_cell = Cell(text = " ", col = 0, row = empty_row_num, row_title = "-2 sigma")
                function_globals.all_cells_list.append(empty_cell)
                extra_rows += 1
        # Final spreadsheet row for this stock, then one cell per attribute.
        row_num = function_globals.row_list.index(stock_row) + extra_rows
        for attribute_obj in attribute_list:
            data_cell = getattr(stock_row, attribute_obj.name)
            if data_cell.text is not None:
                data = data_cell.text
                background_color = None
                # Color the cell relative to this attribute's own avg/std;
                # "big" means larger values are better, "small" the reverse.
                if attribute_obj.avg is not None and attribute_obj.std is not None and data is not None:
                    if attribute_obj.size_factor == "big":
                        if data > (attribute_obj.avg + attribute_obj.std):
                            # better than 1 standard deviation -> green!
                            background_color = "#CCFFCC"
                        elif data < (attribute_obj.avg - (attribute_obj.std * 2)):
                            # worse than 2 standard deviations -> red :/
                            background_color = "#F78181"
                        elif data < (attribute_obj.avg - attribute_obj.std):
                            # worse than 1 standard deviation -> orange
                            background_color = "#FFB499"
                    elif attribute_obj.size_factor == "small":
                        if data < (attribute_obj.avg - attribute_obj.std):
                            # better than 1 standard deviation -> green!
                            background_color = "#CCFFCC"
                        elif data > (attribute_obj.avg + (attribute_obj.std * 2)):
                            # worse than 2 standard deviations -> red :/
                            background_color = "#F78181"
                        elif data > (attribute_obj.avg + attribute_obj.std):
                            # worse than 1 standard deviation -> orange
                            background_color = "#FFB499"
                new_data_cell = Cell(text = data)
                new_data_cell.row = row_num
                new_data_cell.col = attribute_obj.col
                new_data_cell.background_color = background_color
                new_data_cell.text_color = data_cell.text_color
                new_data_cell.align_right = attribute_obj.align_right
                # Format the displayed text by the attribute's display code:
                # "2" = 2 decimals, "%" = percent, "$" = currency, "rnk" = rank.
                if attribute_obj.display == "2":
                    new_data_cell.text = "%.2f" % data
                elif attribute_obj.display == "%":
                    try:
                        data = float(data)
                        if data.is_integer():
                            new_data_cell.text = str(int(round(float(data)))) + "%"
                        else:
                            new_data_cell.text = ("%.2f" % data) + "%"
                    except:
                        new_data_cell.text = str(data) + "%"
                elif attribute_obj.display == "$":
                    try:
                        new_data_cell.text = config.locale.currency(float(data), grouping = True)
                    except Exception as e:
                        logging.info(e)
                        new_data_cell.text = "$" + str(data)
                elif attribute_obj.display == "rnk":
                    try:
                        if float(data).is_integer():
                            new_data_cell.text = str(int(data))
                        else:
                            new_data_cell.text = str(data)
                    except:
                        new_data_cell.text = str(data)
                # Label the row with the ticker when available (best effort).
                try:
                    new_data_cell.row_title = stock_row.stock.ticker
                except:
                    pass
                function_globals.all_cells_list.append(new_data_cell)
def create_avg_and_std_cells_for_attributes():
    '''
    Appends red "Average" and "Standard Dev" summary cells for every
    attribute: one average cell on the in-table average row (when it
    exists) and an average + standard-deviation pair two rows below the
    last data row.  Score columns get their own summary cells built from
    function_globals.score_avg / score_std.
    '''
    # Find the highest row index currently in use so the trailing summary
    # rows can be placed below the whole table.
    total_rows = 0
    for cell in function_globals.all_cells_list:
        if cell.row > total_rows:
            total_rows = cell.row
    avg_row_at_the_end = total_rows + 2
    std_row = avg_row_at_the_end + 1
    # Each triple is [attribute_name, avg, std]; the numeric entries are
    # formatted IN PLACE according to the attribute's display code.
    # ("attrbute" is a typo preserved from the list's definition elsewhere.)
    for attribute_obj in attribute_list:
        for triple in function_globals.attrbute_name_avg_and_std_triple_list:
            if attribute_obj.name == triple[0]:
                if attribute_obj.display == "2":
                    triple[1] = "%.2f" % triple[1]
                    triple[2] = "%.2f" % triple[2]
                elif attribute_obj.display == "%":
                    try:
                        triple[1] = float(triple[1])
                        if triple[1].is_integer():
                            triple[1] = str(int(round(float(triple[1])))) + "%"
                        else:
                            triple[1] = ("%.2f" % triple[1]) + "%"
                    except:
                        triple[1] = str(triple[1]) + "%"
                    try:
                        triple[2] = float(triple[2])
                        if triple[2].is_integer():
                            triple[2] = str(int(round(float(triple[2])))) + "%"
                        else:
                            triple[2] = ("%.2f" % triple[2]) + "%"
                    except:
                        triple[2] = str(triple[2]) + "%"
                elif attribute_obj.display == "$":
                    try:
                        triple[1] = config.locale.currency(float(triple[1]), grouping = True)
                    except Exception as e:
                        logging.info(e)
                        triple[1] = "$" + str(triple[1])
                    try:
                        triple[2] = config.locale.currency(float(triple[2]), grouping = True)
                    except Exception as e:
                        logging.info(e)
                        triple[2] = "$" + str(triple[2])
                elif attribute_obj.display == "rnk":
                    try:
                        if float(triple[1]).is_integer():
                            triple[1] = str(int(triple[1]))
                        else:
                            triple[1] = str(triple[1])
                    except:
                        triple[1] = str(triple[1])
                    try:
                        if float(triple[2]).is_integer():
                            triple[2] = str(int(triple[2]))
                        else:
                            triple[2] = str(round(triple[2], 2))
                    except:
                        triple[2] = str(triple[2])
                # One avg cell on the mid-table "Average" row (if present),
                # plus avg/std cells on the trailing summary rows.
                if function_globals.avg_row_cell:
                    attribute_avg_cell = Cell(text = triple[1],row = function_globals.avg_row_cell.row, col = attribute_obj.col, text_color = "red", align_right = True)
                    function_globals.all_cells_list.append(attribute_avg_cell)
                attribute_avg_cell = Cell(text = triple[1],row = avg_row_at_the_end, col = attribute_obj.col, row_title = "Average", text_color = "red", align_right = True)
                function_globals.all_cells_list.append(attribute_avg_cell)
                attribute_std_cell = Cell(text = triple[2],row = std_row, col = attribute_obj.col, row_title = "Standard Dev",text_color = "red", align_right = True)
                function_globals.all_cells_list.append(attribute_std_cell)
    # Score columns are summarised separately from the per-attribute triples.
    score_cols_list = []
    score_display = None
    for attribute_obj in attribute_list:
        if attribute_obj.name == "Score":
            score_cols_list.append(attribute_obj.col)
            score_display = attribute_obj.display
    for col in score_cols_list:
        if function_globals.avg_row_cell:
            score_avg_cell = Cell(text = "%.2f" % function_globals.score_avg, row = function_globals.avg_row_cell.row, col = col, text_color = "red", align_right = True)
            score_avg_cell_2 = Cell(text = "%.2f" % function_globals.score_avg, row = avg_row_at_the_end, col = col, text_color = "red", align_right = True)
            score_std_cell = Cell(text = "%.2f" % function_globals.score_std, row = std_row, col = col, text_color = "red", align_right = True)
            function_globals.all_cells_list.append(score_avg_cell)
            function_globals.all_cells_list.append(score_avg_cell_2)
            function_globals.all_cells_list.append(score_std_cell)
def return_ticker(stock):
    """Return the ticker symbol attribute of *stock*."""
    return getattr(stock, "ticker")
def return_name(stock):
    """Return the firm name attribute of *stock*."""
    return getattr(stock, "firm_name")
def return_volume(stock):
    """Return *stock* itself, unchanged.

    NOTE(review): the original accessed ``stock.volume`` but that attribute
    lookup is commented out in the source, so the whole object is returned.
    """
    return stock
def return_relevant_portfolios_in_string(stock):
    """Return a sorted, comma-separated string of portfolio names holding *stock*.

    Membership is tested by comparing the string form of the stock's ticker
    against the string forms of each portfolio's holdings.  Returns an empty
    string when no portfolio contains the stock.
    """
    portfolios_that_contain_stock = []
    for portfolio in portfolios:
        # Build the ticker list once per portfolio instead of recomputing it
        # for the log statement and again for the membership test.
        held_tickers = [str(x) for x in portfolio.stock_shares_dict.keys()]
        contained = str(stock.ticker) in held_tickers
        logging.info(("str(stock.ticker) in [str(x) for x in portfolio.stock_shares_dict.keys()]"))
        logging.info((portfolio.name))
        logging.info(contained)
        if contained:
            portfolios_that_contain_stock.append(portfolio.name)
    portfolios_that_contain_stock.sort()
    logging.info(portfolios_that_contain_stock)
    if portfolios_that_contain_stock:
        string_to_return = ", ".join(portfolios_that_contain_stock)
    else:
        string_to_return = ""
    logging.info(("\n" * 10, "--------START 2---------"))
    if string_to_return:
        logging.info(("success, string to return is:", string_to_return))
    else:
        logging.info(("STOCK %s IS NOT IN ANY PORTFOLIOS" % str(stock.ticker)))
    logging.info(("---------END 2---------", "\n" * 10))
    return string_to_return
# attr = Attribute("name", function, weight, max, min, display, size, col, align_right)
score = Attribute("Score", None, None, None, None, "2", "big", 0, True)
action = Attribute("Action", return_relevant_portfolios_in_string, None, None, None, None, None, 1, False)
ticker = Attribute("Ticker", return_ticker, None, None, None, None, None, 2, False)
price = Attribute("Price", aaii.aaii_price, None, None, None, "$", None, 3, True)
volume = Attribute("AvgDly $K Vol", aaii.aaii_volume, None, None, None, "$", None, 4, True)
neff5h = Attribute("Neff 3YrH +2xYld",aaii.neff_3yr_H_x2yield, 1.0, 10., 0., "2", "big", 5, True)
neffttm = Attribute("Neff TTM H", aaii.neff_TTM_historical, 1.0, 10., 0., "2", "big", 6, True)
neff5f = Attribute("Neff 5 Yr F", aaii.neff_5_Year_future_estimate, 2.0, 10., 0., "2", "big", 7, True)
margin = Attribute("Mrgin %"+"Rnk", aaii.marginPercentRank, 1.0, None, None, "rnk", "big", 8, True)
roe_rank= Attribute("ROE %"+"Rnk", aaii.roePercentRank, 2.0, None, None, "rnk", "big", 9, True)
roe_dev = Attribute("ROE %"+"Dev", aaii.roePercentDev, 0.1, None, None, "%", "small",10, True)
ticker2 = Attribute("Ticker", return_ticker, None, None, None, None, None, 11, False)
p2b_g = Attribute("Prc2Bk Grwth", aaii.price_to_book_growth, 0.1, 2., None, "%", "big", 12, True)
p2r = Attribute("Prc 2Rng", aaii.price_to_range, 0.1, 0.5, None, "2", "big", 13, True)
insiders= Attribute("Insdr %", aaii.percentage_held_by_insiders, 0.1, 20., None, "%", "big", 14, True)
inst = Attribute("NetInst Buy%", aaii.net_institution_buy_percent, 0.1, None, None, "%", "big", 15, True)
current = Attribute("Cur Ratio", aaii.current_ratio, 0.1, None, None, "%", "big", 16, True)
ltd2e = Attribute("LTDbt / Eqty %", aaii.longTermDebtToEquity, 0.1, None, None, "%", "small",17, True)
neffebit= Attribute("Inv2sales Grwth", aaii.invtory2sales, 0.1, None, None, "2", "big", 18, True)
neff3h = Attribute("Neff CF3yrH", aaii.neffCf3Year, 1.0, 10., 0., "2", "big", 19, True)
name = Attribute("Name", return_name, None, None, None, None, None, 20, False)
score2 = Attribute("Score", None, None, None, None, "2", "big", 21, True)
attribute_list = [score, action, ticker, price, volume, neff5h, neffttm,
neff5f, margin, roe_rank, roe_dev, ticker2, p2b_g, p2r, insiders, inst,
current, ltd2e, neffebit, neff3h, name, score2]
top_row()
gen_attribute_list()
create_rows_of_stock_data()
create_data_scores()
find_score_standard_deviations()
sort_row_list_and_convert_into_cells()
create_avg_and_std_cells_for_attributes()
logging.info("Done sorting spreadsheet")
return function_globals.all_cells_list
| [
"numpy.mean",
"numpy.average",
"config.PORTFOLIO_OBJECTS_DICT.values",
"wxStocks_modules.wxStocks_classes.SpreadsheetCell",
"numpy.std",
"logging.info",
"logging.error"
] | [((4836, 4903), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""""""', 'row': 'one_longer_than_row_list', 'col': '(0)', 'row_title': '"""end"""'}), "(text='', row=one_longer_than_row_list, col=0, row_title='end')\n", (4840, 4903), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((5196, 5234), 'config.PORTFOLIO_OBJECTS_DICT.values', 'config.PORTFOLIO_OBJECTS_DICT.values', ([], {}), '()\n', (5232, 5234), False, 'import os, glob, config, logging\n'), ((40285, 40325), 'logging.info', 'logging.info', (['"""Done sorting spreadsheet"""'], {}), "('Done sorting spreadsheet')\n", (40297, 40325), False, 'import os, glob, config, logging\n'), ((7284, 7313), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'row': '(0)', 'col': '(0)', 'text': 'text'}), '(row=0, col=0, text=text)\n', (7288, 7313), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((20053, 20078), 'numpy.average', 'numpy.average', (['score_list'], {}), '(score_list)\n', (20066, 20078), False, 'import numpy\n'), ((20325, 20387), 'numpy.std', 'numpy.std', (['score_list'], {'ddof': 'number_to_minus_degrees_of_freedom'}), '(score_list, ddof=number_to_minus_degrees_of_freedom)\n', (20334, 20387), False, 'import numpy\n'), ((36110, 36153), 'logging.info', 'logging.info', (['portfolios_that_contain_stock'], {}), '(portfolios_that_contain_stock)\n', (36122, 36153), False, 'import os, glob, config, logging\n'), ((36370, 36423), 'logging.info', 'logging.info', (["('\\n' * 10, '--------START 2---------')"], {}), "(('\\n' * 10, '--------START 2---------'))\n", (36382, 36423), False, 'import os, glob, config, logging\n'), ((36637, 36689), 'logging.info', 'logging.info', (["('---------END 2---------', '\\n' * 10)"], {}), "(('---------END 2---------', '\\n' * 10))\n", (36649, 36689), False, 'import os, glob, config, logging\n'), ((7521, 7617), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], 
{'row': 'row', 'col': 'attribute_obj.col', 'text': 'attribute_obj.name', 'col_title': 'attribute_obj.name'}), '(row=row, col=attribute_obj.col, text=attribute_obj.name, col_title=\n attribute_obj.name)\n', (7525, 7617), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((20482, 20521), 'logging.info', 'logging.info', (["('score_avg:', score_avg)"], {}), "(('score_avg:', score_avg))\n", (20494, 20521), False, 'import os, glob, config, logging\n'), ((20534, 20573), 'logging.info', 'logging.info', (["('score_std:', score_std)"], {}), "(('score_std:', score_std))\n", (20546, 20573), False, 'import os, glob, config, logging\n'), ((35661, 35760), 'logging.info', 'logging.info', (['"""str(stock.ticker) in [str(x) for x in portfolio.stock_shares_dict.keys()]"""'], {}), "(\n 'str(stock.ticker) in [str(x) for x in portfolio.stock_shares_dict.keys()]'\n )\n", (35673, 35760), False, 'import os, glob, config, logging\n'), ((35765, 35793), 'logging.info', 'logging.info', (['portfolio.name'], {}), '(portfolio.name)\n', (35777, 35793), False, 'import os, glob, config, logging\n'), ((36465, 36530), 'logging.info', 'logging.info', (["('success, string to return is:', string_to_return)"], {}), "(('success, string to return is:', string_to_return))\n", (36477, 36530), False, 'import os, glob, config, logging\n'), ((8147, 8162), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'data'}), '(text=data)\n', (8151, 8162), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((10056, 10161), 'numpy.std', 'numpy.std', (['unadjusted_attribute_data_list_valid_values_only'], {'ddof': 'number_to_minus_degrees_of_freedom'}), '(unadjusted_attribute_data_list_valid_values_only, ddof=\n number_to_minus_degrees_of_freedom)\n', (10065, 10161), False, 'import numpy\n'), ((13562, 13602), 'numpy.mean', 'numpy.mean', (['adjusted_attribute_data_list'], {}), '(adjusted_attribute_data_list)\n', (13572, 13602), False, 
'import numpy\n'), ((13998, 14083), 'numpy.std', 'numpy.std', (['adjusted_attribute_data_list'], {'ddof': 'number_to_minus_degrees_of_freedom'}), '(adjusted_attribute_data_list, ddof=number_to_minus_degrees_of_freedom\n )\n', (14007, 14083), False, 'import numpy\n'), ((34341, 34476), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': "('%.2f' % function_globals.score_avg)", 'row': 'function_globals.avg_row_cell.row', 'col': 'col', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text='%.2f' % function_globals.score_avg, row=function_globals.\n avg_row_cell.row, col=col, text_color='red', align_right=True)\n", (34345, 34476), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((34517, 34637), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': "('%.2f' % function_globals.score_avg)", 'row': 'avg_row_at_the_end', 'col': 'col', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text='%.2f' % function_globals.score_avg, row=avg_row_at_the_end, col=\n col, text_color='red', align_right=True)\n", (34521, 34637), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((34676, 34784), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': "('%.2f' % function_globals.score_std)", 'row': 'std_row', 'col': 'col', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text='%.2f' % function_globals.score_std, row=std_row, col=col,\n text_color='red', align_right=True)\n", (34680, 34784), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((9563, 9623), 'numpy.mean', 'numpy.mean', (['unadjusted_attribute_data_list_valid_values_only'], {}), '(unadjusted_attribute_data_list_valid_values_only)\n', (9573, 9623), False, 'import numpy\n'), ((10319, 10368), 'logging.info', 'logging.info', (["('unadjusted_avg:', unadjusted_avg)"], {}), "(('unadjusted_avg:', unadjusted_avg))\n", (10331, 10368), False, 'import os, glob, 
config, logging\n'), ((10389, 10446), 'logging.info', 'logging.info', (["('unadjusted_avg_len:', unadjusted_avg_len)"], {}), "(('unadjusted_avg_len:', unadjusted_avg_len))\n", (10401, 10446), False, 'import os, glob, config, logging\n'), ((10467, 10516), 'logging.info', 'logging.info', (["('unadjusted_std:', unadjusted_std)"], {}), "(('unadjusted_std:', unadjusted_std))\n", (10479, 10516), False, 'import os, glob, config, logging\n'), ((14281, 14331), 'logging.info', 'logging.info', (["('adjusted_avg:', adjusted_data_avg)"], {}), "(('adjusted_avg:', adjusted_data_avg))\n", (14293, 14331), False, 'import os, glob, config, logging\n'), ((14352, 14410), 'logging.info', 'logging.info', (["('adjusted_avg_len:', adjusted_data_avg_len)"], {}), "(('adjusted_avg_len:', adjusted_data_avg_len))\n", (14364, 14410), False, 'import os, glob, config, logging\n'), ((14431, 14481), 'logging.info', 'logging.info', (["('adjusted_std:', adjusted_data_std)"], {}), "(('adjusted_std:', adjusted_data_std))\n", (14443, 14481), False, 'import os, glob, config, logging\n'), ((28349, 28364), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'data'}), '(text=data)\n', (28353, 28364), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((18369, 18387), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'z_score'}), '(text=z_score)\n', (18373, 18387), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((22494, 22555), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""3 sigma"""'}), "(text=' ', col=0, row=empty_row_num, row_title='3 sigma')\n", (22498, 22555), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((33232, 33355), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'triple[1]', 'row': 'function_globals.avg_row_cell.row', 'col': 
'attribute_obj.col', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text=triple[1], row=function_globals.avg_row_cell.row, col=\n attribute_obj.col, text_color='red', align_right=True)\n", (33236, 33355), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((33488, 33616), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'triple[1]', 'row': 'avg_row_at_the_end', 'col': 'attribute_obj.col', 'row_title': '"""Average"""', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text=triple[1], row=avg_row_at_the_end, col=attribute_obj.col,\n row_title='Average', text_color='red', align_right=True)\n", (33492, 33616), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((33752, 33875), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': 'triple[2]', 'row': 'std_row', 'col': 'attribute_obj.col', 'row_title': '"""Standard Dev"""', 'text_color': '"""red"""', 'align_right': '(True)'}), "(text=triple[2], row=std_row, col=attribute_obj.col, row_title=\n 'Standard Dev', text_color='red', align_right=True)\n", (33756, 33875), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((16052, 16100), 'logging.error', 'logging.error', (['"""Error: b should always be float"""'], {}), "('Error: b should always be float')\n", (16065, 16100), False, 'import os, glob, config, logging\n'), ((16179, 16211), 'logging.info', 'logging.info', (['attribute_obj.name'], {}), '(attribute_obj.name)\n', (16191, 16211), False, 'import os, glob, config, logging\n'), ((16240, 16271), 'logging.info', 'logging.info', (['attribute_obj.avg'], {}), '(attribute_obj.avg)\n', (16252, 16271), False, 'import os, glob, config, logging\n'), ((19578, 19593), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (19590, 19593), False, 'import os, glob, config, logging\n'), ((23111, 23172), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 
'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""2 sigma"""'}), "(text=' ', col=0, row=empty_row_num, row_title='2 sigma')\n", (23115, 23172), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((23724, 23785), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""1 sigma"""'}), "(text=' ', col=0, row=empty_row_num, row_title='1 sigma')\n", (23728, 23785), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((15868, 15932), 'logging.info', 'logging.info', (["('No avg for', attribute_obj.name, 'set avg to 0')"], {}), "(('No avg for', attribute_obj.name, 'set avg to 0'))\n", (15880, 15932), False, 'import os, glob, config, logging\n'), ((24305, 24360), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '""" """'}), "(text=' ', col=0, row=empty_row_num, row_title=' ')\n", (24309, 24360), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((24647, 24708), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""Average"""'}), "(text=' ', col=0, row=empty_row_num, row_title='Average')\n", (24651, 24708), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((25009, 25064), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '""" """'}), "(text=' ', col=0, row=empty_row_num, row_title=' ')\n", (25013, 25064), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((19399, 19447), 'logging.info', 'logging.info', (['"""Error: something went wrong here"""'], {}), "('Error: something went wrong here')\n", (19411, 19447), False, 'import os, glob, config, logging\n'), ((25623, 25685), 
'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""-1 sigma"""'}), "(text=' ', col=0, row=empty_row_num, row_title='-1 sigma')\n", (25627, 25685), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n'), ((29534, 29549), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (29546, 29549), False, 'import os, glob, config, logging\n'), ((32036, 32051), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (32048, 32051), False, 'import os, glob, config, logging\n'), ((32316, 32331), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (32328, 32331), False, 'import os, glob, config, logging\n'), ((26244, 26306), 'wxStocks_modules.wxStocks_classes.SpreadsheetCell', 'Cell', ([], {'text': '""" """', 'col': '(0)', 'row': 'empty_row_num', 'row_title': '"""-2 sigma"""'}), "(text=' ', col=0, row=empty_row_num, row_title='-2 sigma')\n", (26248, 26306), True, 'from wxStocks_modules.wxStocks_classes import SpreadsheetCell as Cell\n')] |
#!/usr/bin/env python
# Packages we're going to use
import math
import numpy as np
# Other pydigree objects
from pydigree.individual import Individual
from pydigree.recombination import recombine
from pydigree.individualcontainer import IndividualContainer
from pydigree.genotypes import ChromosomeSet
from pydigree.simulation.mating import MatingStructure, RandomMating
missing_genotype = (0, 0)
def is_missing_genotype(g):
    """Return True when genotype *g* equals the missing-data marker ``(0, 0)``."""
    return missing_genotype == g
# Population growth models.
# These are easy enough to supply your own if you wanted
def exponential_growth(p, r, t):
    """
    Projects exponential growth across discrete generations.

    :param p: starting population size
    :param r: per-generation growth rate
    :param t: number of generations elapsed
    :type p: numeric
    :type r: numeric
    :type t: numeric
    :returns: the population size after t generations
    :rtype: numeric
    """
    growth_factor = math.exp(r * t)
    return p * growth_factor
def logistic_growth(p, r, k, t):
    """
    Projects logistic (carrying-capacity limited) growth across discrete
    generations.

    :param p: starting population size
    :param r: per-generation growth rate
    :param k: carrying capacity (asymptotic population size)
    :param t: number of generations elapsed
    :returns: the population size after t generations
    :rtype: numeric
    """
    # The gap between the capacity and the initial population shrinks
    # exponentially over time; this is the closed-form logistic solution.
    shrinking_gap = (k - p) * math.exp(-r * t)
    return (p * k) / (p + shrinking_gap)
# Classes
class Population(IndividualContainer):
# Methods for mapping types
def __init__(self, intial_pop_size=0, name=None):
self.chromosomes = ChromosomeSet()
self.pool = None
self.population = {}
self.n0 = intial_pop_size
self.name = name
def __hash__(self):
return id(self)
def __getitem__(self, key):
return self.population[key]
def __contains__(self, item):
return item in self.population.values()
def __len__(self):
return len(self.population)
def __setitem__(self, key, value):
self.population[key] = value
def __delitem__(self, key):
del self.population[key]
def size(self):
""" Returns the number of individuals in the population. """
return len(self.population)
def remove_ancestry(self):
""" Makes every individual in the population a founder """
for x in self.individuals:
x.remove_ancestry()
# Adding and removing people
#
#
def register_individual(self, ind):
''' Adds an individual to the population '''
if ind.label in self.population:
raise ValueError('ID %s already in population!' % ind.label)
self.population[ind.label] = ind
def remove_individual(self, ind):
''' Removes an individual from the population '''
del self[ind.label]
def add_founders(self, n):
"""
Adds a number of founder individuals to the population
:param n: number of individuals to add
:type n: int
:rtype: void
"""
for _ in range(n):
self.founder_individual(register=True)
def update(self, other):
'''
Merges two datasets (i.e. performs Individual.update for each individual in the pedigree)
Assumes unique individual IDs
:param other: New data to merge in
:type other: Population
:return: void
'''
self.chromosomes = other.chromosomes
self.clear_genotypes()
selfids = {x.label for x in self.individuals}
otherids = {x.label for x in other.individuals}
overlap = set.intersection(selfids, otherids)
if not overlap:
return
for x in overlap:
self.population[x].update(other[x])
def _getindividual(self, label):
return self[label]
@property
def individuals(self):
''' Returns a list of individuals in the population '''
return [x for x in self.population.values()]
# Chromosome functions
#
#
def add_chromosome(self, chrom):
""" Adds a chromosome to the population """
self.chromosomes.add_chromosome(chrom)
def chromosome_count(self):
""" Returns the number of chromosomes in this population """
return len(self.chromosomes)
# Random mating
#
#
def mate(self, ind1, ind2, indlab, sex=None):
"""
Creates an individual as the child of two specificied individual
objects and randomly chooses a sex.
:param ind1: The first parent
:param ind2: The second parent
:type ind1: Individual
:type ind2: Individual
:param indlab: ID label for the child
:param sex: Sex of child, randomly chosen if not specified
:type sex: {0,1}
:return: An individual with ind1 and ind2 as parents
:rtype: Individual
"""
if sex is None:
sex = np.random.choice([0, 1])
child = Individual(self, indlab, ind1, ind2, sex)
return child
def advance_generation(self, gensize, mating=None):
'''
Simulates a generation of random mating.
:param gensize: The size of the new generation
:param mating: MatingScheme for the generation
:type gensize: numeric
:type mating: MatingScheme
'''
if mating is None:
mating = RandomMating()
progeny = mating.next_generation(self, gensize)
self.population = {x.label : x for x in progeny}
def founder_individual(self, register=True, sex=None):
"Creates a new founder individual and adds to the population"
if sex is not None:
sex = sex.lower()
sexd = {'m': 0, 'f': 1, None: np.random.choice([0, 1])}
i = Individual(self, self.size(), None, None, sexd[sex])
if register:
self.register_individual(i)
return i
# Genotype Functions
#
#
def get_founder_genotypes(self):
'''
Gives genotypes to each founder in the population with chromosomes
from the chromosome pool. If there is no pool, genotypes are generated
under linkage equilibrium
'''
for ind in self.individuals:
if not self.pool:
genotypes = self.get_linkage_equilibrium_genotypes()
else:
genotypes = self.pool.get_genotype_set()
ind.genotypes = genotypes
def get_genotypes(self):
'''
Causes each Individual object in the pedigree to request genotypes
from its parents
'''
for x in self.individuals:
x.get_genotypes()
def get_linkage_equilibrium_genotypes(self):
    """Return one individual's genotypes drawn under linkage equilibrium.

    For each chromosome, two independent linkage-equilibrium chromosomes
    are sampled to form the genotype pair.
    """
    genotypes = []
    for chrom in self.chromosomes:
        pair = [chrom.linkageequilibrium_chromosome(),
                chrom.linkageequilibrium_chromosome()]
        genotypes.append(pair)
    return genotypes
| [
"pydigree.genotypes.ChromosomeSet",
"numpy.random.choice",
"pydigree.individual.Individual",
"pydigree.simulation.mating.RandomMating",
"math.exp"
] | [((896, 911), 'math.exp', 'math.exp', (['(r * t)'], {}), '(r * t)\n', (904, 911), False, 'import math\n'), ((1428, 1443), 'pydigree.genotypes.ChromosomeSet', 'ChromosomeSet', ([], {}), '()\n', (1441, 1443), False, 'from pydigree.genotypes import ChromosomeSet\n'), ((4839, 4880), 'pydigree.individual.Individual', 'Individual', (['self', 'indlab', 'ind1', 'ind2', 'sex'], {}), '(self, indlab, ind1, ind2, sex)\n', (4849, 4880), False, 'from pydigree.individual import Individual\n'), ((4798, 4822), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {}), '([0, 1])\n', (4814, 4822), True, 'import numpy as np\n'), ((5259, 5273), 'pydigree.simulation.mating.RandomMating', 'RandomMating', ([], {}), '()\n', (5271, 5273), False, 'from pydigree.simulation.mating import MatingStructure, RandomMating\n'), ((5617, 5641), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {}), '([0, 1])\n', (5633, 5641), True, 'import numpy as np\n'), ((1245, 1261), 'math.exp', 'math.exp', (['(-r * t)'], {}), '(-r * t)\n', (1253, 1261), False, 'import math\n')] |
# Copyright (c) <NAME> (<EMAIL>).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import numpy as np
import torch
import logging
import collections.abc
def batched_coordinates(coords):
    r"""Concatenate a sequence of coordinate arrays into one batched tensor.

    Given a list of 2D numpy arrays or torch tensors (each of shape
    ``(N_i, D)`` with the same ``D``), returns a single ``torch.IntTensor``
    of shape ``(sum(N_i), D + 1)``.  The first ``D`` columns hold the
    floored coordinates; the last column holds the batch index of each row.

    Args:
        coords (a sequence of `torch.Tensor` or `numpy.ndarray`): a list
            of 2D coordinate arrays.

    Returns:
        coords (`torch.IntTensor`): the batched coordinates, batch index
        appended as the last column.
    """
    assert isinstance(coords, collections.abc.Sequence), \
        "The coordinates must be a sequence."
    assert np.array([cs.ndim == 2 for cs in coords]).all(), \
        "All coordinates must be in a 2D array."
    D = np.unique(np.array([cs.shape[1] for cs in coords]))
    assert len(D) == 1, f"Dimension of the array mismatch. All dimensions: {D}"
    D = D[0]
    # Allocate one uninitialized buffer for all rows, then fill it in place.
    total_rows = sum(len(cs) for cs in coords)
    bcoords = torch.IntTensor(total_rows, D + 1)
    row = 0
    for batch_idx, cs in enumerate(coords):
        # Floor to integral coordinates before the int cast on assignment.
        if isinstance(cs, np.ndarray):
            cs = torch.from_numpy(np.floor(cs))
        else:
            cs = cs.floor()
        n = len(cs)
        bcoords[row:row + n, :D] = cs
        bcoords[row:row + n, D] = batch_idx
        row += n
    return bcoords
def sparse_collate(coords, feats, labels=None, is_double=False):
    r"""Batch per-sample coordinates/features for a sparse tensor.

    Converts a set of per-sample coordinate and feature arrays into one
    batched coordinate tensor (with the batch index appended as the last
    column) and one batched feature tensor.

    Args:
        coords (set of `torch.Tensor` or `numpy.ndarray`): a set of coordinates.
        feats (set of `torch.Tensor` or `numpy.ndarray`): a set of features.
        labels (set of `torch.Tensor` or `numpy.ndarray`): optional labels
            associated to the inputs; when given, a third batched tensor is
            returned.
        is_double (`bool`): return double precision features if True.  False
            by default.  (Only applied to torch-tensor features.)
    """
    use_label = labels is not None
    coords_batch = []
    feats_batch = []
    labels_batch = []
    batch_id = 0
    for coord, feat in zip(coords, feats):
        if isinstance(coord, np.ndarray):
            coord = torch.from_numpy(coord)
        else:
            assert isinstance(
                coord, torch.Tensor
            ), "Coords must be of type numpy.ndarray or torch.Tensor"
            coord = coord.int()
        if isinstance(feat, np.ndarray):
            feat = torch.from_numpy(feat)
        else:
            assert isinstance(
                feat, torch.Tensor
            ), "Features must be of type numpy.ndarray or torch.Tensor"
            feat = feat.double() if is_double else feat.float()
        # Append a constant batch-index column to this sample's coordinates.
        num_points = coord.shape[0]
        batch_col = torch.ones(num_points, 1).int() * batch_id
        coords_batch.append(torch.cat((coord, batch_col), 1))
        feats_batch.append(feat)
        if use_label:
            label = labels[batch_id]
            if isinstance(label, np.ndarray):
                label = torch.from_numpy(label)
            else:
                assert isinstance(
                    label, torch.Tensor
                ), "labels must be of type numpy.ndarray or torch.Tensor"
            labels_batch.append(label)
        batch_id += 1
    # Concatenate the per-sample pieces into single batched tensors.
    coords_batch = torch.cat(coords_batch, 0).int()
    feats_batch = torch.cat(feats_batch, 0)
    if use_label:
        return coords_batch, feats_batch, torch.cat(labels_batch, 0)
    return coords_batch, feats_batch
class SparseCollation:
    """Generates collate function for coords, feats, labels.

    Args:
        limit_numpoints: If 0 or False, does not alter batch size. If positive
                         integer, limits batch size so that the number of input
                         coordinates is below limit_numpoints.
    """

    def __init__(self, limit_numpoints):
        self.limit_numpoints = limit_numpoints

    def __call__(self, list_data):
        """Collate a list of (coords, feats, labels) numpy triples into
        batched tensors, truncating the batch if it exceeds the point limit.
        """
        coords, feats, labels = list(zip(*list_data))
        coords_batch, feats_batch, labels_batch = [], [], []
        batch_id = 0
        batch_num_points = 0
        for batch_id, _ in enumerate(coords):
            num_points = coords[batch_id].shape[0]
            batch_num_points += num_points
            if self.limit_numpoints > 0 and batch_num_points > self.limit_numpoints:
                num_full_points = sum(len(c) for c in coords)
                num_full_batch_size = len(coords)
                # BUG FIX: the second fragment was a plain string (missing the
                # f-prefix), so "{self.limit_numpoints}" was logged literally
                # instead of the actual limit.
                logging.warning(
                    f'\tCannot fit {num_full_points} points into'
                    f' {self.limit_numpoints} points limit. Truncating batch '
                    f'size at {batch_id} out of {num_full_batch_size} with '
                    f'{batch_num_points - num_points}.')
                break
            # Append the batch index as an extra coordinate column.
            coords_batch.append(
                torch.cat((torch.from_numpy(coords[batch_id]).int(),
                           torch.ones(num_points, 1).int() * batch_id), 1))
            feats_batch.append(torch.from_numpy(feats[batch_id]))
            labels_batch.append(torch.from_numpy(labels[batch_id]))
            batch_id += 1

        # Concatenate all lists
        coords_batch = torch.cat(coords_batch, 0).int()
        feats_batch = torch.cat(feats_batch, 0).float()
        labels_batch = torch.cat(labels_batch, 0)  # arbitrary format
        return coords_batch, feats_batch, labels_batch
| [
"torch.ones",
"numpy.floor",
"logging.warning",
"torch.from_numpy",
"numpy.array",
"torch.cat",
"torch.IntTensor"
] | [((2320, 2345), 'torch.IntTensor', 'torch.IntTensor', (['N', '(D + 1)'], {}), '(N, D + 1)\n', (2335, 2345), False, 'import torch\n'), ((4838, 4863), 'torch.cat', 'torch.cat', (['feats_batch', '(0)'], {}), '(feats_batch, 0)\n', (4847, 4863), False, 'import torch\n'), ((2084, 2124), 'numpy.array', 'np.array', (['[cs.shape[1] for cs in coords]'], {}), '([cs.shape[1] for cs in coords])\n', (2092, 2124), True, 'import numpy as np\n'), ((4905, 4931), 'torch.cat', 'torch.cat', (['labels_batch', '(0)'], {}), '(labels_batch, 0)\n', (4914, 4931), False, 'import torch\n'), ((6797, 6823), 'torch.cat', 'torch.cat', (['labels_batch', '(0)'], {}), '(labels_batch, 0)\n', (6806, 6823), False, 'import torch\n'), ((1966, 2009), 'numpy.array', 'np.array', (['[(cs.ndim == 2) for cs in coords]'], {}), '([(cs.ndim == 2) for cs in coords])\n', (1974, 2009), True, 'import numpy as np\n'), ((3617, 3640), 'torch.from_numpy', 'torch.from_numpy', (['coord'], {}), '(coord)\n', (3633, 3640), False, 'import torch\n'), ((3881, 3903), 'torch.from_numpy', 'torch.from_numpy', (['feat'], {}), '(feat)\n', (3897, 3903), False, 'import torch\n'), ((4787, 4813), 'torch.cat', 'torch.cat', (['coords_batch', '(0)'], {}), '(coords_batch, 0)\n', (4796, 4813), False, 'import torch\n'), ((2509, 2521), 'numpy.floor', 'np.floor', (['cs'], {}), '(cs)\n', (2517, 2521), True, 'import numpy as np\n'), ((4486, 4509), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (4502, 4509), False, 'import torch\n'), ((5973, 6187), 'logging.warning', 'logging.warning', (['f"""\tCannot fit {num_full_points} points into {{self.limit_numpoints}} points limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}."""'], {}), "(\n f'\\tCannot fit {num_full_points} points into {{self.limit_numpoints}} points limit. 
Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.'\n )\n", (5988, 6187), False, 'import logging\n'), ((6499, 6532), 'torch.from_numpy', 'torch.from_numpy', (['feats[batch_id]'], {}), '(feats[batch_id])\n', (6515, 6532), False, 'import torch\n'), ((6566, 6600), 'torch.from_numpy', 'torch.from_numpy', (['labels[batch_id]'], {}), '(labels[batch_id])\n', (6582, 6600), False, 'import torch\n'), ((6685, 6711), 'torch.cat', 'torch.cat', (['coords_batch', '(0)'], {}), '(coords_batch, 0)\n', (6694, 6711), False, 'import torch\n'), ((6740, 6765), 'torch.cat', 'torch.cat', (['feats_batch', '(0)'], {}), '(feats_batch, 0)\n', (6749, 6765), False, 'import torch\n'), ((4237, 4262), 'torch.ones', 'torch.ones', (['num_points', '(1)'], {}), '(num_points, 1)\n', (4247, 4262), False, 'import torch\n'), ((6350, 6384), 'torch.from_numpy', 'torch.from_numpy', (['coords[batch_id]'], {}), '(coords[batch_id])\n', (6366, 6384), False, 'import torch\n'), ((6419, 6444), 'torch.ones', 'torch.ones', (['num_points', '(1)'], {}), '(num_points, 1)\n', (6429, 6444), False, 'import torch\n')] |
from finrl_meta.data_processors.processor_alpaca import AlpacaProcessor as Alpaca
from finrl_meta.data_processors.processor_wrds import WrdsProcessor as Wrds
from finrl_meta.data_processors.processor_yahoofinance import YahooFinanceProcessor as YahooFinance
from finrl_meta.data_processors.processor_binance import BinanceProcessor as Binance
from finrl_meta.data_processors.processor_ricequant import RiceQuantProcessor as RiceQuant
from finrl_meta.data_processors.processor_joinquant import JoinquantProcessor
from finrl_meta.data_processors.processor_tusharepro import TushareProProcessor as Tusharepro
import pandas as pd
import numpy as np
import os
class DataProcessor():
    """Facade over the source-specific market-data processors.

    Dispatches every pipeline step (download, cleaning, feature engineering,
    array export) to the processor that matches ``data_source``.

    :param data_source: one of 'alpaca', 'joinquant', 'ricequant', 'wrds',
        'yahoofinance', 'binance', 'tusharepro'.
    :param kwargs: credentials/options forwarded to the underlying processor
        (e.g. API_KEY/API_SECRET for alpaca, username/password for
        joinquant/ricequant, token for tusharepro).
    :raises ValueError: if the data source is unsupported or the processor
        cannot be constructed (e.g. bad credentials).
    """

    def __init__(self, data_source, **kwargs):
        self.data_source = data_source
        self.dataframe = pd.DataFrame()
        # Dispatch table: data_source -> (processor class, name used in the
        # "successfully connected" message).  Built inside __init__ (not at
        # class scope) so the names are resolved lazily, as before.
        registry = {
            'alpaca': (Alpaca, 'Alpaca'),
            'joinquant': (JoinquantProcessor, 'Joinquant'),
            'ricequant': (RiceQuant, 'Ricequant'),
            'wrds': (Wrds, 'Wrds'),
            'yahoofinance': (YahooFinance, 'Yahoofinance'),
            'binance': (Binance, 'Binance'),
            'tusharepro': (Tusharepro, 'tusharepro'),
        }
        if data_source not in registry:
            raise ValueError('Data source input is NOT supported yet.')
        processor_cls, display_name = registry[data_source]
        try:
            self.processor = processor_cls(data_source, **kwargs)
        except Exception as exc:
            # BUG FIX: the original used seven copies of a bare `except:`,
            # which swallowed every error (including KeyboardInterrupt) and
            # discarded the real cause.  Narrow to Exception and chain it.
            raise ValueError(
                f'Please input correct account info for {data_source}!'
            ) from exc
        print(f'{display_name} successfully connected')

    def download_data(self, ticker_list, start_date, end_date, time_interval):
        """Download raw market data for ``ticker_list`` into ``self.dataframe``."""
        self.processor.download_data(ticker_list=ticker_list,
                                     start_date=start_date,
                                     end_date=end_date,
                                     time_interval=time_interval)
        self.dataframe = self.processor.dataframe

    def clean_data(self):
        """Clean the downloaded data via the underlying processor."""
        self.processor.dataframe = self.dataframe
        self.processor.clean_data()
        self.dataframe = self.processor.dataframe

    def add_technical_indicator(self, tech_indicator_list):
        """Append the given technical-indicator columns to the data."""
        self.tech_indicator_list = tech_indicator_list
        self.processor.add_technical_indicator(tech_indicator_list)
        self.dataframe = self.processor.dataframe

    def add_turbulence(self):
        """Append the turbulence-index column to the data."""
        self.processor.add_turbulence()
        self.dataframe = self.processor.dataframe

    def add_vix(self):
        """Append the VIX column to the data."""
        self.processor.add_vix()
        self.dataframe = self.processor.dataframe

    def df_to_array(self, if_vix):
        """Convert the dataframe to (price, tech, turbulence) numpy arrays.

        NaNs in the technical-indicator array are replaced with 0.
        """
        price_array, tech_array, turbulence_array = self.processor.df_to_array(
            self.tech_indicator_list, if_vix)
        # fill nan with 0 for technical indicators
        tech_array[np.isnan(tech_array)] = 0
        return price_array, tech_array, turbulence_array

    def run(self, ticker_list, start_date, end_date, time_interval,
            technical_indicator_list, if_vix, cache=False):
        """Run the full pipeline and return (price, tech, turbulence) arrays.

        When ``cache`` is True, the cleaned data is read from / written to a
        CSV under ./cache keyed by tickers, source, dates and interval.
        """
        if time_interval == "1s" and self.data_source != "binance":
            raise ValueError("Currently 1s interval data is only supported with 'binance' as data source")

        cache_csv = '_'.join(ticker_list + [self.data_source, start_date, end_date, time_interval]) + '.csv'
        cache_dir = './cache'
        cache_path = os.path.join(cache_dir, cache_csv)

        if cache and os.path.isfile(cache_path):
            print('Using cached file {}'.format(cache_path))
            self.tech_indicator_list = technical_indicator_list
            self.dataframe = pd.read_csv(cache_path)
        else:
            self.download_data(ticker_list, start_date, end_date, time_interval)
            self.clean_data()
            if cache:
                os.makedirs(cache_dir, exist_ok=True)
                self.dataframe.to_csv(cache_path, index=False)

        self.add_technical_indicator(technical_indicator_list)
        if if_vix:
            self.add_vix()
        price_array, tech_array, turbulence_array = self.df_to_array(if_vix)
        # Defensive: df_to_array already zero-fills NaNs; kept for parity
        # with the original behavior.
        tech_array[np.isnan(tech_array)] = 0
        return price_array, tech_array, turbulence_array
def test_joinquant():
    """Smoke test of the end-to-end pipeline against the joinquant source.

    Requires valid JoinQuant credentials; the placeholders below must be
    replaced before running.
    """
    trade_start = "2020-09-01"
    trade_end = "2021-09-11"
    interval = '1D'
    indicators = ['macd', 'boll_ub', 'boll_lb', 'rsi_30', 'dx_30',
                  'close_30_sma', 'close_60_sma']
    credentials = {
        'username': "xxx",          # should input your username
        'password': "<PASSWORD>",   # should input your password
    }
    processor = DataProcessor(data_source='joinquant', **credentials)
    tickers = ["000612.XSHE", "601808.XSHG"]
    processor.download_data(ticker_list=tickers,
                            start_date=trade_start,
                            end_date=trade_end,
                            time_interval=interval)
    processor.clean_data()
    processor.add_turbulence()
    processor.add_technical_indicator(indicators)
    processor.add_vix()
    price_array, tech_array, turbulence_array = processor.run(
        tickers, trade_start, trade_end, interval, indicators,
        if_vix=False, cache=True)
def test_binance():
    """Smoke test of the pipeline against the binance source on 5m crypto bars."""
    processor = DataProcessor('binance')
    tickers = ['BTCUSDT', 'ETHUSDT', 'ADAUSDT', 'BNBUSDT']
    # self-defined technical indicator list is NOT supported yet
    indicators = ['macd', 'rsi', 'cci', 'dx']
    price_array, tech_array, turbulence_array = processor.run(
        tickers, '2021-09-01', '2021-09-20', '5m', indicators,
        False, cache=True)
    print(price_array.shape, tech_array.shape)
# Script entry point: runs the JoinQuant smoke test by default.
if __name__ == "__main__":
    test_joinquant()
# test_binance() | [
"os.path.exists",
"pandas.read_csv",
"finrl_meta.data_processors.processor_joinquant.JoinquantProcessor",
"finrl_meta.data_processors.processor_yahoofinance.YahooFinanceProcessor",
"finrl_meta.data_processors.processor_ricequant.RiceQuantProcessor",
"finrl_meta.data_processors.processor_binance.BinancePro... | [((791, 805), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (803, 805), True, 'import pandas as pd\n'), ((4638, 4658), 'numpy.isnan', 'np.isnan', (['tech_array'], {}), '(tech_array)\n', (4646, 4658), True, 'import numpy as np\n'), ((5226, 5260), 'os.path.join', 'os.path.join', (['cache_dir', 'cache_csv'], {}), '(cache_dir, cache_csv)\n', (5238, 5260), False, 'import os\n'), ((6006, 6026), 'numpy.isnan', 'np.isnan', (['tech_array'], {}), '(tech_array)\n', (6014, 6026), True, 'import numpy as np\n'), ((5283, 5309), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (5297, 5309), False, 'import os\n'), ((5465, 5488), 'pandas.read_csv', 'pd.read_csv', (['cache_path'], {}), '(cache_path)\n', (5476, 5488), True, 'import pandas as pd\n'), ((1026, 1055), 'finrl_meta.data_processors.processor_alpaca.AlpacaProcessor', 'Alpaca', (['data_source'], {}), '(data_source, **kwargs)\n', (1032, 1055), True, 'from finrl_meta.data_processors.processor_alpaca import AlpacaProcessor as Alpaca\n'), ((1393, 1434), 'finrl_meta.data_processors.processor_joinquant.JoinquantProcessor', 'JoinquantProcessor', (['data_source'], {}), '(data_source, **kwargs)\n', (1411, 1434), False, 'from finrl_meta.data_processors.processor_joinquant import JoinquantProcessor\n'), ((5660, 5685), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (5674, 5685), False, 'import os\n'), ((5707, 5726), 'os.mkdir', 'os.mkdir', (['cache_dir'], {}), '(cache_dir)\n', (5715, 5726), False, 'import os\n'), ((1778, 1810), 'finrl_meta.data_processors.processor_ricequant.RiceQuantProcessor', 'RiceQuant', (['data_source'], {}), '(data_source, **kwargs)\n', (1787, 1810), True, 'from finrl_meta.data_processors.processor_ricequant import RiceQuantProcessor as RiceQuant\n'), ((2131, 2158), 'finrl_meta.data_processors.processor_wrds.WrdsProcessor', 'Wrds', (['data_source'], {}), '(data_source, **kwargs)\n', (2135, 
2158), True, 'from finrl_meta.data_processors.processor_wrds import WrdsProcessor as Wrds\n'), ((2411, 2446), 'finrl_meta.data_processors.processor_yahoofinance.YahooFinanceProcessor', 'YahooFinance', (['data_source'], {}), '(data_source, **kwargs)\n', (2423, 2446), True, 'from finrl_meta.data_processors.processor_yahoofinance import YahooFinanceProcessor as YahooFinance\n'), ((2710, 2740), 'finrl_meta.data_processors.processor_binance.BinanceProcessor', 'Binance', (['data_source'], {}), '(data_source, **kwargs)\n', (2717, 2740), True, 'from finrl_meta.data_processors.processor_binance import BinanceProcessor as Binance\n'), ((3097, 3130), 'finrl_meta.data_processors.processor_tusharepro.TushareProProcessor', 'Tusharepro', (['data_source'], {}), '(data_source, **kwargs)\n', (3107, 3130), True, 'from finrl_meta.data_processors.processor_tusharepro import TushareProProcessor as Tusharepro\n')] |
import sys
# sys.path.append("../../")
import joblib
import numpy as np
import os
import argparse
import torch
import torch.nn as nn
from RNNRepair.utils import create_args,get_project_path
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if __name__ == "__main__":
    # CLI arguments (pca, epoch, components, path, model) are defined by
    # RNNRepair's shared create_args helper.
    args = create_args().parse_args()
    save_dir = get_project_path(args.path, args.model)
    # MNIST models are 10-class; all other supported models are binary.
    if args.model == 'keras_lstm_mnist' or args.model == 'keras_gru_mnist':
        class_num = 10
    else:
        class_num = 2
    pca_dir = os.path.join(save_dir, 'pca_trace')
    k = args.pca
    epoch = args.epoch
    # Load the fitted PCA model and the cached PCA-projected traces.
    # NOTE(review): the .ptr/.tptr joblib files are assumed to contain
    # (pca_data, softmax, per-step labels, predictions, ground truth) for
    # the train/test splits respectively — produced elsewhere in the project.
    k_path = os.path.join(pca_dir, str(k) + '_' + str(epoch) + '.ptr')
    pca_path = os.path.join(pca_dir, str(k) + '_' + str(epoch) + '.pca')
    pca = joblib.load(pca_path)
    pca_data, softmax, pred_seq_labels, pred_labels, train_labels = joblib.load(k_path)
    k_path = os.path.join(pca_dir, str(k) + '_' + str(epoch) + '.tptr')
    test_pca_data, test_softmax, test_seq_labels, test_pred_labels, test_truth_labels = joblib.load(k_path)
    print('Original RNN Test Acc:', np.sum(test_pred_labels == test_truth_labels) / len(test_truth_labels))
    # Keep only the samples the original RNN classified correctly.
    indexes = np.where(pred_labels == train_labels)[0]
    test_indexes = np.where(test_pred_labels == test_truth_labels)[0]
    label = torch.from_numpy(train_labels[indexes]).long()
    test_labels = torch.from_numpy(test_truth_labels).long().to(device)
    abst_dir = os.path.join(save_dir, 'abs_model')
    log_path = os.path.join(abst_dir, str(args.pca) + '_' + str(args.epoch) + '_acc.log')
    plot_file = open(log_path, 'a+')
    stnum = args.components
    # Feature ablation variants: RL = reaction label only, ID = state id only,
    # CS = confidence scores only, and combinations thereof.
    # NOTE: `type` shadows the builtin; kept as-is (doc-only change).
    type = ['RL', 'ID', 'ID_RL', 'CS', 'RL_ID_CS']
    path = os.path.join(abst_dir, str(args.pca) + '_' + str(args.epoch) + '_' + str(stnum) + '_GMM.ast')
    if not os.path.exists(path):
        print('Cannot find the abstract model')
        exit()
    best_model = joblib.load(path)
    feature_path = os.path.join(save_dir, 'feature_data',
                                str(k) + '_' + str(epoch) + '_' + str(stnum) + '_feature.npz')
    if not os.path.exists(feature_path):
        print('Cannot find the feaures')
        exit()
    for t in type:
        # Reload the cached per-sample trace features and pad them to a
        # common length: shape (num_samples, max_len, feature_dim).
        train_trace, test_trace = joblib.load(feature_path)
        train_len = len(train_trace)
        train_trace.extend(test_trace)
        train_trace = [torch.from_numpy(i) for i in train_trace]
        total = torch.nn.utils.rnn.pad_sequence(train_trace, batch_first=True)
        # Shift ids by 1 (so 0 can act as padding) and normalize columns
        # 0 (abstract state id) and 1 (predicted label) to [0, 1].
        total[:, :, 0] = total[:, :, 0] + 1
        total[:, :, 1] = total[:, :, 1] + 1
        total[:, :, 0] /= (stnum + 1)
        total[:, :, 1] /= (class_num + 1)
        # Zero out the feature groups excluded by the current ablation.
        if t == 'RL':
            total[:, :, 2:] = 0
            total[:, :, 0] = 0
        elif t == 'ID':
            total[:, :, 1] = 0
            total[:, :, 2:] = 0
        elif t == 'ID_RL':
            total[:, :, 2:] = 0
        elif t == 'CS':
            total[:, :, 0:2] = 0
        else:
            print('Keep All')
        train_trace = total[0:train_len]
        test_trace = total[train_len:]
        input = train_trace[indexes].float()
        _, y, z = total.shape

        # Simple linear probe over the flattened trace features.
        class my_m(nn.Module):
            def __init__(self):
                super(my_m, self).__init__()
                self.fc = nn.Linear(y * z, class_num)

            def forward(self, x):
                # Flatten (batch, len, dim) -> (batch, len*dim) if needed.
                if len(x.shape) != 2:
                    x = x.reshape(x.shape[0], -1)
                return self.fc(x)

        # Full-batch training of the probe.
        epochs = 500
        model = my_m().to(device)
        learning_rate = 0.02
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        loss_fn = nn.CrossEntropyLoss()
        label = label.to(device)
        data_inx = input.to(device)
        print(data_inx.shape)
        for idx in range(epochs):
            data_out = model(data_inx)
            loss = loss_fn(data_out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Train / test accuracy of the probe for this feature variant.
        _, predicted = torch.max(data_out.data, -1)
        predicted = predicted.to(device)
        with torch.no_grad():
            test_inx = test_trace.float().to(device)
            test_out = model(test_inx)
            _, test_predicted = torch.max(test_out.data, -1)
        print(
            t,
            loss.item(),
            (predicted == label).sum().item() / len(label),
            (test_labels == test_predicted).sum().item() / len(test_truth_labels)
        )
| [
"os.path.exists",
"torch.nn.CrossEntropyLoss",
"RNNRepair.utils.get_project_path",
"numpy.where",
"torch.max",
"os.path.join",
"torch.nn.utils.rnn.pad_sequence",
"torch.from_numpy",
"RNNRepair.utils.create_args",
"numpy.sum",
"torch.cuda.is_available",
"torch.nn.Linear",
"joblib.load",
"to... | [((848, 887), 'RNNRepair.utils.get_project_path', 'get_project_path', (['args.path', 'args.model'], {}), '(args.path, args.model)\n', (864, 887), False, 'from RNNRepair.utils import create_args, get_project_path\n'), ((1035, 1070), 'os.path.join', 'os.path.join', (['save_dir', '"""pca_trace"""'], {}), "(save_dir, 'pca_trace')\n", (1047, 1070), False, 'import os\n'), ((1265, 1286), 'joblib.load', 'joblib.load', (['pca_path'], {}), '(pca_path)\n', (1276, 1286), False, 'import joblib\n'), ((1357, 1376), 'joblib.load', 'joblib.load', (['k_path'], {}), '(k_path)\n', (1368, 1376), False, 'import joblib\n'), ((1538, 1557), 'joblib.load', 'joblib.load', (['k_path'], {}), '(k_path)\n', (1549, 1557), False, 'import joblib\n'), ((1971, 2006), 'os.path.join', 'os.path.join', (['save_dir', '"""abs_model"""'], {}), "(save_dir, 'abs_model')\n", (1983, 2006), False, 'import os\n'), ((2424, 2441), 'joblib.load', 'joblib.load', (['path'], {}), '(path)\n', (2435, 2441), False, 'import joblib\n'), ((225, 250), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (248, 250), False, 'import torch\n'), ((1710, 1747), 'numpy.where', 'np.where', (['(pred_labels == train_labels)'], {}), '(pred_labels == train_labels)\n', (1718, 1747), True, 'import numpy as np\n'), ((1770, 1817), 'numpy.where', 'np.where', (['(test_pred_labels == test_truth_labels)'], {}), '(test_pred_labels == test_truth_labels)\n', (1778, 1817), True, 'import numpy as np\n'), ((2321, 2341), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2335, 2341), False, 'import os\n'), ((2607, 2635), 'os.path.exists', 'os.path.exists', (['feature_path'], {}), '(feature_path)\n', (2621, 2635), False, 'import os\n'), ((2747, 2772), 'joblib.load', 'joblib.load', (['feature_path'], {}), '(feature_path)\n', (2758, 2772), False, 'import joblib\n'), ((2933, 2995), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['train_trace'], {'batch_first': '(True)'}), '(train_trace, 
batch_first=True)\n', (2964, 2995), False, 'import torch\n'), ((4161, 4182), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4180, 4182), True, 'import torch.nn as nn\n'), ((805, 818), 'RNNRepair.utils.create_args', 'create_args', ([], {}), '()\n', (816, 818), False, 'from RNNRepair.utils import create_args, get_project_path\n'), ((1595, 1640), 'numpy.sum', 'np.sum', (['(test_pred_labels == test_truth_labels)'], {}), '(test_pred_labels == test_truth_labels)\n', (1601, 1640), True, 'import numpy as np\n'), ((1834, 1873), 'torch.from_numpy', 'torch.from_numpy', (['train_labels[indexes]'], {}), '(train_labels[indexes])\n', (1850, 1873), False, 'import torch\n'), ((2874, 2893), 'torch.from_numpy', 'torch.from_numpy', (['i'], {}), '(i)\n', (2890, 2893), False, 'import torch\n'), ((4520, 4548), 'torch.max', 'torch.max', (['data_out.data', '(-1)'], {}), '(data_out.data, -1)\n', (4529, 4548), False, 'import torch\n'), ((4608, 4623), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4621, 4623), False, 'import torch\n'), ((4749, 4777), 'torch.max', 'torch.max', (['test_out.data', '(-1)'], {}), '(test_out.data, -1)\n', (4758, 4777), False, 'import torch\n'), ((3795, 3822), 'torch.nn.Linear', 'nn.Linear', (['(y * z)', 'class_num'], {}), '(y * z, class_num)\n', (3804, 3822), True, 'import torch.nn as nn\n'), ((1901, 1936), 'torch.from_numpy', 'torch.from_numpy', (['test_truth_labels'], {}), '(test_truth_labels)\n', (1917, 1936), False, 'import torch\n')] |
import os, sys
from PIL import Image
import numpy as np
import tensorflow as tf
ROWS = 224    # input image height expected by the network
COLS = 224    # input image width expected by the network
lr = 0.001    # Adam learning rate
window = 5    # kernel size of the first convolutional layer
#tf.set_random_seed(0)
# Classify each image passed on the command line with a pre-trained TF1 CNN.
# NOTE(review): the graph is rebuilt and the checkpoint re-loaded for every
# image — correct but slow; kept as-is (doc-only change).
for infile in sys.argv[1:]:
    img = Image.open(infile)
    img = img.resize((COLS, ROWS), Image.ANTIALIAS)
    img = np.array(img)
    images = np.zeros(shape = (1, ROWS, COLS, 3))
    # Detect if image is grayscale (2D array, no channel axis)
    if(len(img.shape) == 2):
        # Standardize the image (zero mean, unit variance)
        temp = img;
        temp = (temp - np.mean(temp, axis = (0,1)))/np.std(temp, axis = (0,1))
        # Copy grayscale into each channel seperately
        images[0, :, :, 0] = temp
        images[0, :, :, 1] = temp
        images[0, :, :, 2] = temp
    else:
        # Standardize each color channel independently
        i1 = img[:,:,0]
        i1 = (i1 - np.mean(i1, axis = (0, 1)))/np.std(i1, axis = (0, 1))
        i2 = img[:,:,1]
        i2 = (i2 - np.mean(i2, axis = (0, 1)))/np.std(i2, axis = (0, 1))
        i3 = img[:,:,2]
        i3 = (i3 - np.mean(i3, axis = (0, 1)))/np.std(i3, axis = (0, 1))
        img[:,:,0] = i1
        img[:,:,1] = i2
        img[:,:,2] = i3
        images[0,:,:,:] = img
    # Reset Graph before rebuilding it for this image
    tf.reset_default_graph()
    # Create placeholders for inputs, labels and dropout keep-probability
    X = tf. placeholder(tf.float32, [None, ROWS, COLS, 3])
    Y = tf.placeholder(tf.int32,[None])
    depth = 10 # The number of classes
    Y_onehot = tf.one_hot(Y,depth)
    pkeep = tf.placeholder(tf.float32)
    # Specify parameters of the NN model: three conv layers followed by
    # two fully-connected layers and a 10-way softmax output.
    depth_1 = 10 # CNN Layer 1 o/p channels
    depth_2 = 8
    depth_3 = 6
    fc_1 = 200
    fc_2 = 100
    # Create weights & biases
    # Conv Layer 1
    W1 = tf.Variable(tf.truncated_normal(shape = [window, window, 3, depth_1], stddev = 0.1))
    B1 = tf.Variable(tf.constant(0.1, tf.float32, shape = [depth_1]))
    W1_1 = tf.Variable(tf.truncated_normal(shape = [3, 3, depth_1, depth_2], stddev = 0.1))
    B1_1 = tf.Variable(tf.constant(0.1, tf.float32, shape = [depth_2]))
    W1_2 = tf.Variable(tf.truncated_normal(shape = [3, 3, depth_2, depth_3], stddev = 0.1))
    B1_2 = tf.Variable(tf.constant(0.1, tf.float32, shape = [depth_3]))
    # FC Layer 1 (input is the 28x28xdepth_3 map after three 2x2 poolings)
    W2 = tf.Variable(tf.truncated_normal(shape = [28 * 28 * depth_3, fc_1], stddev = 0.1))
    B2 = tf.Variable(tf.constant(0.1, tf.float32, shape = [fc_1]))
    # FC Layer 2
    W3 = tf.Variable(tf.truncated_normal(shape = [fc_1, fc_2], stddev = 0.1))
    B3 = tf.Variable(tf.constant(0.1, tf.float32, shape = [fc_2]))
    # Output Layer
    W4 = tf.Variable(tf.truncated_normal(shape = [fc_2, 10], stddev = 0.1))
    B4 = tf.Variable(tf.constant(0.1, tf.float32, shape = [10]))
    # Create model: each conv block is conv -> 2x2 max-pool -> ReLU,
    # halving the spatial resolution each time (224 -> 112 -> 56 -> 28).
    Y1 = tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding = 'SAME') + B1 # Image Size => 224 x 224
    Y1_max = tf.nn.max_pool(Y1, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    Y1_out = tf.nn.relu(Y1_max) # Image Size 112 x 112
    Y1_1 = tf.nn.conv2d(Y1_out, W1_1, strides = [1,1,1,1], padding = 'SAME') + B1_1 # Image Size => 112 x 112
    Y1_1_max = tf.nn.max_pool(Y1_1, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    Y1_1_out = tf.nn.relu(Y1_1_max) # Image Size 56 x 56
    Y1_2 = tf.nn.conv2d(Y1_1_out, W1_2, strides = [1,1,1,1], padding = 'SAME') + B1_2 # Image Size => 56 x 56
    Y1_2_max = tf.nn.max_pool(Y1_2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    Y1_2_out = tf.nn.relu(Y1_2_max) # Image Size 28 x 28
    # Flatten and run the two FC layers (with dropout) plus the output layer.
    YY = tf.reshape(Y1_2_out, shape = [-1, 28 * 28 * depth_3])
    Y2 = tf.matmul(YY, W2) + B2
    Y2_drop = tf.nn.dropout(Y2, keep_prob = pkeep)
    Y2_out = tf.nn.relu(Y2_drop)
    Y3 = tf.matmul(Y2_out, W3) + B3
    Y3_drop = tf.nn.dropout(Y3, keep_prob = pkeep)
    Y3_out = tf.nn.relu(Y3_drop)
    Y_logits= tf.matmul(Y3_out, W4) + B4
    Y_pred = tf.nn.softmax(Y_logits)
    # Training ops are defined but unused at inference time; they are kept
    # so the restored checkpoint's variable set matches this graph.
    cross_entropy = tf.losses.softmax_cross_entropy(Y_onehot, Y_logits)
    cross_entropy = tf.reduce_mean(cross_entropy)
    #Calculate accuracy for each mini batch
    correct_prediction = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    #Define optmizer
    optimizer = tf.train.AdamOptimizer(lr)
    train_step = optimizer.minimize(cross_entropy)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    # Restore the trained weights and run a forward pass (dropout disabled
    # via pkeep=1.0; the dummy Y value feeds the unused label placeholder).
    with tf.Session() as sess:
        saver.restore(sess,"model_new/model_9876")
        prediction = sess.run([Y_pred], feed_dict = {X: images, Y: np.reshape(1, newshape = (1)), pkeep: 1.0})
    pred = np.array(prediction)
    category = np.argmax(pred)
    # Map the predicted class index to its human-readable label.
    if(category == 1):
        print('Category : Gossiping')
    elif(category == 2):
        print('Category : Isolation')
    elif(category == 3):
        print('Category : Laughing')
    elif(category == 4):
        print('Category : Pulling Hair')
    elif(category == 5):
        print('Category : Punching')
    elif(category == 6):
        print('Category : Quarrel')
    elif(category == 7):
        print('Category : Slapping')
    elif(category == 8):
        print('Category : Stabbing')
    elif(category == 9):
        print('Category : Strangle')
    elif(category == 0):
print('Non-bullying') | [
"numpy.array",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.cast",
"numpy.mean",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.train.AdamOptimizer",
"tensorflow.one_hot",
"tensorflow.nn.conv2d",
... | [((274, 287), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (282, 287), True, 'import numpy as np\n'), ((332, 366), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, ROWS, COLS, 3)'}), '(shape=(1, ROWS, COLS, 3))\n', (340, 366), True, 'import numpy as np\n'), ((1163, 1187), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1185, 1187), True, 'import tensorflow as tf\n'), ((1218, 1267), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, ROWS, COLS, 3]'], {}), '(tf.float32, [None, ROWS, COLS, 3])\n', (1232, 1267), True, 'import tensorflow as tf\n'), ((1274, 1306), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1288, 1306), True, 'import tensorflow as tf\n'), ((1354, 1374), 'tensorflow.one_hot', 'tf.one_hot', (['Y', 'depth'], {}), '(Y, depth)\n', (1364, 1374), True, 'import tensorflow as tf\n'), ((1418, 1444), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1432, 1444), True, 'import tensorflow as tf\n'), ((2771, 2847), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['Y1'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(Y1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (2785, 2847), True, 'import tensorflow as tf\n'), ((2858, 2876), 'tensorflow.nn.relu', 'tf.nn.relu', (['Y1_max'], {}), '(Y1_max)\n', (2868, 2876), True, 'import tensorflow as tf\n'), ((3021, 3099), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['Y1_1'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(Y1_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3035, 3099), True, 'import tensorflow as tf\n'), ((3112, 3132), 'tensorflow.nn.relu', 'tf.nn.relu', (['Y1_1_max'], {}), '(Y1_1_max)\n', (3122, 3132), True, 'import tensorflow as tf\n'), ((3275, 3353), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['Y1_2'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 
'padding': '"""SAME"""'}), "(Y1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3289, 3353), True, 'import tensorflow as tf\n'), ((3366, 3386), 'tensorflow.nn.relu', 'tf.nn.relu', (['Y1_2_max'], {}), '(Y1_2_max)\n', (3376, 3386), True, 'import tensorflow as tf\n'), ((3416, 3467), 'tensorflow.reshape', 'tf.reshape', (['Y1_2_out'], {'shape': '[-1, 28 * 28 * depth_3]'}), '(Y1_2_out, shape=[-1, 28 * 28 * depth_3])\n', (3426, 3467), True, 'import tensorflow as tf\n'), ((3510, 3544), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Y2'], {'keep_prob': 'pkeep'}), '(Y2, keep_prob=pkeep)\n', (3523, 3544), True, 'import tensorflow as tf\n'), ((3557, 3576), 'tensorflow.nn.relu', 'tf.nn.relu', (['Y2_drop'], {}), '(Y2_drop)\n', (3567, 3576), True, 'import tensorflow as tf\n'), ((3623, 3657), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Y3'], {'keep_prob': 'pkeep'}), '(Y3, keep_prob=pkeep)\n', (3636, 3657), True, 'import tensorflow as tf\n'), ((3670, 3689), 'tensorflow.nn.relu', 'tf.nn.relu', (['Y3_drop'], {}), '(Y3_drop)\n', (3680, 3689), True, 'import tensorflow as tf\n'), ((3742, 3765), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['Y_logits'], {}), '(Y_logits)\n', (3755, 3765), True, 'import tensorflow as tf\n'), ((3785, 3836), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['Y_onehot', 'Y_logits'], {}), '(Y_onehot, Y_logits)\n', (3816, 3836), True, 'import tensorflow as tf\n'), ((3854, 3883), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (3868, 3883), True, 'import tensorflow as tf\n'), ((4105, 4131), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (4127, 4131), True, 'import tensorflow as tf\n'), ((4190, 4223), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4221, 4223), True, 'import tensorflow as tf\n'), ((4233, 4249), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4247, 4249), True, 
'import tensorflow as tf\n'), ((196, 214), 'PIL.Image.open', 'Image.open', (['infile'], {}), '(infile)\n', (206, 214), False, 'from PIL import Image\n'), ((1651, 1718), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[window, window, 3, depth_1]', 'stddev': '(0.1)'}), '(shape=[window, window, 3, depth_1], stddev=0.1)\n', (1670, 1718), True, 'import tensorflow as tf\n'), ((1742, 1787), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[depth_1]'}), '(0.1, tf.float32, shape=[depth_1])\n', (1753, 1787), True, 'import tensorflow as tf\n'), ((1813, 1876), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[3, 3, depth_1, depth_2]', 'stddev': '(0.1)'}), '(shape=[3, 3, depth_1, depth_2], stddev=0.1)\n', (1832, 1876), True, 'import tensorflow as tf\n'), ((1902, 1947), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[depth_2]'}), '(0.1, tf.float32, shape=[depth_2])\n', (1913, 1947), True, 'import tensorflow as tf\n'), ((1973, 2036), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[3, 3, depth_2, depth_3]', 'stddev': '(0.1)'}), '(shape=[3, 3, depth_2, depth_3], stddev=0.1)\n', (1992, 2036), True, 'import tensorflow as tf\n'), ((2062, 2107), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[depth_3]'}), '(0.1, tf.float32, shape=[depth_3])\n', (2073, 2107), True, 'import tensorflow as tf\n'), ((2145, 2209), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[28 * 28 * depth_3, fc_1]', 'stddev': '(0.1)'}), '(shape=[28 * 28 * depth_3, fc_1], stddev=0.1)\n', (2164, 2209), True, 'import tensorflow as tf\n'), ((2233, 2275), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[fc_1]'}), '(0.1, tf.float32, shape=[fc_1])\n', (2244, 2275), True, 'import tensorflow as tf\n'), ((2313, 2364), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[fc_1, fc_2]', 'stddev': '(0.1)'}), 
'(shape=[fc_1, fc_2], stddev=0.1)\n', (2332, 2364), True, 'import tensorflow as tf\n'), ((2388, 2430), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[fc_2]'}), '(0.1, tf.float32, shape=[fc_2])\n', (2399, 2430), True, 'import tensorflow as tf\n'), ((2470, 2519), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[fc_2, 10]', 'stddev': '(0.1)'}), '(shape=[fc_2, 10], stddev=0.1)\n', (2489, 2519), True, 'import tensorflow as tf\n'), ((2543, 2583), 'tensorflow.constant', 'tf.constant', (['(0.1)', 'tf.float32'], {'shape': '[10]'}), '(0.1, tf.float32, shape=[10])\n', (2554, 2583), True, 'import tensorflow as tf\n'), ((2671, 2728), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['X', 'W1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(X, W1, strides=[1, 1, 1, 1], padding='SAME')\n", (2683, 2728), True, 'import tensorflow as tf\n'), ((2910, 2974), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['Y1_out', 'W1_1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(Y1_out, W1_1, strides=[1, 1, 1, 1], padding='SAME')\n", (2922, 2974), True, 'import tensorflow as tf\n'), ((3164, 3230), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['Y1_1_out', 'W1_2'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(Y1_1_out, W1_2, strides=[1, 1, 1, 1], padding='SAME')\n", (3176, 3230), True, 'import tensorflow as tf\n'), ((3476, 3493), 'tensorflow.matmul', 'tf.matmul', (['YY', 'W2'], {}), '(YY, W2)\n', (3485, 3493), True, 'import tensorflow as tf\n'), ((3585, 3606), 'tensorflow.matmul', 'tf.matmul', (['Y2_out', 'W3'], {}), '(Y2_out, W3)\n', (3594, 3606), True, 'import tensorflow as tf\n'), ((3703, 3724), 'tensorflow.matmul', 'tf.matmul', (['Y3_out', 'W4'], {}), '(Y3_out, W4)\n', (3712, 3724), True, 'import tensorflow as tf\n'), ((3958, 3978), 'tensorflow.argmax', 'tf.argmax', (['Y_pred', '(1)'], {}), '(Y_pred, 1)\n', (3967, 3978), True, 'import tensorflow as tf\n'), ((3980, 4002), 'tensorflow.argmax', 'tf.argmax', (['Y_onehot', '(1)'], 
{}), '(Y_onehot, 1)\n', (3989, 4002), True, 'import tensorflow as tf\n'), ((4031, 4070), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4038, 4070), True, 'import tensorflow as tf\n'), ((4258, 4270), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4268, 4270), True, 'import tensorflow as tf\n'), ((4477, 4497), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (4485, 4497), True, 'import numpy as np\n'), ((4512, 4527), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (4521, 4527), True, 'import numpy as np\n'), ((537, 562), 'numpy.std', 'np.std', (['temp'], {'axis': '(0, 1)'}), '(temp, axis=(0, 1))\n', (543, 562), True, 'import numpy as np\n'), ((819, 842), 'numpy.std', 'np.std', (['i1'], {'axis': '(0, 1)'}), '(i1, axis=(0, 1))\n', (825, 842), True, 'import numpy as np\n'), ((906, 929), 'numpy.std', 'np.std', (['i2'], {'axis': '(0, 1)'}), '(i2, axis=(0, 1))\n', (912, 929), True, 'import numpy as np\n'), ((993, 1016), 'numpy.std', 'np.std', (['i3'], {'axis': '(0, 1)'}), '(i3, axis=(0, 1))\n', (999, 1016), True, 'import numpy as np\n'), ((508, 534), 'numpy.mean', 'np.mean', (['temp'], {'axis': '(0, 1)'}), '(temp, axis=(0, 1))\n', (515, 534), True, 'import numpy as np\n'), ((791, 815), 'numpy.mean', 'np.mean', (['i1'], {'axis': '(0, 1)'}), '(i1, axis=(0, 1))\n', (798, 815), True, 'import numpy as np\n'), ((878, 902), 'numpy.mean', 'np.mean', (['i2'], {'axis': '(0, 1)'}), '(i2, axis=(0, 1))\n', (885, 902), True, 'import numpy as np\n'), ((965, 989), 'numpy.mean', 'np.mean', (['i3'], {'axis': '(0, 1)'}), '(i3, axis=(0, 1))\n', (972, 989), True, 'import numpy as np\n'), ((4386, 4411), 'numpy.reshape', 'np.reshape', (['(1)'], {'newshape': '(1)'}), '(1, newshape=1)\n', (4396, 4411), True, 'import numpy as np\n')] |
import numpy as np
import scipy.optimize
from optwrapper import base, utils, lp
class Soln( base.Soln ):
    """Solution record for the LP solver.

    Adds an integer return code (``retval``) and a human-readable status
    string (``message``) to the fields inherited from ``base.Soln``.
    """

    def __init__(self):
        super().__init__()
        # Sentinel values indicating solve() has not run yet.
        self.retval = -1
        self.message = "Uninitialized Solution"

    def getStatus(self):
        """Return the solver's human-readable status message."""
        return self.message
class Solver( base.Solver ):
    """LP solver backend that delegates to ``scipy.optimize.linprog``.

    Variables whose lower and upper bounds coincide are treated as fixed:
    they are removed from the problem handed to linprog and restored in
    the final solution vector at their (common) bound value.
    """

    def __init__( self, prob=None ):
        """Create a solver, optionally binding it to an ``lp.Problem``.

        The linprog method defaults to "simplex"; any other entries set
        on ``self.options`` are forwarded to linprog's ``options`` dict.
        """
        super().__init__()
        self.prob = None
        self.warm_start = False
        # utils.Options apparently wraps stored values (solve() reads
        # self.options["method"].value) -- confirm against utils.Options.
        self.options = utils.Options()
        self.options[ "method" ] = "simplex"
        if( prob ):
            self.setupProblem( prob )

    def setupProblem( self, prob ):
        """Validate and store ``prob`` (a fully configured lp.Problem)."""
        if( not isinstance( prob, lp.Problem ) ):
            raise TypeError( "Argument prob must be an instance of lp.Problem" )
        if( not prob.checkSetup() ):
            raise ValueError( "Argument 'prob' has not been properly configured" )
        self.prob = prob

    def solve( self ):
        """Solve the stored LP and attach a ``Soln`` to ``self.prob.soln``."""
        # Mask of variables fixed by coincident bounds (lb == ub).
        consvar = ( self.prob.lb == self.prob.ub )
        c = self.prob.objL[ ~consvar ]
        # linprog accepts an (nvars, 2) array of (lower, upper) pairs.
        bounds = np.vstack( ( self.prob.lb[ ~consvar ],
                             self.prob.ub[ ~consvar ] ) ).T
        A_ub = None
        b_ub = None
        A_eq = None
        b_eq = None
        if( self.prob.Nconslin > 0 ):
            # Rows with equal lower/upper limits are equality constraints;
            # each two-sided row contributes one "<=" row per finite limit:
            #   A x <= ub   and   -A x <= -lb.
            conseq = ( self.prob.conslinlb == self.prob.conslinub )
            ineqlb = np.logical_and( ~conseq, np.isfinite( self.prob.conslinlb ) )
            inequb = np.logical_and( ~conseq, np.isfinite( self.prob.conslinub ) )
            if( np.any( ineqlb ) or np.any( inequb ) ):
                # NOTE(review): columns of fixed variables are dropped here
                # without subtracting their contribution from the RHS --
                # presumably fixed variables never appear in these rows;
                # confirm against lp.Problem's setup.
                A_ub = np.vstack( ( self.prob.conslinA[ np.ix_( inequb, ~consvar ) ],
                                     - self.prob.conslinA[ np.ix_( ineqlb, ~consvar ) ] ) )
                b_ub = np.hstack( ( self.prob.conslinub[ inequb ],
                                     - self.prob.conslinlb[ ineqlb ] ) )
            if( np.any( conseq ) ):
                A_eq = self.prob.conslinA[ np.ix_( conseq, ~consvar ) ]
                b_eq = self.prob.conslinub[ conseq ]
        extraopts = self.options.toDict()
        ## remove internal options ("method" is consumed here, not by linprog)
        if( "method" in extraopts ):
            del extraopts[ "method" ]
        res = scipy.optimize.linprog( c = c,
                                      A_ub = A_ub,
                                      b_ub = b_ub,
                                      A_eq = A_eq,
                                      b_eq = b_eq,
                                      bounds = bounds,
                                      method = self.options[ "method" ].value,
                                      options = extraopts )
        self.prob.soln = Soln()
        # Reassemble the full-length solution vector: fixed variables take
        # their bound value, free variables take linprog's result.
        self.prob.soln.final = np.empty( (self.prob.N,) )
        self.prob.soln.final[ consvar ] = self.prob.ub[ consvar ]
        self.prob.soln.final[ ~consvar ] = np.copy( res.x )
        self.prob.soln.retval = res.status
        self.prob.soln.message = res.message
        # NOTE(review): res.fun covers only the free variables; any
        # objective contribution from fixed variables is not added back.
        self.prob.soln.value = res.fun
| [
"numpy.copy",
"optwrapper.utils.Options",
"numpy.hstack",
"numpy.any",
"numpy.ix_",
"numpy.empty",
"numpy.vstack",
"numpy.isfinite"
] | [((468, 483), 'optwrapper.utils.Options', 'utils.Options', ([], {}), '()\n', (481, 483), False, 'from optwrapper import base, utils, lp\n'), ((2683, 2707), 'numpy.empty', 'np.empty', (['(self.prob.N,)'], {}), '((self.prob.N,))\n', (2691, 2707), True, 'import numpy as np\n'), ((2819, 2833), 'numpy.copy', 'np.copy', (['res.x'], {}), '(res.x)\n', (2826, 2833), True, 'import numpy as np\n'), ((1036, 1095), 'numpy.vstack', 'np.vstack', (['(self.prob.lb[~consvar], self.prob.ub[~consvar])'], {}), '((self.prob.lb[~consvar], self.prob.ub[~consvar]))\n', (1045, 1095), True, 'import numpy as np\n'), ((1877, 1891), 'numpy.any', 'np.any', (['conseq'], {}), '(conseq)\n', (1883, 1891), True, 'import numpy as np\n'), ((1369, 1401), 'numpy.isfinite', 'np.isfinite', (['self.prob.conslinlb'], {}), '(self.prob.conslinlb)\n', (1380, 1401), True, 'import numpy as np\n'), ((1452, 1484), 'numpy.isfinite', 'np.isfinite', (['self.prob.conslinub'], {}), '(self.prob.conslinub)\n', (1463, 1484), True, 'import numpy as np\n'), ((1505, 1519), 'numpy.any', 'np.any', (['ineqlb'], {}), '(ineqlb)\n', (1511, 1519), True, 'import numpy as np\n'), ((1525, 1539), 'numpy.any', 'np.any', (['inequb'], {}), '(inequb)\n', (1531, 1539), True, 'import numpy as np\n'), ((1745, 1815), 'numpy.hstack', 'np.hstack', (['(self.prob.conslinub[inequb], -self.prob.conslinlb[ineqlb])'], {}), '((self.prob.conslinub[inequb], -self.prob.conslinlb[ineqlb]))\n', (1754, 1815), True, 'import numpy as np\n'), ((1940, 1964), 'numpy.ix_', 'np.ix_', (['conseq', '(~consvar)'], {}), '(conseq, ~consvar)\n', (1946, 1964), True, 'import numpy as np\n'), ((1601, 1625), 'numpy.ix_', 'np.ix_', (['inequb', '(~consvar)'], {}), '(inequb, ~consvar)\n', (1607, 1625), True, 'import numpy as np\n'), ((1689, 1713), 'numpy.ix_', 'np.ix_', (['ineqlb', '(~consvar)'], {}), '(ineqlb, ~consvar)\n', (1695, 1713), True, 'import numpy as np\n')] |
# NOTE: contains only one test, _est_cont_fit, that is renamed so that
# the test runner does not run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters: relatively small sample size, default starting values
# Ran 84 tests in 401.797s
# FAILED (failures=15)
import numpy as np
from scipy import stats
from .distparams import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values do not differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # fail cut-off: fraction of each true parameter value
thresh_min = 0.75 # minimum absolute difference (estimate - true) to fail
# Uncomment to restrict the run to a single distribution for debugging:
#distcont = [['genextreme', (3.3184017469423535,)]]
def _est_cont_fit():
    """Yield ``(check_cont_fit, distname, arg)`` triples for every
    continuous distribution listed in ``distcont``.

    Checks the closeness of parameters estimated by the ``fit`` method to
    the true parameters.  Note: slow; some distributions do not converge
    with sample size <= 10000.
    """
    yield from ((check_cont_fit, distname, arg) for distname, arg in distcont)
def check_cont_fit(distname,arg):
    """Fit a continuous distribution to random samples and check the
    estimated parameters against the true ones.

    A first fit uses ``n_repl1`` samples with default starting values; if
    any parameter misses the tolerance, the sample is enlarged to
    ``n_repl2`` and the fit retried before raising AssertionError.
    """
    distfn = getattr(stats, distname)

    rvs = distfn.rvs(*arg, size=n_repl1)
    est = distfn.fit(rvs)  # start with the default starting values
    truearg = np.hstack([arg, [0.0, 1.0]])

    # Per-parameter tolerance: a fraction of the true value, floored at
    # thresh_min; the location entry is scaled by the sample mean instead.
    diffthreshold = np.max(np.vstack([truearg*thresh_percent,
                                      np.ones(distfn.numargs+2)*thresh_min]), 0)
    diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent, thresh_min])

    if np.any(np.isnan(est)):
        raise AssertionError('nan returned in fit')

    def _exceeds(delta):
        # True when any |estimate - true| is beyond its tolerance.
        return np.any((np.abs(delta) - diffthreshold) > 0.0)

    if not _exceeds(est - truearg):
        return

    # diff too large with the small sample -- retry with more data
    rvs = np.concatenate([rvs, distfn.rvs(*arg, size=n_repl2-n_repl1)])
    est = distfn.fit(rvs)
    truearg = np.hstack([arg, [0.0, 1.0]])
    diff = est - truearg
    if _exceeds(diff):
        txt = 'parameter: %s\n' % str(truearg)
        txt += 'estimated: %s\n' % str(est)
        txt += 'diff : %s\n' % str(diff)
        raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
if __name__ == "__main__":
    # Run this module's tests directly: verbose output, stop on the first
    # failure, and drop into pdb on error.
    import pytest
    pytest.main([__file__, '-vvs', '-x', '--pdb'])
| [
"numpy.abs",
"numpy.ones",
"numpy.hstack",
"pytest.main",
"numpy.isnan"
] | [((1376, 1404), 'numpy.hstack', 'np.hstack', (['[arg, [0.0, 1.0]]'], {}), '([arg, [0.0, 1.0]])\n', (1385, 1404), True, 'import numpy as np\n'), ((2513, 2559), 'pytest.main', 'pytest.main', (["[__file__, '-vvs', '-x', '--pdb']"], {}), "([__file__, '-vvs', '-x', '--pdb'])\n", (2524, 2559), False, 'import pytest\n'), ((1687, 1700), 'numpy.isnan', 'np.isnan', (['est'], {}), '(est)\n', (1695, 1700), True, 'import numpy as np\n'), ((2095, 2123), 'numpy.hstack', 'np.hstack', (['[arg, [0.0, 1.0]]'], {}), '([arg, [0.0, 1.0]])\n', (2104, 2123), True, 'import numpy as np\n'), ((1522, 1549), 'numpy.ones', 'np.ones', (['(distfn.numargs + 2)'], {}), '(distfn.numargs + 2)\n', (1529, 1549), True, 'import numpy as np\n'), ((1784, 1796), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (1790, 1796), True, 'import numpy as np\n'), ((2176, 2188), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2182, 2188), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from pandas import DataFrame, Series
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.utils import pandas_sort, pandas_concat
from trackpy.tests.common import StrictTestCase
def random_walk(N):
    """Return a length-``N`` random walk: the cumulative sum of ``N``
    independent standard-normal steps."""
    steps = np.random.randn(N)
    return steps.cumsum()
def conformity(df):
    """ Organize toy data to look like real data. Be strict about dtypes:
    particle is a float and frame is an integer."""
    # Use the builtin int/float instead of the np.int/np.float aliases,
    # which were deprecated in NumPy 1.20 and removed in 1.24; the
    # resulting dtypes (platform int / float64) are unchanged.
    df['frame'] = df['frame'].astype(int)
    df['particle'] = df['particle'].astype(float)
    df['x'] = df['x'].astype(float)
    df['y'] = df['y'].astype(float)
    # Index by frame but keep the column, as trackpy's tests expect.
    df.set_index('frame', drop=False, inplace=True)
    return pandas_sort(df, by=['frame', 'particle'])
def assert_traj_equal(t1, t2):
    """Assert two trajectory DataFrames are equal after normalization
    through ``conformity``."""
    left = conformity(t1)
    right = conformity(t2)
    return assert_frame_equal(left, right)
def add_drift(df, drift):
    """Return a copy of ``df`` with ``drift`` added to its x and y columns.

    Addition aligns on the index (frame); frames absent from ``drift``
    contribute zero drift.  The input frame is not modified.
    """
    shifted = df.copy()
    for col in ('x', 'y'):
        shifted[col] = shifted[col].add(drift[col], fill_value=0)
    return shifted
class TestDrift(StrictTestCase):
    """Tests for tp.compute_drift and tp.subtract_drift.

    Fixtures built in setUp:
      * dead_still -- two motionless particles (drift should be zero).
      * many_walks -- many tiny-amplitude random walks (drift ~ zero).
      * steppers   -- two particles drifting one pixel per frame in x.
    """

    def setUp(self):
        N = 10
        Y = 1
        a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.dead_still = conformity(pandas_concat([a, b]))
        pandas_sort(self.dead_still, ['frame', 'particle'], inplace=True)

        P = 1000 # particles
        A = 0.00001 # step amplitude
        np.random.seed(0)
        particles = [DataFrame({'x': A*random_walk(N), 'y': A*random_walk(N),
                                'frame': np.arange(N), 'particle': i})
                     for i in range(P)]
        self.many_walks = conformity(pandas_concat(particles))

        a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([a, b]))

    def test_no_drift(self):
        """Motionless and near-motionless data must measure zero drift."""
        N = 10
        expected = DataFrame({'x': np.zeros(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']
        # ^ no drift measured for Frame 0
        actual = tp.compute_drift(self.dead_still)
        assert_frame_equal(actual, expected[['y', 'x']])
        actual_rolling = tp.compute_drift(self.dead_still, smoothing=2)
        assert_frame_equal(actual_rolling, expected[['y', 'x']])
        # Small random drift
        actual = tp.compute_drift(self.many_walks)
        assert_frame_equal(actual, expected[['y', 'x']])

    def test_constant_drift(self):
        """Steppers moving 1 px/frame in x must measure unit drift in x."""
        N = 10
        expected = DataFrame({'x': np.arange(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']
        actual = tp.compute_drift(self.steppers)
        assert_frame_equal(actual, expected[['y', 'x']])

    def test_subtract_zero_drift(self):
        """Subtracting a zero drift must leave every fixture unchanged."""
        N = 10
        # dtype=int (builtin): the np.int alias was removed in NumPy 1.24.
        drift = DataFrame(np.zeros((N - 1, 2)),
                          np.arange(1, N, dtype=int)).astype('float64')
        drift.columns = ['x', 'y']
        drift.index.name = 'frame'
        actual = tp.subtract_drift(self.dead_still, drift)
        assert_traj_equal(actual, self.dead_still)
        actual = tp.subtract_drift(self.many_walks, drift)
        assert_traj_equal(actual, self.many_walks)
        actual = tp.subtract_drift(self.steppers, drift)
        assert_traj_equal(actual, self.steppers)

    def test_subtract_constant_drift(self):
        """Adding a constant drift then subtracting it must round-trip."""
        N = 10
        # Add a constant drift here, and then use subtract_drift to
        # subtract it.
        # dtype=int (builtin): the np.int alias was removed in NumPy 1.24.
        drift = DataFrame(np.outer(np.arange(N - 1), [1, 1]),
                          index=np.arange(1, N, dtype=int)).astype('float64')
        drift.columns = ['x', 'y']
        drift.index.name = 'frame'
        actual = tp.subtract_drift(add_drift(self.dead_still, drift), drift)
        assert_traj_equal(actual, self.dead_still)
        actual = tp.subtract_drift(add_drift(self.many_walks, drift), drift)
        assert_traj_equal(actual, self.many_walks)
        actual = tp.subtract_drift(add_drift(self.steppers, drift), drift)
        assert_traj_equal(actual, self.steppers)
class TestMSD(StrictTestCase):
    """Tests for the ensemble mean-squared displacement (tp.emsd)."""

    def setUp(self):
        N = 10
        Y = 1
        a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.dead_still = conformity(pandas_concat([a, b]))

        P = 50 # particles
        A = 1 # step amplitude
        np.random.seed(0)
        particles = [DataFrame({'x': A*random_walk(N), 'y': A*random_walk(N),
                                'frame': np.arange(N), 'particle': i})
                     for i in range(P)]
        self.many_walks = conformity(pandas_concat(particles))

        a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([a, b]))

    def test_zero_emsd(self):
        """Motionless particles must have identically zero MSD."""
        N = 10
        actual = tp.emsd(self.dead_still, 1, 1)
        # dtype=float (builtin): the np.float alias was removed in NumPy 1.24.
        expected = Series(np.zeros(N, dtype=float),
                          index=np.arange(N, dtype=float)).iloc[1:]
        expected.index.name = 'lagt'
        expected.name = 'msd'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(actual, expected)

    def test_linear_emsd(self):
        """Random walks of amplitude A must show MSD = 2*A*t."""
        A = 1
        EARLY = 7 # only early lag times have good stats
        actual = tp.emsd(self.many_walks, 1, 1, max_lagtime=EARLY)
        a = np.arange(EARLY+1, dtype='float64')
        expected = Series(2*A*a, index=a).iloc[1:]
        expected.name = 'msd'
        expected.index.name = 'lagt'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(np.round(actual), expected)

    def test_linear_emsd_gaps(self):
        """emsd must tolerate missing observations (gaps) in trajectories."""
        A = 1
        EARLY = 4 # only early lag times have good stats
        # Randomly drop 10% of the observations to create gaps.
        gapped_walks = self.many_walks.reset_index(drop=True)
        to_drop = np.random.choice(gapped_walks.index,
                                   int(len(gapped_walks) * 0.1), replace=False)
        gapped_walks = gapped_walks.drop(to_drop, axis=0)

        actual = tp.emsd(gapped_walks, 1, 1, max_lagtime=EARLY)
        a = np.arange(EARLY+1, dtype='float64')
        expected = Series(2*A*a, index=a).iloc[1:]
        expected.name = 'msd'
        expected.index.name = 'lagt'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(np.round(actual), expected)
class TestSpecial(StrictTestCase):
    """Smoke tests for miscellaneous motion statistics."""

    def setUp(self):
        N = 10
        Y = 1
        # First stepper: present from frame 0, drifting 1 px/frame in x.
        first = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                           'frame': np.arange(N), 'particle': np.zeros(N)})
        # Second stepper: offset by Y in y, appears from frame 1 onward.
        second = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                            'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([first, second]))

    def test_theta_entropy(self):
        # just a smoke test: must run without raising
        def theta_entropy(traj):
            return tp.motion.theta_entropy(traj, plot=False)
        self.steppers.groupby('particle').apply(theta_entropy)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    import unittest
    unittest.main()
| [
"pandas.Series",
"pandas.util.testing.assert_almost_equal",
"trackpy.subtract_drift",
"numpy.ones",
"trackpy.utils.pandas_sort",
"numpy.round",
"trackpy.motion.theta_entropy",
"trackpy.emsd",
"trackpy.utils.pandas_concat",
"numpy.zeros",
"numpy.random.seed",
"trackpy.compute_drift",
"unittes... | [((877, 918), 'trackpy.utils.pandas_sort', 'pandas_sort', (['df'], {'by': "['frame', 'particle']"}), "(df, by=['frame', 'particle'])\n", (888, 918), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((8602, 8617), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8615, 8617), False, 'import unittest\n'), ((470, 488), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (485, 488), True, 'import numpy as np\n'), ((1612, 1677), 'trackpy.utils.pandas_sort', 'pandas_sort', (['self.dead_still', "['frame', 'particle']"], {'inplace': '(True)'}), "(self.dead_still, ['frame', 'particle'], inplace=True)\n", (1623, 1677), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((1753, 1770), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1767, 1770), True, 'import numpy as np\n'), ((2664, 2697), 'trackpy.compute_drift', 'tp.compute_drift', (['self.dead_still'], {}), '(self.dead_still)\n', (2680, 2697), True, 'import trackpy as tp\n'), ((2706, 2754), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', "expected[['y', 'x']]"], {}), "(actual, expected[['y', 'x']])\n", (2724, 2754), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((2781, 2827), 'trackpy.compute_drift', 'tp.compute_drift', (['self.dead_still'], {'smoothing': '(2)'}), '(self.dead_still, smoothing=2)\n', (2797, 2827), True, 'import trackpy as tp\n'), ((2836, 2892), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_rolling', "expected[['y', 'x']]"], {}), "(actual_rolling, expected[['y', 'x']])\n", (2854, 2892), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((2940, 2973), 'trackpy.compute_drift', 'tp.compute_drift', (['self.many_walks'], {}), '(self.many_walks)\n', (2956, 2973), True, 'import trackpy as tp\n'), ((2982, 3030), 'pandas.util.testing.assert_frame_equal', 
'assert_frame_equal', (['actual', "expected[['y', 'x']]"], {}), "(actual, expected[['y', 'x']])\n", (3000, 3030), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((3297, 3328), 'trackpy.compute_drift', 'tp.compute_drift', (['self.steppers'], {}), '(self.steppers)\n', (3313, 3328), True, 'import trackpy as tp\n'), ((3337, 3385), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', "expected[['y', 'x']]"], {}), "(actual, expected[['y', 'x']])\n", (3355, 3385), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((3652, 3693), 'trackpy.subtract_drift', 'tp.subtract_drift', (['self.dead_still', 'drift'], {}), '(self.dead_still, drift)\n', (3669, 3693), True, 'import trackpy as tp\n'), ((3762, 3803), 'trackpy.subtract_drift', 'tp.subtract_drift', (['self.many_walks', 'drift'], {}), '(self.many_walks, drift)\n', (3779, 3803), True, 'import trackpy as tp\n'), ((3872, 3911), 'trackpy.subtract_drift', 'tp.subtract_drift', (['self.steppers', 'drift'], {}), '(self.steppers, drift)\n', (3889, 3911), True, 'import trackpy as tp\n'), ((5194, 5211), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5208, 5211), True, 'import numpy as np\n'), ((5866, 5896), 'trackpy.emsd', 'tp.emsd', (['self.dead_still', '(1)', '(1)'], {}), '(self.dead_still, 1, 1)\n', (5873, 5896), True, 'import trackpy as tp\n'), ((6236, 6299), 'pandas.util.testing.assert_almost_equal', 'assert_almost_equal', (['actual.index.values', 'expected.index.values'], {}), '(actual.index.values, expected.index.values)\n', (6255, 6299), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((6346, 6383), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (6365, 6383), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, 
assert_almost_equal\n'), ((6505, 6554), 'trackpy.emsd', 'tp.emsd', (['self.many_walks', '(1)', '(1)'], {'max_lagtime': 'EARLY'}), '(self.many_walks, 1, 1, max_lagtime=EARLY)\n', (6512, 6554), True, 'import trackpy as tp\n'), ((6567, 6604), 'numpy.arange', 'np.arange', (['(EARLY + 1)'], {'dtype': '"""float64"""'}), "(EARLY + 1, dtype='float64')\n", (6576, 6604), True, 'import numpy as np\n'), ((6867, 6930), 'pandas.util.testing.assert_almost_equal', 'assert_almost_equal', (['actual.index.values', 'expected.index.values'], {}), '(actual.index.values, expected.index.values)\n', (6886, 6930), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((7408, 7454), 'trackpy.emsd', 'tp.emsd', (['gapped_walks', '(1)', '(1)'], {'max_lagtime': 'EARLY'}), '(gapped_walks, 1, 1, max_lagtime=EARLY)\n', (7415, 7454), True, 'import trackpy as tp\n'), ((7467, 7504), 'numpy.arange', 'np.arange', (['(EARLY + 1)'], {'dtype': '"""float64"""'}), "(EARLY + 1, dtype='float64')\n", (7476, 7504), True, 'import numpy as np\n'), ((7767, 7830), 'pandas.util.testing.assert_almost_equal', 'assert_almost_equal', (['actual.index.values', 'expected.index.values'], {}), '(actual.index.values, expected.index.values)\n', (7786, 7830), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((1581, 1602), 'trackpy.utils.pandas_concat', 'pandas_concat', (['[a, b]'], {}), '([a, b])\n', (1594, 1602), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((1997, 2021), 'trackpy.utils.pandas_concat', 'pandas_concat', (['particles'], {}), '(particles)\n', (2010, 2021), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((2340, 2361), 'trackpy.utils.pandas_concat', 'pandas_concat', (['[a, b]'], {}), '([a, b])\n', (2353, 2361), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((5104, 5125), 'trackpy.utils.pandas_concat', 'pandas_concat', (['[a, b]'], {}), '([a, b])\n', 
(5117, 5125), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((5438, 5462), 'trackpy.utils.pandas_concat', 'pandas_concat', (['particles'], {}), '(particles)\n', (5451, 5462), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((5780, 5801), 'trackpy.utils.pandas_concat', 'pandas_concat', (['[a, b]'], {}), '([a, b])\n', (5793, 5801), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((6997, 7013), 'numpy.round', 'np.round', (['actual'], {}), '(actual)\n', (7005, 7013), True, 'import numpy as np\n'), ((7897, 7913), 'numpy.round', 'np.round', (['actual'], {}), '(actual)\n', (7905, 7913), True, 'import numpy as np\n'), ((8327, 8348), 'trackpy.utils.pandas_concat', 'pandas_concat', (['[a, b]'], {}), '([a, b])\n', (8340, 8348), False, 'from trackpy.utils import pandas_sort, pandas_concat\n'), ((8447, 8485), 'trackpy.motion.theta_entropy', 'tp.motion.theta_entropy', (['x'], {'plot': '(False)'}), '(x, plot=False)\n', (8470, 8485), True, 'import trackpy as tp\n'), ((1292, 1303), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1300, 1303), True, 'import numpy as np\n'), ((1310, 1321), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1318, 1321), True, 'import numpy as np\n'), ((1355, 1367), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1364, 1367), True, 'import numpy as np\n'), ((1381, 1392), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1389, 1392), True, 'import numpy as np\n'), ((1423, 1438), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (1431, 1438), True, 'import numpy as np\n'), ((1498, 1513), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (1507, 1513), True, 'import numpy as np\n'), ((1527, 1541), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (1534, 1541), True, 'import numpy as np\n'), ((2052, 2064), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2061, 2064), True, 'import numpy as np\n'), ((2071, 2082), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2079, 
2082), True, 'import numpy as np\n'), ((2116, 2128), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2125, 2128), True, 'import numpy as np\n'), ((2142, 2153), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2150, 2153), True, 'import numpy as np\n'), ((2184, 2199), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (2193, 2199), True, 'import numpy as np\n'), ((2259, 2274), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (2268, 2274), True, 'import numpy as np\n'), ((2288, 2302), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (2295, 2302), True, 'import numpy as np\n'), ((4816, 4827), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4824, 4827), True, 'import numpy as np\n'), ((4834, 4845), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4842, 4845), True, 'import numpy as np\n'), ((4878, 4890), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4887, 4890), True, 'import numpy as np\n'), ((4904, 4915), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4912, 4915), True, 'import numpy as np\n'), ((4946, 4961), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (4954, 4961), True, 'import numpy as np\n'), ((5021, 5036), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (5030, 5036), True, 'import numpy as np\n'), ((5050, 5064), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (5057, 5064), True, 'import numpy as np\n'), ((5493, 5505), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5502, 5505), True, 'import numpy as np\n'), ((5512, 5523), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (5520, 5523), True, 'import numpy as np\n'), ((5556, 5568), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5565, 5568), True, 'import numpy as np\n'), ((5582, 5593), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (5590, 5593), True, 'import numpy as np\n'), ((5624, 5639), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (5633, 5639), True, 'import numpy as np\n'), ((5699, 5714), 
'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (5708, 5714), True, 'import numpy as np\n'), ((5728, 5742), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (5735, 5742), True, 'import numpy as np\n'), ((6622, 6648), 'pandas.Series', 'Series', (['(2 * A * a)'], {'index': 'a'}), '(2 * A * a, index=a)\n', (6628, 6648), False, 'from pandas import DataFrame, Series\n'), ((7522, 7548), 'pandas.Series', 'Series', (['(2 * A * a)'], {'index': 'a'}), '(2 * A * a, index=a)\n', (7528, 7548), False, 'from pandas import DataFrame, Series\n'), ((8040, 8052), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (8049, 8052), True, 'import numpy as np\n'), ((8059, 8070), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (8067, 8070), True, 'import numpy as np\n'), ((8103, 8115), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (8112, 8115), True, 'import numpy as np\n'), ((8129, 8140), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (8137, 8140), True, 'import numpy as np\n'), ((8171, 8186), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (8180, 8186), True, 'import numpy as np\n'), ((8246, 8261), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (8255, 8261), True, 'import numpy as np\n'), ((8275, 8289), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (8282, 8289), True, 'import numpy as np\n'), ((1449, 1464), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (1457, 1464), True, 'import numpy as np\n'), ((1890, 1902), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1899, 1902), True, 'import numpy as np\n'), ((2210, 2225), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (2218, 2225), True, 'import numpy as np\n'), ((3468, 3488), 'numpy.zeros', 'np.zeros', (['(N - 1, 2)'], {}), '((N - 1, 2))\n', (3476, 3488), True, 'import numpy as np\n'), ((3516, 3545), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'np.int'}), '(1, N, dtype=np.int)\n', (3525, 3545), True, 'import numpy as np\n'), ((4972, 4987), 
'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (4980, 4987), True, 'import numpy as np\n'), ((5331, 5343), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5340, 5343), True, 'import numpy as np\n'), ((5650, 5665), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (5658, 5665), True, 'import numpy as np\n'), ((5923, 5950), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'np.float'}), '(N, dtype=np.float)\n', (5931, 5950), True, 'import numpy as np\n'), ((8197, 8212), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (8205, 8212), True, 'import numpy as np\n'), ((2443, 2454), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2451, 2454), True, 'import numpy as np\n'), ((2461, 2472), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2469, 2472), True, 'import numpy as np\n'), ((3117, 3129), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3126, 3129), True, 'import numpy as np\n'), ((3136, 3147), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3144, 3147), True, 'import numpy as np\n'), ((4147, 4163), 'numpy.arange', 'np.arange', (['(N - 1)'], {}), '(N - 1)\n', (4156, 4163), True, 'import numpy as np\n'), ((4206, 4235), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'np.int'}), '(1, N, dtype=np.int)\n', (4215, 4235), True, 'import numpy as np\n'), ((5984, 6012), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.float'}), '(N, dtype=np.float)\n', (5993, 6012), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dialog_state_tracking."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
from uncertainty_baselines.datasets import dialog_state_tracking
# Shorthand aliases for the dataset's padded dialog and utterance lengths,
# used throughout the shape assertions below.
max_dial_len = dialog_state_tracking.MAX_DIALOG_LEN
max_utt_len = dialog_state_tracking.MAX_UTT_LEN
class DialogStateTrackingTest(tf.test.TestCase, parameterized.TestCase):
  """Sanity checks for the SimDial dialog state tracking dataset builder."""

  def _build(self, split):
    """Returns a SimDialDataset builder for `split` with a small buffer."""
    return ub.datasets.SimDialDataset(split=split, shuffle_buffer_size=20)

  def _first_batch(self, split):
    """Loads one batch from `split`.

    Returns:
      Tuple of (batch dict, batch size); train batches hold 9 examples,
      test batches hold 5.
    """
    batch_size = 9 if split == tfds.Split.TRAIN else 5
    dataset = self._build(split).load(batch_size=batch_size).take(1)
    return next(iter(dataset)), batch_size

  @parameterized.named_parameters(
      ('Train', tfds.Split.TRAIN, dialog_state_tracking.NUM_TRAIN),
      ('Test', tfds.Split.TEST, dialog_state_tracking.NUM_TEST))
  def testDatasetSize(self, split, expected_size):
    self.assertEqual(self._build(split).num_examples, expected_size)

  @parameterized.named_parameters(('Train', tfds.Split.TRAIN),
                                  ('Test', tfds.Split.TEST))
  def testDatasetShape(self, split):
    batch, batch_size = self._first_batch(split)
    utt_shape = (batch_size, max_dial_len, max_utt_len)
    self.assertEqual(batch['usr_utt'].shape, utt_shape)
    self.assertEqual(batch['sys_utt'].shape, utt_shape)
    self.assertEqual(batch['label'].shape, (batch_size, max_dial_len))
    self.assertEqual(batch['dialog_len'].shape, (batch_size,))

  @parameterized.named_parameters(('Train', tfds.Split.TRAIN),
                                  ('Test', tfds.Split.TEST))
  def testDialogLength(self, split):
    """Checks dialog length matches with that in dialog_len."""
    batch, _ = self._first_batch(split)
    dialog_len = batch['dialog_len'].numpy()
    # A dialog's length is the number of non-empty turns; an utterance (or
    # state label) is non-empty iff it contains at least one non-pad token.
    for key in ('usr_utt', 'sys_utt'):
      utter_len = np.sum(batch[key].numpy() > 0, axis=-1)
      np.testing.assert_array_equal(np.sum(utter_len > 0, axis=-1), dialog_len)
    label_len = np.sum(batch['label'].numpy() > 0, axis=-1)
    np.testing.assert_array_equal(label_len, dialog_len)

  def testVocab(self):
    """Tests if vocab is loaded correctly."""
    builder = self._build(tfds.Split.TRAIN)
    self.assertLen(builder.vocab_utter, dialog_state_tracking.VOCAB_SIZE_UTT)
    self.assertLen(builder.vocab_label, dialog_state_tracking.VOCAB_SIZE_LABEL)

  @parameterized.named_parameters(('Train', tfds.Split.TRAIN),
                                  ('Test', tfds.Split.TEST))
  def testDatasetSpec(self, split):
    """Tests if dataset specification returns valid tensor shapes."""
    batch_size = 9
    dataset = self._build(split).load(batch_size=batch_size)
    spec = tf.data.DatasetSpec.from_value(dataset).element_spec
    utt_spec = tf.TensorSpec((batch_size, max_dial_len, max_utt_len),
                             dtype=tf.int32)
    label_spec = tf.TensorSpec((batch_size, max_dial_len), dtype=tf.int32)
    self.assertEqual(spec['sys_utt'], utt_spec)
    self.assertEqual(spec['usr_utt'], utt_spec)
    self.assertEqual(spec['label'], label_spec)
# Allow running this test file directly.
if __name__ == '__main__':
  tf.test.main()
| [
"uncertainty_baselines.datasets.SimDialDataset",
"absl.testing.parameterized.named_parameters",
"tensorflow.test.main",
"tensorflow.TensorSpec",
"numpy.sum",
"tensorflow.data.DatasetSpec.from_value",
"numpy.testing.assert_array_equal"
] | [((1051, 1210), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Train', tfds.Split.TRAIN, dialog_state_tracking.NUM_TRAIN)", "('Test', tfds.Split.TEST, dialog_state_tracking.NUM_TEST)"], {}), "(('Train', tfds.Split.TRAIN,\n dialog_state_tracking.NUM_TRAIN), ('Test', tfds.Split.TEST,\n dialog_state_tracking.NUM_TEST))\n", (1081, 1210), False, 'from absl.testing import parameterized\n'), ((1432, 1523), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Train', tfds.Split.TRAIN)", "('Test', tfds.Split.TEST)"], {}), "(('Train', tfds.Split.TRAIN), ('Test', tfds.\n Split.TEST))\n", (1462, 1523), False, 'from absl.testing import parameterized\n'), ((2475, 2566), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Train', tfds.Split.TRAIN)", "('Test', tfds.Split.TEST)"], {}), "(('Train', tfds.Split.TRAIN), ('Test', tfds.\n Split.TEST))\n", (2505, 2566), False, 'from absl.testing import parameterized\n'), ((4185, 4276), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Train', tfds.Split.TRAIN)", "('Test', tfds.Split.TEST)"], {}), "(('Train', tfds.Split.TRAIN), ('Test', tfds.\n Split.TEST))\n", (4215, 4276), False, 'from absl.testing import parameterized\n'), ((5077, 5091), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (5089, 5091), True, 'import tensorflow as tf\n'), ((1289, 1352), 'uncertainty_baselines.datasets.SimDialDataset', 'ub.datasets.SimDialDataset', ([], {'split': 'split', 'shuffle_buffer_size': '(20)'}), '(split=split, shuffle_buffer_size=20)\n', (1315, 1352), True, 'import uncertainty_baselines as ub\n'), ((1667, 1730), 'uncertainty_baselines.datasets.SimDialDataset', 'ub.datasets.SimDialDataset', ([], {'split': 'split', 'shuffle_buffer_size': '(20)'}), '(split=split, shuffle_buffer_size=20)\n', (1693, 1730), True, 'import uncertainty_baselines as ub\n'), ((2774, 2837), 
'uncertainty_baselines.datasets.SimDialDataset', 'ub.datasets.SimDialDataset', ([], {'split': 'split', 'shuffle_buffer_size': '(20)'}), '(split=split, shuffle_buffer_size=20)\n', (2800, 2837), True, 'import uncertainty_baselines as ub\n'), ((3200, 3233), 'numpy.sum', 'np.sum', (['(features_usr > 0)'], {'axis': '(-1)'}), '(features_usr > 0, axis=-1)\n', (3206, 3233), True, 'import numpy as np\n'), ((3255, 3289), 'numpy.sum', 'np.sum', (['(utter_len_usr > 0)'], {'axis': '(-1)'}), '(utter_len_usr > 0, axis=-1)\n', (3261, 3289), True, 'import numpy as np\n'), ((3367, 3400), 'numpy.sum', 'np.sum', (['(features_sys > 0)'], {'axis': '(-1)'}), '(features_sys > 0, axis=-1)\n', (3373, 3400), True, 'import numpy as np\n'), ((3422, 3456), 'numpy.sum', 'np.sum', (['(utter_len_sys > 0)'], {'axis': '(-1)'}), '(utter_len_sys > 0, axis=-1)\n', (3428, 3456), True, 'import numpy as np\n'), ((3532, 3559), 'numpy.sum', 'np.sum', (['(labels > 0)'], {'axis': '(-1)'}), '(labels > 0, axis=-1)\n', (3538, 3559), True, 'import numpy as np\n'), ((3565, 3622), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dialog_len_usr', 'dialog_len'], {}), '(dialog_len_usr, dialog_len)\n', (3594, 3622), True, 'import numpy as np\n'), ((3627, 3684), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dialog_len_sys', 'dialog_len'], {}), '(dialog_len_sys, dialog_len)\n', (3656, 3684), True, 'import numpy as np\n'), ((3689, 3748), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dialog_len_label', 'dialog_len'], {}), '(dialog_len_label, dialog_len)\n', (3718, 3748), True, 'import numpy as np\n'), ((3841, 3915), 'uncertainty_baselines.datasets.SimDialDataset', 'ub.datasets.SimDialDataset', ([], {'split': 'tfds.Split.TRAIN', 'shuffle_buffer_size': '(20)'}), '(split=tfds.Split.TRAIN, shuffle_buffer_size=20)\n', (3867, 3915), True, 'import uncertainty_baselines as ub\n'), ((4453, 4516), 'uncertainty_baselines.datasets.SimDialDataset', 
'ub.datasets.SimDialDataset', ([], {'split': 'split', 'shuffle_buffer_size': '(20)'}), '(split=split, shuffle_buffer_size=20)\n', (4479, 4516), True, 'import uncertainty_baselines as ub\n'), ((4702, 4772), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(batch_size, max_dial_len, max_utt_len)'], {'dtype': 'tf.int32'}), '((batch_size, max_dial_len, max_utt_len), dtype=tf.int32)\n', (4715, 4772), True, 'import tensorflow as tf\n'), ((4819, 4876), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(batch_size, max_dial_len)'], {'dtype': 'tf.int32'}), '((batch_size, max_dial_len), dtype=tf.int32)\n', (4832, 4876), True, 'import tensorflow as tf\n'), ((4603, 4642), 'tensorflow.data.DatasetSpec.from_value', 'tf.data.DatasetSpec.from_value', (['dataset'], {}), '(dataset)\n', (4633, 4642), True, 'import tensorflow as tf\n')] |
import numpy as np
from ..element_h1 import ElementH1
from ...mesh.mesh2d import MeshTri
class ElementTriP0(ElementH1):
    """Piecewise constant (P0) element on the reference triangle."""

    interior_dofs = 1
    dim = 2
    maxdeg = 0
    dofnames = ['u']
    doflocs = np.array([[.5, .5]])
    mesh_type = MeshTri

    def lbasis(self, X, i):
        """Evaluate the single constant basis function and its gradient at X."""
        # The basis function is identically one; multiplying by X keeps the
        # returned arrays shaped like the evaluation points.
        value = 1. + 0. * X[0]
        gradient = 0. * X
        return value, gradient
| [
"numpy.array"
] | [((207, 229), 'numpy.array', 'np.array', (['[[0.5, 0.5]]'], {}), '([[0.5, 0.5]])\n', (215, 229), True, 'import numpy as np\n')] |
from __future__ import print_function
from timeit import default_timer as time
import sys
import numpy as np
import numba_dppy, numba_dppy as dppl
import dpctl
from numba_dppy.testing import unittest
from numba_dppy.testing import DPPLTestCase
def data_parallel_sum(a, b, c):
    """Kernel body: element-wise sum c = a + b, one work-item per element."""
    gid = dppl.get_global_id(0)
    c[gid] = a[gid] + b[gid]
class TestCaching(DPPLTestCase):
    """Checks that specializing the same kernel twice reuses the cached object."""

    def test_caching_kernel(self):
        global_size = 10
        n = global_size
        a = np.random.random(n).astype(np.float32)
        b = np.random.random(n).astype(np.float32)
        c = np.ones_like(a)
        with dpctl.device_context("opencl:gpu") as gpu_queue:
            func = dppl.kernel(data_parallel_sum)
            reference = func[global_size, dppl.DEFAULT_LOCAL_SIZE].specialize(a, b, c)
            # Re-specializing with identical argument types must hand back
            # the very same compiled-kernel instance every time.
            for _ in range(10):
                again = func[global_size, dppl.DEFAULT_LOCAL_SIZE].specialize(a, b, c)
                self.assertIs(reference, again)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.ones_like",
"numpy.random.random",
"numba_dppy.get_global_id",
"numba_dppy.kernel",
"dpctl.device_context",
"numba_dppy.testing.unittest.main"
] | [((287, 308), 'numba_dppy.get_global_id', 'dppl.get_global_id', (['(0)'], {}), '(0)\n', (305, 308), True, 'import numba_dppy, numba_dppy as dppl\n'), ((1028, 1043), 'numba_dppy.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (1041, 1043), False, 'from numba_dppy.testing import unittest\n'), ((584, 599), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (596, 599), True, 'import numpy as np\n'), ((473, 492), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (489, 492), True, 'import numpy as np\n'), ((533, 552), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (549, 552), True, 'import numpy as np\n'), ((615, 649), 'dpctl.device_context', 'dpctl.device_context', (['"""opencl:gpu"""'], {}), "('opencl:gpu')\n", (635, 649), False, 'import dpctl\n'), ((683, 713), 'numba_dppy.kernel', 'dppl.kernel', (['data_parallel_sum'], {}), '(data_parallel_sum)\n', (694, 713), True, 'import numba_dppy, numba_dppy as dppl\n')] |
import numpy as np
from frameworks.CPLELearning import CPLELearningModel
from frameworks.SelfLearning import SelfLearningModel
from methods.scikitWQDA import WQDA
from examples.plotutils import evaluate_and_plot
# number of data points
N = 60
# Number of labeled points; half are drawn from each class below.
supervised_data_points = 4

# generate data: two Gaussian blobs on the diagonal, `meandistance` apart.
meandistance = 1

s = np.random.random()
cov = [[s, 0], [0, s]]
Xs = np.random.multivariate_normal([-s*meandistance, -s*meandistance], cov, (N,))
Xs = np.vstack(( Xs, np.random.multivariate_normal([s*meandistance, s*meandistance], cov, (N,)) ))
ytrue = np.array([0]*N + [1]*N)

# -1 marks unlabeled points for the semi-supervised learners.
ys = np.array([-1]*(2*N))
# Reveal a few random labels per class. Floor division (//) is required:
# range() rejects the float produced by / under Python 3.
for i in range(supervised_data_points // 2):
    ys[np.random.randint(0, N)] = 0
for i in range(supervised_data_points // 2):
    ys[np.random.randint(N, 2*N)] = 1

Xsupervised = Xs[ys!=-1, :]
ysupervised = ys[ys!=-1]

# compare models
lbl = "Purely supervised QDA:"
print(lbl)
model = WQDA()
model.fit(Xsupervised, ysupervised)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 1)

lbl = "SelfLearning QDA:"
print(lbl)
model = SelfLearningModel(WQDA())
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 2)

lbl = "CPLE(pessimistic) QDA:"
print(lbl)
model = CPLELearningModel(WQDA(), predict_from_probabilities=True)
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 3)

lbl = "CPLE(optimistic) QDA:"
print(lbl)
CPLELearningModel.pessimistic = False
model = CPLELearningModel(WQDA(), predict_from_probabilities=True)
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 4, block=True)
| [
"numpy.random.random",
"numpy.random.multivariate_normal",
"methods.scikitWQDA.WQDA",
"numpy.array",
"numpy.random.randint",
"examples.plotutils.evaluate_and_plot"
] | [((310, 328), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (326, 328), True, 'import numpy as np\n'), ((357, 442), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[-s * meandistance, -s * meandistance]', 'cov', '(N,)'], {}), '([-s * meandistance, -s * meandistance], cov, (N,)\n )\n', (386, 442), True, 'import numpy as np\n'), ((541, 568), 'numpy.array', 'np.array', (['([0] * N + [1] * N)'], {}), '([0] * N + [1] * N)\n', (549, 568), True, 'import numpy as np\n'), ((571, 595), 'numpy.array', 'np.array', (['([-1] * (2 * N))'], {}), '([-1] * (2 * N))\n', (579, 595), True, 'import numpy as np\n'), ((871, 877), 'methods.scikitWQDA.WQDA', 'WQDA', ([], {}), '()\n', (875, 877), False, 'from methods.scikitWQDA import WQDA\n'), ((914, 961), 'examples.plotutils.evaluate_and_plot', 'evaluate_and_plot', (['model', 'Xs', 'ys', 'ytrue', 'lbl', '(1)'], {}), '(model, Xs, ys, ytrue, lbl, 1)\n', (931, 961), False, 'from examples.plotutils import evaluate_and_plot\n'), ((1052, 1099), 'examples.plotutils.evaluate_and_plot', 'evaluate_and_plot', (['model', 'Xs', 'ys', 'ytrue', 'lbl', '(2)'], {}), '(model, Xs, ys, ytrue, lbl, 2)\n', (1069, 1099), False, 'from examples.plotutils import evaluate_and_plot\n'), ((1228, 1275), 'examples.plotutils.evaluate_and_plot', 'evaluate_and_plot', (['model', 'Xs', 'ys', 'ytrue', 'lbl', '(3)'], {}), '(model, Xs, ys, ytrue, lbl, 3)\n', (1245, 1275), False, 'from examples.plotutils import evaluate_and_plot\n'), ((1441, 1500), 'examples.plotutils.evaluate_and_plot', 'evaluate_and_plot', (['model', 'Xs', 'ys', 'ytrue', 'lbl', '(4)'], {'block': '(True)'}), '(model, Xs, ys, ytrue, lbl, 4, block=True)\n', (1458, 1500), False, 'from examples.plotutils import evaluate_and_plot\n'), ((1026, 1032), 'methods.scikitWQDA.WQDA', 'WQDA', ([], {}), '()\n', (1030, 1032), False, 'from methods.scikitWQDA import WQDA\n'), ((1169, 1175), 'methods.scikitWQDA.WQDA', 'WQDA', ([], {}), '()\n', (1173, 1175), False, 'from 
methods.scikitWQDA import WQDA\n'), ((1382, 1388), 'methods.scikitWQDA.WQDA', 'WQDA', ([], {}), '()\n', (1386, 1388), False, 'from methods.scikitWQDA import WQDA\n'), ((455, 533), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[s * meandistance, s * meandistance]', 'cov', '(N,)'], {}), '([s * meandistance, s * meandistance], cov, (N,))\n', (484, 533), True, 'import numpy as np\n'), ((640, 663), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N'], {}), '(0, N)\n', (657, 663), True, 'import numpy as np\n'), ((717, 744), 'numpy.random.randint', 'np.random.randint', (['N', '(2 * N)'], {}), '(N, 2 * N)\n', (734, 744), True, 'import numpy as np\n')] |
import json
import os
import torch
import torch.nn.functional as F
from collections import OrderedDict
from .base_cam import BaseCAM
import numpy as np
# Load the dictionary of model configs that, for each model class name,
# holds the name of the last layer before the GAP.
filepath = os.path.join(os.path.dirname(__file__), 'model_cam_configs.json')
# Explicit UTF-8: JSON is UTF-8 by spec, and the platform default encoding
# (e.g. on Windows) may differ and misdecode the file.
with open(filepath, encoding='utf-8') as f:
    MODEL_CONFIGS = json.load(f)
class GradCAM(BaseCAM):
    """Class for generating grad CAMs (gradient-weighted class activation maps).
    Adapted from: https://github.com/kazuto1011/grad-cam-pytorch
    """
    def __init__(self, model, device):
        """Registers forward/backward hooks on the model's target layer.

        Args:
            model: wrapped model; `model.module`'s class name is used to look
                up the target layer name in MODEL_CONFIGS.
            device: device handle forwarded to BaseCAM.
        """
        super(GradCAM, self).__init__(model, device)
        # Feature maps and gradients captured by the hooks, keyed by module id.
        self.fmaps = OrderedDict()
        self.grads = OrderedDict()
        # Name of the layer whose activations/gradients form the CAM
        # (per the config: the last layer before the GAP).
        self.target_layer = MODEL_CONFIGS[model.module.__class__.__name__]['target_layer']
        def save_fmap(m, _, output):
            # Forward hook: stash the layer's activations (moved to CPU).
            self.fmaps[id(m)] = output.to('cpu')
        def save_grad(m, _, grad_out):
            # Backward hook: stash the gradient w.r.t. the layer's output.
            self.grads[id(m)] = grad_out[0].to('cpu')
        # NOTE(review): self.model is presumably set by BaseCAM.__init__ —
        # confirm against the base class.
        for name, module in self.model.named_modules():
            # Only put hooks on the target layer
            if name == self.target_layer:
                self.target_module_id = id(module)
                module.register_forward_hook(save_fmap)
                # register_backward_hook is deprecated in recent PyTorch in
                # favor of register_full_backward_hook — flagging, not changing.
                module.register_backward_hook(save_grad)
    def _find(self, outputs):
        # Since we've only put hooks on one layer
        # the target layer, we can return the value
        # right away
        return outputs[self.target_module_id]
    @staticmethod
    def _normalize(grads):
        # Scale by the global L2 norm; the epsilon guards against an
        # all-zero gradient tensor.
        return grads / (torch.norm(grads).item() + 1e-5)
    def _compute_grad_weights(self, grads):
        """Global-average-pool the normalized gradients into per-channel weights."""
        grads = self._normalize(grads)
        weights = F.adaptive_avg_pool2d(grads, 1)
        return weights
    def extract_cam(self):
        """Combine stored feature maps and gradient weights into a CAM.

        c: number of filters in final conv layer
        f: filter size
        shape of fmaps and grads : num_images x c x f x f
        shape of weights: num_images x c x 1 x 1
        shape of gcam: num_images x f x f
        Only a single image (num_images == 1) is supported, enforced below.
        """
        fmaps = self._find(self.fmaps)
        grads = self._find(self.grads)
        weights = self._compute_grad_weights(grads)
        assert len(fmaps.size()) == 4 and fmaps.size()[0] == 1
        assert len(weights.size()) == 4 and weights.size()[0] == 1
        # Sum up along the filter dimension
        gcam = (fmaps * weights).sum(dim=1)
        # ReLU (clamp at 0), then rescale into [0, 1] for visualization;
        # epsilon avoids division by zero for an all-zero map.
        gcam = torch.clamp(gcam, min=0, max=float('inf'))
        gcam -= gcam.min()
        gcam /= (gcam.max() + 1e-7)
        return gcam.detach().to('cpu').numpy()
    def get_cam(self, x, task_id, task=None):
        """Run forward and backward for `task_id` and build the CAM.

        Returns:
            (probs sorted descending along axis 0, the matching class order,
             the 2D CAM for the first image). `task` is currently unused.
        """
        # NOTE(review): self.forward/self.backward look like BaseCAM methods —
        # confirm their contract there.
        probs = self.forward(x)
        sorted_probs = np.sort(probs, axis=0)[::-1]
        idx = np.argsort(probs, axis=0)[::-1]
        self.backward(idx=task_id)
        cam = self.extract_cam()[0]
        return sorted_probs, idx, cam
| [
"collections.OrderedDict",
"torch.nn.functional.adaptive_avg_pool2d",
"numpy.sort",
"numpy.argsort",
"os.path.dirname",
"torch.norm",
"json.load"
] | [((287, 312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (302, 312), False, 'import os\n'), ((386, 398), 'json.load', 'json.load', (['f'], {}), '(f)\n', (395, 398), False, 'import json\n'), ((651, 664), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (662, 664), False, 'from collections import OrderedDict\n'), ((686, 699), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (697, 699), False, 'from collections import OrderedDict\n'), ((1691, 1722), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['grads', '(1)'], {}), '(grads, 1)\n', (1712, 1722), True, 'import torch.nn.functional as F\n'), ((2675, 2697), 'numpy.sort', 'np.sort', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (2682, 2697), True, 'import numpy as np\n'), ((2718, 2743), 'numpy.argsort', 'np.argsort', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (2728, 2743), True, 'import numpy as np\n'), ((1556, 1573), 'torch.norm', 'torch.norm', (['grads'], {}), '(grads)\n', (1566, 1573), False, 'import torch\n')] |
import logging
from time import sleep, time
import numpy as np
import pybullet as p
from transforms3d import euler
log = logging.getLogger(__name__)
from igibson.external.pybullet_tools.utils import (
control_joints,
get_base_values,
get_joint_positions,
get_max_limits,
get_min_limits,
get_sample_fn,
is_collision_free,
joints_from_names,
link_from_name,
plan_base_motion_2d,
plan_joint_motion,
set_base_values_with_z,
set_joint_positions,
)
from igibson.objects.visual_marker import VisualMarker
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d
class MotionPlanningWrapper(object):
"""
Motion planner wrapper that supports both base and arm motion
"""
    def __init__(
        self,
        env=None,
        base_mp_algo="birrt",
        arm_mp_algo="birrt",
        optimize_iter=0,
        fine_motion_plan=True,
        full_observability_2d_planning=False,
        collision_with_pb_2d_planning=False,
        visualize_2d_planning=False,
        visualize_2d_result=False,
    ):
        """
        Get planning related parameters.

        :param env: iGibson environment; must expose an "occupancy_grid" observation
        :param base_mp_algo: planning algorithm name for the base (passed to the 2D planner)
        :param arm_mp_algo: planning algorithm name for the arm
        :param optimize_iter: optimization iterations, stored for the planner calls
        :param fine_motion_plan: flag stored for arm planning, presumably enables finer collision checking
        :param full_observability_2d_planning: if True, plan on the scene's global floor map
            instead of the lidar-based occupancy grid
        :param collision_with_pb_2d_planning: if True, check base collisions in pybullet
            rather than against the 2D map
        :param visualize_2d_planning: flag forwarded to the 2D planner to visualize planning
        :param visualize_2d_result: flag forwarded to the 2D planner to visualize the result
        """
        self.env = env
        assert "occupancy_grid" in self.env.output
        # get planning related parameters from env
        body_ids = self.env.robots[0].get_body_ids()
        assert len(body_ids) == 1, "Only single-body robots are supported."
        self.robot_id = body_ids[0]
        # Types of 2D planning
        # full_observability_2d_planning=TRUE and collision_with_pb_2d_planning=TRUE -> We teleport the robot to locations and check for collisions
        # full_observability_2d_planning=TRUE and collision_with_pb_2d_planning=FALSE -> We use the global occupancy map from the scene
        # full_observability_2d_planning=FALSE and collision_with_pb_2d_planning=FALSE -> We use the occupancy_grid from the lidar sensor
        # full_observability_2d_planning=FALSE and collision_with_pb_2d_planning=TRUE -> [not suported yet]
        self.full_observability_2d_planning = full_observability_2d_planning
        self.collision_with_pb_2d_planning = collision_with_pb_2d_planning
        # Reject the unsupported combination listed above.
        assert not ((not self.full_observability_2d_planning) and self.collision_with_pb_2d_planning)
        self.robot_footprint_radius = self.env.sensors["scan_occ"].robot_footprint_radius
        if self.full_observability_2d_planning:
            # TODO: it may be better to unify and make that scene.floor_map uses OccupancyGridState values always
            assert len(self.env.scene.floor_map) == 1  # We assume there is only one floor (not true for Gibson scenes)
            self.map_2d = np.array(self.env.scene.floor_map[0])
            # Binarize: 255 marks traversable pixels in the floor map.
            self.map_2d = np.array((self.map_2d == 255)).astype(np.float32)
            self.per_pixel_resolution = self.env.scene.trav_map_resolution
            # The global map must be square.
            assert np.array(self.map_2d).shape[0] == np.array(self.map_2d).shape[1]
            self.grid_resolution = self.map_2d.shape[0]
            self.occupancy_range = self.grid_resolution * self.per_pixel_resolution
            self.robot_footprint_radius_in_map = int(np.ceil(self.robot_footprint_radius / self.per_pixel_resolution))
        else:
            # Lidar-based occupancy grid: take the parameters from the sensor.
            self.grid_resolution = self.env.grid_resolution
            self.occupancy_range = self.env.sensors["scan_occ"].occupancy_range
            self.robot_footprint_radius_in_map = self.env.sensors["scan_occ"].robot_footprint_radius_in_map
        self.robot = self.env.robots[0]
        self.base_mp_algo = base_mp_algo
        self.arm_mp_algo = arm_mp_algo
        # If we plan in the map, we do not need to check rotations: a location is in collision (or not) independently
        # of the orientation. If we use pybullet, we may find some cases where the base orientation changes the
        # collision value for the same location between True/False
        if not self.collision_with_pb_2d_planning:
            self.base_mp_resolutions = np.array([0.05, 0.05, 2 * np.pi])
        else:
            self.base_mp_resolutions = np.array([0.05, 0.05, 0.05])
        self.optimize_iter = optimize_iter
        self.mode = self.env.mode
        self.initial_height = self.env.initial_pos_z_offset
        self.fine_motion_plan = fine_motion_plan
        self.robot_type = self.robot.model_name
        if self.env.simulator.viewer is not None:
            self.env.simulator.viewer.setup_motion_planner(self)
        # Arm planning is only configured for robots with a supported arm.
        if self.robot_type in ["Fetch"]:
            self.setup_arm_mp()
        self.arm_interaction_length = 0.2
        # Visual markers for subgoals (only created in GUI modes).
        self.marker = None
        self.marker_direction = None
        if self.mode in ["gui_non_interactive", "gui_interactive"]:
            self.marker = VisualMarker(radius=0.04, rgba_color=[0, 0, 1, 1])
            self.marker_direction = VisualMarker(
                visual_shape=p.GEOM_CAPSULE,
                radius=0.01,
                length=0.2,
                initial_offset=[0, 0, -0.1],
                rgba_color=[0, 0, 1, 1],
            )
            self.env.simulator.import_object(self.marker)
            self.env.simulator.import_object(self.marker_direction)
        self.visualize_2d_planning = visualize_2d_planning
        self.visualize_2d_result = visualize_2d_result
    def set_marker_position(self, pos):
        """
        Set subgoal marker position (no-op unless markers were created in GUI mode)

        :param pos: position, 3D point in world frame
        """
        self.marker.set_position(pos)
def set_marker_position_yaw(self, pos, yaw):
"""
Set subgoal marker position and orientation
:param pos: position
:param yaw: yaw angle
"""
quat = quatToXYZW(seq="wxyz", orn=euler.euler2quat(0, -np.pi / 2, yaw))
self.marker.set_position(pos)
self.marker_direction.set_position_orientation(pos, quat)
def set_marker_position_direction(self, pos, direction):
"""
Set subgoal marker position and orientation
:param pos: position
:param direction: direction vector
"""
yaw = np.arctan2(direction[1], direction[0])
self.set_marker_position_yaw(pos, yaw)
    def setup_arm_mp(self):
        """
        Set up arm motion planner: joint name/id tables for the arm and the
        list of pybullet bodies treated as obstacles during arm planning.
        """
        if self.robot_type == "Fetch":
            # Rest pose used as the default arm configuration (torso lift +
            # seven arm joints, in the order of arm_joint_names below).
            self.arm_default_joint_positions = (
                0.1,
                -1.41,
                1.517,
                0.82,
                2.2,
                2.96,
                -1.286,
                0.0,
            )
            # Joints the arm planner controls.
            self.arm_joint_names = [
                "torso_lift_joint",
                "shoulder_pan_joint",
                "shoulder_lift_joint",
                "upperarm_roll_joint",
                "elbow_flex_joint",
                "forearm_roll_joint",
                "wrist_flex_joint",
                "wrist_roll_joint",
            ]
            # Full ordered joint list of the robot, used to locate the arm
            # joints' indices below.
            self.robot_joint_names = [
                "r_wheel_joint",
                "l_wheel_joint",
                "torso_lift_joint",
                "head_pan_joint",
                "head_tilt_joint",
                "shoulder_pan_joint",
                "shoulder_lift_joint",
                "upperarm_roll_joint",
                "elbow_flex_joint",
                "forearm_roll_joint",
                "wrist_flex_joint",
                "wrist_roll_joint",
                "r_gripper_finger_joint",
                "l_gripper_finger_joint",
            ]
            # Resolve names to pybullet joint ids on the robot body.
            self.arm_joint_ids = joints_from_names(
                self.robot_id,
                self.arm_joint_names,
            )
            # Positions of the arm joints within robot_joint_names.
            self.robot_arm_indices = [
                self.robot_joint_names.index(arm_joint_name) for arm_joint_name in self.arm_joint_names
            ]
        self.arm_ik_threshold = 0.05
        # Bodies to collision-check against during arm motion planning.
        self.mp_obstacles = []
        if type(self.env.scene) == StaticIndoorScene:
            if self.env.scene.mesh_body_id is not None:
                self.mp_obstacles.append(self.env.scene.mesh_body_id)
        elif type(self.env.scene) == InteractiveIndoorScene:
            self.mp_obstacles.extend(self.env.scene.get_body_ids())
            # Since the refactoring, the robot is another object in the scene
            # We need to remove it to not check twice for self collisions
            self.mp_obstacles.remove(self.robot_id)
def plan_base_motion(self, goal):
    """
    Plan base motion given a base subgoal

    :param goal: base subgoal as (x, y, theta) in the world frame
    :return: waypoints or None if no plan can be found
    """
    if self.marker is not None:
        # Visualize the goal slightly above the floor, oriented along theta.
        self.set_marker_position_yaw([goal[0], goal[1], 0.05], goal[2])
    log.debug("Motion planning base goal: {}".format(goal))
    state = self.env.get_state()
    x, y, theta = goal
    # Plan either on the egocentric occupancy grid or on the full scene map.
    map_2d = state["occupancy_grid"] if not self.full_observability_2d_planning else self.map_2d
    if not self.full_observability_2d_planning:
        yaw = self.robot.get_rpy()[2]
        half_occupancy_range = self.occupancy_range / 2.0
        robot_position_xy = self.robot.get_position()[:2]
        # World-frame corners of the egocentric occupancy window, rotated
        # from the robot frame; later reduced to an axis-aligned bounding box.
        corners = [
            robot_position_xy + rotate_vector_2d(local_corner, -yaw)
            for local_corner in [
                np.array([half_occupancy_range, half_occupancy_range]),
                np.array([half_occupancy_range, -half_occupancy_range]),
                np.array([-half_occupancy_range, half_occupancy_range]),
                np.array([-half_occupancy_range, -half_occupancy_range]),
            ]
        ]
    else:
        # Full-map planning: bounds are the world coordinates of the map extremes.
        top_left = self.env.scene.map_to_world(np.array([0, 0]))
        bottom_right = self.env.scene.map_to_world(np.array(self.map_2d.shape) - np.array([1, 1]))
        corners = [top_left, bottom_right]
    if self.collision_with_pb_2d_planning:
        # All scene bodies except the robot itself and the floor.
        obstacles = [
            body_id
            for body_id in self.env.scene.get_body_ids()
            if body_id not in self.robot.get_body_ids()
            and body_id != self.env.scene.objects_by_category["floors"][0].get_body_ids()[0]
        ]
    else:
        obstacles = []
    path = plan_base_motion_2d(
        self.robot_id,
        [x, y, theta],
        (tuple(np.min(corners, axis=0)), tuple(np.max(corners, axis=0))),
        map_2d=map_2d,
        occupancy_range=self.occupancy_range,
        grid_resolution=self.grid_resolution,
        # If we use the global map, it has been eroded: we do not need to use the full size of the robot, a 1 px
        # robot would be enough
        robot_footprint_radius_in_map=[self.robot_footprint_radius_in_map, 1][self.full_observability_2d_planning],
        resolutions=self.base_mp_resolutions,
        # Add all objects in the scene as obstacles except the robot itself and the floor
        obstacles=obstacles,
        algorithm=self.base_mp_algo,
        optimize_iter=self.optimize_iter,
        visualize_planning=self.visualize_2d_planning,
        visualize_result=self.visualize_2d_result,
        metric2map=[None, self.env.scene.world_to_map][self.full_observability_2d_planning],
        flip_vertically=self.full_observability_2d_planning,
        use_pb_for_collisions=self.collision_with_pb_2d_planning,
    )
    if path is not None and len(path) > 0:
        log.debug("Path found!")
    else:
        log.debug("Path NOT found!")
    return path
def simulator_sync(self):
    """Sync the simulator state to the renderer."""
    self.env.simulator.sync()
def simulator_step(self):
    """Step the simulator once, then sync the simulator to the renderer."""
    self.env.simulator.step()
    self.simulator_sync()
def dry_run_base_plan(self, path):
    """
    Replay a base motion plan by teleporting the base (no physics simulation).

    :param path: list of base waypoints, or None if planning failed
    """
    if path is None:
        return
    if self.mode not in ["gui_non_interactive", "gui_interactive"]:
        # Headless: jump straight to the final waypoint.
        last = path[-1]
        set_base_values_with_z(self.robot_id, [last[0], last[1], last[2]], z=self.initial_height)
        return
    for wp in path:
        set_base_values_with_z(self.robot_id, [wp[0], wp[1], wp[2]], z=self.initial_height)
        self.simulator_sync()
        # sleep(0.005) # for animation
def get_ik_parameters(self):
    """
    Collect IK solver parameters: joint limits, rest pose, ranges and damping.

    :return: tuple (max_limits, min_limits, rest_position, joint_range,
        joint_damping); all elements are None for robot types without an
        IK configuration
    """
    if self.robot_type != "Fetch":
        return (None, None, None, None, None)
    arm_max = get_max_limits(self.robot_id, self.arm_joint_ids)
    arm_min = get_min_limits(self.robot_id, self.arm_joint_ids)
    # Wheel / head entries are padded with fixed bounds around the arm limits.
    max_limits = [0.5, 0.5] + [arm_max[0]] + [0.5, 0.5] + list(arm_max[1:]) + [0.05, 0.05]
    min_limits = [-0.5, -0.5] + [arm_min[0]] + [-0.5, -0.5] + list(arm_min[1:]) + [0.0, 0.0]
    # increase torso_lift_joint lower limit to 0.02 to avoid self-collision
    min_limits[2] += 0.02
    current = get_joint_positions(self.robot_id, self.arm_joint_ids)
    rest_position = [0.0, 0.0] + [current[0]] + [0.0, 0.0] + list(current[1:]) + [0.01, 0.01]
    # Widen each range by 1 (same as the original numpy subtraction + 1).
    joint_range = [upper - lower + 1 for upper, lower in zip(max_limits, min_limits)]
    joint_damping = [0.1] * len(joint_range)
    return (max_limits, min_limits, rest_position, joint_range, joint_damping)
def get_arm_joint_positions(self, arm_ik_goal):
    """
    Attempt to find arm_joint_positions that satisfies arm_subgoal
    If failed, return None

    :param arm_ik_goal: [x, y, z] end-effector target in the world frame
    :return: arm joint positions, or None if no collision-free IK solution
        was found within the attempt budget
    """
    log.debug("IK query for EE position {}".format(arm_ik_goal))
    ik_start = time()
    max_limits, min_limits, rest_position, joint_range, joint_damping = self.get_ik_parameters()
    n_attempt = 0
    max_attempt = 75
    # Random arm configurations used to re-seed the local IK solver.
    sample_fn = get_sample_fn(self.robot_id, self.arm_joint_ids)
    base_pose = get_base_values(self.robot_id)
    # Snapshot the simulation so each attempt's side effects can be undone.
    state_id = p.saveState()
    # p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False)
    # find collision-free IK solution for arm_subgoal
    while n_attempt < max_attempt:
        # Re-seed from a random configuration to escape local minima.
        set_joint_positions(self.robot_id, self.arm_joint_ids, sample_fn())
        arm_joint_positions = p.calculateInverseKinematics(
            self.robot_id,
            self.robot.eef_links[self.robot.default_arm].link_id,
            targetPosition=arm_ik_goal,
            # targetOrientation=self.robots[0].get_orientation(),
            lowerLimits=min_limits,
            upperLimits=max_limits,
            jointRanges=joint_range,
            restPoses=rest_position,
            jointDamping=joint_damping,
            # solver=p.IK_DLS,
            maxNumIterations=100,
        )
        if self.robot_type == "Fetch":
            # The IK result covers all robot joints; keep only the arm joints.
            arm_joint_positions = np.array(arm_joint_positions)[self.robot_arm_indices]
        set_joint_positions(self.robot_id, self.arm_joint_ids, arm_joint_positions)
        dist = l2_distance(self.robot.get_eef_position(), arm_ik_goal)
        # print('dist', dist)
        if dist > self.arm_ik_threshold:
            # IK did not converge close enough to the goal; retry.
            n_attempt += 1
            continue
        # need to simulator_step to get the latest collision
        self.simulator_step()
        # simulator_step will slightly move the robot base and the objects
        set_base_values_with_z(self.robot_id, base_pose, z=self.initial_height)
        # self.reset_object_states()
        # TODO: have a princpled way for stashing and resetting object states
        # arm should not have any collision
        collision_free = is_collision_free(body_a=self.robot_id, link_a_list=self.arm_joint_ids)
        if not collision_free:
            n_attempt += 1
            # print('arm has collision')
            continue
        # gripper should not have any self-collision
        collision_free = is_collision_free(
            body_a=self.robot_id,
            link_a_list=[self.robot.eef_links[self.robot.default_arm].link_id],
            body_b=self.robot_id,
        )
        if not collision_free:
            n_attempt += 1
            log.debug("Gripper in collision")
            continue
        # self.episode_metrics['arm_ik_time'] += time() - ik_start
        # p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
        # Success: restore the pre-query simulation state before returning.
        restoreState(state_id)
        p.removeState(state_id)
        log.debug("IK Solver found a valid configuration")
        return arm_joint_positions
    # p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
    restoreState(state_id)
    p.removeState(state_id)
    # self.episode_metrics['arm_ik_time'] += time() - ik_start
    log.debug("IK Solver failed to find a configuration")
    return None
def plan_arm_motion(self, arm_joint_positions, override_fetch_collision_links=False):
    """
    Attempt to reach arm arm_joint_positions and return arm trajectory
    If failed, reset the arm to its original pose and return None

    :param arm_joint_positions: final arm joint position to reach
    :param override_fetch_collision_links: if True, include Fetch hand and finger collisions while motion planning
    :return: arm trajectory or None if no plan can be found
    """
    log.debug("Planning path in joint space to {}".format(arm_joint_positions))
    disabled_collisions = {}
    if self.robot_type == "Fetch":
        # Fetch link pairs whose mutual collisions are ignored by the planner.
        disabled_collisions = {
            (link_from_name(self.robot_id, "torso_lift_link"), link_from_name(self.robot_id, "torso_fixed_link")),
            (link_from_name(self.robot_id, "torso_lift_link"), link_from_name(self.robot_id, "shoulder_lift_link")),
            (link_from_name(self.robot_id, "torso_lift_link"), link_from_name(self.robot_id, "upperarm_roll_link")),
            (link_from_name(self.robot_id, "torso_lift_link"), link_from_name(self.robot_id, "forearm_roll_link")),
            (link_from_name(self.robot_id, "torso_lift_link"), link_from_name(self.robot_id, "elbow_flex_link")),
        }
    if self.fine_motion_plan:
        self_collisions = True
        mp_obstacles = self.mp_obstacles
    else:
        # Coarse planning: skip self-collision and obstacle checking.
        self_collisions = False
        mp_obstacles = []
    # NOTE(review): plan_arm_start is assigned but never used in this block.
    plan_arm_start = time()
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False)
    # Snapshot the simulation; planning perturbs the joint states.
    state_id = p.saveState()
    allow_collision_links = []
    if self.robot_type == "Fetch" and not override_fetch_collision_links:
        # By default allow the end effector and fingers to be in collision.
        allow_collision_links = [self.robot.eef_links[self.robot.default_arm].link_id] + [
            finger.link_id for finger in self.robot.finger_links[self.robot.default_arm]
        ]
    arm_path = plan_joint_motion(
        self.robot_id,
        self.arm_joint_ids,
        arm_joint_positions,
        disabled_collisions=disabled_collisions,
        self_collisions=self_collisions,
        obstacles=mp_obstacles,
        algorithm=self.arm_mp_algo,
        allow_collision_links=allow_collision_links,
    )
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)
    # Undo any state changes introduced by planning.
    restoreState(state_id)
    p.removeState(state_id)
    if arm_path is not None and len(arm_path) > 0:
        log.debug("Path found!")
    else:
        log.debug("Path NOT found!")
    return arm_path
def dry_run_arm_plan(self, arm_path):
    """
    Replay an arm trajectory by teleporting the joints (no physics simulation).

    :param arm_path: arm joint-space trajectory, or None if planning failed
    """
    base_pose = get_base_values(self.robot_id)
    if arm_path is None:
        # No plan: fall back to the default arm configuration.
        set_joint_positions(self.robot_id, self.arm_joint_ids, self.arm_default_joint_positions)
        return
    if self.mode not in ["gui_non_interactive", "gui_interactive"]:
        # Headless: jump straight to the final joint configuration.
        set_joint_positions(self.robot_id, self.arm_joint_ids, arm_path[-1])
        return
    for waypoint in arm_path:
        set_joint_positions(self.robot_id, self.arm_joint_ids, waypoint)
        # Restore the base pose after each waypoint.
        set_base_values_with_z(self.robot_id, base_pose, z=self.initial_height)
        self.simulator_sync()
        # sleep(0.02) # animation
def plan_arm_push(self, hit_pos, hit_normal):
    """
    Plan an arm trajectory that reaches ``hit_pos``, preparing a later push.

    :param hit_pos: 3D position to reach
    :param hit_normal: push direction to use after reaching the position
    :return: arm trajectory, or None if no plan can be found
    """
    log.debug("Planning arm push at point {} with direction {}".format(hit_pos, hit_normal))
    if self.marker is not None:
        self.set_marker_position_direction(hit_pos, hit_normal)
    # Solve IK for the desired end-effector position.
    joint_positions = self.get_arm_joint_positions(hit_pos)
    if joint_positions is None:
        log.debug("Planning failed: goal position may be non-reachable")
        return None
    # Start motion planning from the default (e.g. untucked) configuration.
    set_joint_positions(self.robot_id, self.arm_joint_ids, self.arm_default_joint_positions)
    self.simulator_sync()
    return self.plan_arm_motion(joint_positions)
def interact(self, push_point, push_direction):
    """
    Move the arm starting from ``push_point`` along ``push_direction``
    and physically simulate the interaction.

    :param push_point: 3D point to start pushing from
    :param push_direction: push direction, scaled by self.arm_interaction_length
    """
    push_vector = np.array(push_direction) * self.arm_interaction_length
    max_limits, min_limits, rest_position, joint_range, joint_damping = self.get_ik_parameters()
    base_pose = get_base_values(self.robot_id)
    steps = 50
    for i in range(steps):
        # Interpolate the end-effector goal along the push direction.
        push_goal = np.array(push_point) + push_vector * (i + 1) / float(steps)
        joint_positions = p.calculateInverseKinematics(
            self.robot_id,
            self.robot.eef_links[self.robot.default_arm].link_id,
            targetPosition=push_goal,
            # targetOrientation=self.robots[0].get_orientation(),
            lowerLimits=min_limits,
            upperLimits=max_limits,
            jointRanges=joint_range,
            restPoses=rest_position,
            jointDamping=joint_damping,
            # solver=p.IK_DLS,
            maxNumIterations=100,
        )
        if self.robot_type == "Fetch":
            # The IK solution covers all joints; keep only the arm joints.
            joint_positions = np.array(joint_positions)[self.robot_arm_indices]
        control_joints(self.robot_id, self.arm_joint_ids, joint_positions)
        # set_joint_positions(self.robot_id, self.arm_joint_ids, joint_positions)
        self.simulator_step()
        # simulator_step will slightly move the robot base; pin it back.
        set_base_values_with_z(self.robot_id, base_pose, z=self.initial_height)
        if self.mode == "gui_interactive":
            sleep(0.02)  # for visualization
def execute_arm_push(self, plan, hit_pos, hit_normal):
    """
    Execute an arm push: replay ``plan``, push, then restore the default arm pose.

    Should be called after plan_arm_push().

    :param plan: arm trajectory, or None if no plan could be found
    :param hit_pos: 3D position to reach
    :param hit_normal: direction to push after reaching that position
    """
    if plan is None:
        return
    log.debug("Teleporting arm along the trajectory. No physics simulation")
    self.dry_run_arm_plan(plan)
    log.debug("Performing pushing actions")
    self.interact(hit_pos, hit_normal)
    log.debug("Teleporting arm to the default configuration")
    set_joint_positions(self.robot_id, self.arm_joint_ids, self.arm_default_joint_positions)
    self.simulator_sync()
| [
"logging.getLogger",
"igibson.external.pybullet_tools.utils.get_base_values",
"time.sleep",
"numpy.array",
"numpy.arctan2",
"igibson.external.pybullet_tools.utils.control_joints",
"igibson.external.pybullet_tools.utils.link_from_name",
"igibson.utils.utils.rotate_vector_2d",
"igibson.external.pybull... | [((123, 150), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (140, 150), False, 'import logging\n'), ((6158, 6196), 'numpy.arctan2', 'np.arctan2', (['direction[1]', 'direction[0]'], {}), '(direction[1], direction[0])\n', (6168, 6196), True, 'import numpy as np\n'), ((14300, 14306), 'time.time', 'time', ([], {}), '()\n', (14304, 14306), False, 'from time import sleep, time\n'), ((14477, 14525), 'igibson.external.pybullet_tools.utils.get_sample_fn', 'get_sample_fn', (['self.robot_id', 'self.arm_joint_ids'], {}), '(self.robot_id, self.arm_joint_ids)\n', (14490, 14525), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((14546, 14576), 'igibson.external.pybullet_tools.utils.get_base_values', 'get_base_values', (['self.robot_id'], {}), '(self.robot_id)\n', (14561, 14576), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((14596, 14609), 'pybullet.saveState', 'p.saveState', ([], {}), '()\n', (14607, 14609), True, 'import pybullet as p\n'), ((17351, 17373), 'igibson.utils.utils.restoreState', 'restoreState', (['state_id'], {}), '(state_id)\n', (17363, 17373), False, 'from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d\n'), ((17382, 17405), 'pybullet.removeState', 'p.removeState', (['state_id'], {}), '(state_id)\n', (17395, 17405), True, 'import pybullet as p\n'), ((19095, 19101), 'time.time', 'time', ([], {}), '()\n', (19099, 19101), False, 'from time import sleep, time\n'), 
((19110, 19167), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(False)'], {}), '(p.COV_ENABLE_RENDERING, False)\n', (19136, 19167), True, 'import pybullet as p\n'), ((19187, 19200), 'pybullet.saveState', 'p.saveState', ([], {}), '()\n', (19198, 19200), True, 'import pybullet as p\n'), ((19536, 19793), 'igibson.external.pybullet_tools.utils.plan_joint_motion', 'plan_joint_motion', (['self.robot_id', 'self.arm_joint_ids', 'arm_joint_positions'], {'disabled_collisions': 'disabled_collisions', 'self_collisions': 'self_collisions', 'obstacles': 'mp_obstacles', 'algorithm': 'self.arm_mp_algo', 'allow_collision_links': 'allow_collision_links'}), '(self.robot_id, self.arm_joint_ids, arm_joint_positions,\n disabled_collisions=disabled_collisions, self_collisions=\n self_collisions, obstacles=mp_obstacles, algorithm=self.arm_mp_algo,\n allow_collision_links=allow_collision_links)\n', (19553, 19793), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((19896, 19952), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(True)'], {}), '(p.COV_ENABLE_RENDERING, True)\n', (19922, 19952), True, 'import pybullet as p\n'), ((19961, 19983), 'igibson.utils.utils.restoreState', 'restoreState', (['state_id'], {}), '(state_id)\n', (19973, 19983), False, 'from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d\n'), ((19992, 20015), 'pybullet.removeState', 'p.removeState', (['state_id'], {}), '(state_id)\n', (20005, 20015), True, 'import pybullet as p\n'), ((20442, 20472), 'igibson.external.pybullet_tools.utils.get_base_values', 'get_base_values', (['self.robot_id'], {}), '(self.robot_id)\n', (20457, 20472), False, 
'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((22761, 22791), 'igibson.external.pybullet_tools.utils.get_base_values', 'get_base_values', (['self.robot_id'], {}), '(self.robot_id)\n', (22776, 22791), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((2805, 2842), 'numpy.array', 'np.array', (['self.env.scene.floor_map[0]'], {}), '(self.env.scene.floor_map[0])\n', (2813, 2842), True, 'import numpy as np\n'), ((4107, 4140), 'numpy.array', 'np.array', (['[0.05, 0.05, 2 * np.pi]'], {}), '([0.05, 0.05, 2 * np.pi])\n', (4115, 4140), True, 'import numpy as np\n'), ((4194, 4222), 'numpy.array', 'np.array', (['[0.05, 0.05, 0.05]'], {}), '([0.05, 0.05, 0.05])\n', (4202, 4222), True, 'import numpy as np\n'), ((4850, 4900), 'igibson.objects.visual_marker.VisualMarker', 'VisualMarker', ([], {'radius': '(0.04)', 'rgba_color': '[0, 0, 1, 1]'}), '(radius=0.04, rgba_color=[0, 0, 1, 1])\n', (4862, 4900), False, 'from igibson.objects.visual_marker import VisualMarker\n'), ((4937, 5061), 'igibson.objects.visual_marker.VisualMarker', 'VisualMarker', ([], {'visual_shape': 'p.GEOM_CAPSULE', 'radius': '(0.01)', 'length': '(0.2)', 'initial_offset': '[0, 0, -0.1]', 'rgba_color': '[0, 0, 1, 1]'}), '(visual_shape=p.GEOM_CAPSULE, radius=0.01, length=0.2,\n initial_offset=[0, 0, -0.1], rgba_color=[0, 0, 1, 1])\n', (4949, 5061), False, 'from igibson.objects.visual_marker import VisualMarker\n'), ((7562, 7616), 'igibson.external.pybullet_tools.utils.joints_from_names', 'joints_from_names', 
(['self.robot_id', 'self.arm_joint_names'], {}), '(self.robot_id, self.arm_joint_names)\n', (7579, 7616), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((12981, 13030), 'igibson.external.pybullet_tools.utils.get_max_limits', 'get_max_limits', (['self.robot_id', 'self.arm_joint_ids'], {}), '(self.robot_id, self.arm_joint_ids)\n', (12995, 13030), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((13173, 13222), 'igibson.external.pybullet_tools.utils.get_min_limits', 'get_min_limits', (['self.robot_id', 'self.arm_joint_ids'], {}), '(self.robot_id, self.arm_joint_ids)\n', (13187, 13222), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((13487, 13541), 'igibson.external.pybullet_tools.utils.get_joint_positions', 'get_joint_positions', (['self.robot_id', 'self.arm_joint_ids'], {}), '(self.robot_id, self.arm_joint_ids)\n', (13506, 13541), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((14890, 15178), 'pybullet.calculateInverseKinematics', 'p.calculateInverseKinematics', 
(['self.robot_id', 'self.robot.eef_links[self.robot.default_arm].link_id'], {'targetPosition': 'arm_ik_goal', 'lowerLimits': 'min_limits', 'upperLimits': 'max_limits', 'jointRanges': 'joint_range', 'restPoses': 'rest_position', 'jointDamping': 'joint_damping', 'maxNumIterations': '(100)'}), '(self.robot_id, self.robot.eef_links[self.robot\n .default_arm].link_id, targetPosition=arm_ik_goal, lowerLimits=\n min_limits, upperLimits=max_limits, jointRanges=joint_range, restPoses=\n rest_position, jointDamping=joint_damping, maxNumIterations=100)\n', (14918, 15178), True, 'import pybullet as p\n'), ((15577, 15652), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['self.robot_id', 'self.arm_joint_ids', 'arm_joint_positions'], {}), '(self.robot_id, self.arm_joint_ids, arm_joint_positions)\n', (15596, 15652), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((16056, 16127), 'igibson.external.pybullet_tools.utils.set_base_values_with_z', 'set_base_values_with_z', (['self.robot_id', 'base_pose'], {'z': 'self.initial_height'}), '(self.robot_id, base_pose, z=self.initial_height)\n', (16078, 16127), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((16329, 16400), 'igibson.external.pybullet_tools.utils.is_collision_free', 'is_collision_free', ([], {'body_a': 'self.robot_id', 'link_a_list': 'self.arm_joint_ids'}), '(body_a=self.robot_id, link_a_list=self.arm_joint_ids)\n', (16346, 16400), False, 'from igibson.external.pybullet_tools.utils import 
control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((16625, 16759), 'igibson.external.pybullet_tools.utils.is_collision_free', 'is_collision_free', ([], {'body_a': 'self.robot_id', 'link_a_list': '[self.robot.eef_links[self.robot.default_arm].link_id]', 'body_b': 'self.robot_id'}), '(body_a=self.robot_id, link_a_list=[self.robot.eef_links[\n self.robot.default_arm].link_id], body_b=self.robot_id)\n', (16642, 16759), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((17114, 17136), 'igibson.utils.utils.restoreState', 'restoreState', (['state_id'], {}), '(state_id)\n', (17126, 17136), False, 'from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d\n'), ((17149, 17172), 'pybullet.removeState', 'p.removeState', (['state_id'], {}), '(state_id)\n', (17162, 17172), True, 'import pybullet as p\n'), ((21029, 21122), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['self.robot_id', 'self.arm_joint_ids', 'self.arm_default_joint_positions'], {}), '(self.robot_id, self.arm_joint_ids, self.\n arm_default_joint_positions)\n', (21048, 21122), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((21938, 22031), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['self.robot_id', 
'self.arm_joint_ids', 'self.arm_default_joint_positions'], {}), '(self.robot_id, self.arm_joint_ids, self.\n arm_default_joint_positions)\n', (21957, 22031), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((22584, 22608), 'numpy.array', 'np.array', (['push_direction'], {}), '(push_direction)\n', (22592, 22608), True, 'import numpy as np\n'), ((22958, 23243), 'pybullet.calculateInverseKinematics', 'p.calculateInverseKinematics', (['self.robot_id', 'self.robot.eef_links[self.robot.default_arm].link_id'], {'targetPosition': 'push_goal', 'lowerLimits': 'min_limits', 'upperLimits': 'max_limits', 'jointRanges': 'joint_range', 'restPoses': 'rest_position', 'jointDamping': 'joint_damping', 'maxNumIterations': '(100)'}), '(self.robot_id, self.robot.eef_links[self.robot\n .default_arm].link_id, targetPosition=push_goal, lowerLimits=min_limits,\n upperLimits=max_limits, jointRanges=joint_range, restPoses=\n rest_position, jointDamping=joint_damping, maxNumIterations=100)\n', (22986, 23243), True, 'import pybullet as p\n'), ((23635, 23701), 'igibson.external.pybullet_tools.utils.control_joints', 'control_joints', (['self.robot_id', 'self.arm_joint_ids', 'joint_positions'], {}), '(self.robot_id, self.arm_joint_ids, joint_positions)\n', (23649, 23701), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((23888, 23959), 'igibson.external.pybullet_tools.utils.set_base_values_with_z', 'set_base_values_with_z', (['self.robot_id', 'base_pose'], {'z': 'self.initial_height'}), 
'(self.robot_id, base_pose, z=self.initial_height)\n', (23910, 23959), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((24758, 24851), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['self.robot_id', 'self.arm_joint_ids', 'self.arm_default_joint_positions'], {}), '(self.robot_id, self.arm_joint_ids, self.\n arm_default_joint_positions)\n', (24777, 24851), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((3271, 3335), 'numpy.ceil', 'np.ceil', (['(self.robot_footprint_radius / self.per_pixel_resolution)'], {}), '(self.robot_footprint_radius / self.per_pixel_resolution)\n', (3278, 3335), True, 'import numpy as np\n'), ((5791, 5827), 'transforms3d.euler.euler2quat', 'euler.euler2quat', (['(0)', '(-np.pi / 2)', 'yaw'], {}), '(0, -np.pi / 2, yaw)\n', (5807, 5827), False, 'from transforms3d import euler\n'), ((9710, 9726), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9718, 9726), True, 'import numpy as np\n'), ((12533, 12639), 'igibson.external.pybullet_tools.utils.set_base_values_with_z', 'set_base_values_with_z', (['self.robot_id', '[path[-1][0], path[-1][1], path[-1][2]]'], {'z': 'self.initial_height'}), '(self.robot_id, [path[-1][0], path[-1][1], path[-1][2\n ]], z=self.initial_height)\n', (12555, 12639), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, 
plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((20934, 21002), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', (['self.robot_id', 'self.arm_joint_ids', 'arm_path[-1]'], {}), '(self.robot_id, self.arm_joint_ids, arm_path[-1])\n', (20953, 21002), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((22867, 22887), 'numpy.array', 'np.array', (['push_point'], {}), '(push_point)\n', (22875, 22887), True, 'import numpy as np\n'), ((24024, 24035), 'time.sleep', 'sleep', (['(0.02)'], {}), '(0.02)\n', (24029, 24035), False, 'from time import sleep, time\n'), ((2869, 2897), 'numpy.array', 'np.array', (['(self.map_2d == 255)'], {}), '(self.map_2d == 255)\n', (2877, 2897), True, 'import numpy as np\n'), ((9230, 9266), 'igibson.utils.utils.rotate_vector_2d', 'rotate_vector_2d', (['local_corner', '(-yaw)'], {}), '(local_corner, -yaw)\n', (9246, 9266), False, 'from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d\n'), ((9783, 9810), 'numpy.array', 'np.array', (['self.map_2d.shape'], {}), '(self.map_2d.shape)\n', (9791, 9810), True, 'import numpy as np\n'), ((9813, 9829), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (9821, 9829), True, 'import numpy as np\n'), ((10359, 10382), 'numpy.min', 'np.min', (['corners'], {'axis': '(0)'}), '(corners, axis=0)\n', (10365, 10382), True, 'import numpy as np\n'), ((10391, 10414), 'numpy.max', 'np.max', (['corners'], {'axis': '(0)'}), '(corners, axis=0)\n', (10397, 10414), True, 'import numpy as np\n'), ((12255, 12363), 'igibson.external.pybullet_tools.utils.set_base_values_with_z', 'set_base_values_with_z', (['self.robot_id', '[way_point[0], way_point[1], way_point[2]]'], 
{'z': 'self.initial_height'}), '(self.robot_id, [way_point[0], way_point[1],\n way_point[2]], z=self.initial_height)\n', (12277, 12363), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((13693, 13713), 'numpy.array', 'np.array', (['max_limits'], {}), '(max_limits)\n', (13701, 13713), True, 'import numpy as np\n'), ((13716, 13736), 'numpy.array', 'np.array', (['min_limits'], {}), '(min_limits)\n', (13724, 13736), True, 'import numpy as np\n'), ((15510, 15539), 'numpy.array', 'np.array', (['arm_joint_positions'], {}), '(arm_joint_positions)\n', (15518, 15539), True, 'import numpy as np\n'), ((18278, 18326), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""torso_lift_link"""'], {}), "(self.robot_id, 'torso_lift_link')\n", (18292, 18326), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18328, 18377), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""torso_fixed_link"""'], {}), "(self.robot_id, 'torso_fixed_link')\n", (18342, 18377), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18397, 18445), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""torso_lift_link"""'], {}), 
"(self.robot_id, 'torso_lift_link')\n", (18411, 18445), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18447, 18498), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""shoulder_lift_link"""'], {}), "(self.robot_id, 'shoulder_lift_link')\n", (18461, 18498), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18518, 18566), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""torso_lift_link"""'], {}), "(self.robot_id, 'torso_lift_link')\n", (18532, 18566), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18568, 18619), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""upperarm_roll_link"""'], {}), "(self.robot_id, 'upperarm_roll_link')\n", (18582, 18619), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18639, 18687), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', 
'"""torso_lift_link"""'], {}), "(self.robot_id, 'torso_lift_link')\n", (18653, 18687), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18689, 18739), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""forearm_roll_link"""'], {}), "(self.robot_id, 'forearm_roll_link')\n", (18703, 18739), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18759, 18807), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""torso_lift_link"""'], {}), "(self.robot_id, 'torso_lift_link')\n", (18773, 18807), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((18809, 18857), 'igibson.external.pybullet_tools.utils.link_from_name', 'link_from_name', (['self.robot_id', '"""elbow_flex_link"""'], {}), "(self.robot_id, 'elbow_flex_link')\n", (18823, 18857), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((20647, 20718), 'igibson.external.pybullet_tools.utils.set_joint_positions', 'set_joint_positions', 
(['self.robot_id', 'self.arm_joint_ids', 'joint_way_point'], {}), '(self.robot_id, self.arm_joint_ids, joint_way_point)\n', (20666, 20718), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((20739, 20810), 'igibson.external.pybullet_tools.utils.set_base_values_with_z', 'set_base_values_with_z', (['self.robot_id', 'base_pose'], {'z': 'self.initial_height'}), '(self.robot_id, base_pose, z=self.initial_height)\n', (20761, 20810), False, 'from igibson.external.pybullet_tools.utils import control_joints, get_base_values, get_joint_positions, get_max_limits, get_min_limits, get_sample_fn, is_collision_free, joints_from_names, link_from_name, plan_base_motion_2d, plan_joint_motion, set_base_values_with_z, set_joint_positions\n'), ((23572, 23597), 'numpy.array', 'np.array', (['joint_positions'], {}), '(joint_positions)\n', (23580, 23597), True, 'import numpy as np\n'), ((3013, 3034), 'numpy.array', 'np.array', (['self.map_2d'], {}), '(self.map_2d)\n', (3021, 3034), True, 'import numpy as np\n'), ((3047, 3068), 'numpy.array', 'np.array', (['self.map_2d'], {}), '(self.map_2d)\n', (3055, 3068), True, 'import numpy as np\n'), ((9325, 9379), 'numpy.array', 'np.array', (['[half_occupancy_range, half_occupancy_range]'], {}), '([half_occupancy_range, half_occupancy_range])\n', (9333, 9379), True, 'import numpy as np\n'), ((9401, 9456), 'numpy.array', 'np.array', (['[half_occupancy_range, -half_occupancy_range]'], {}), '([half_occupancy_range, -half_occupancy_range])\n', (9409, 9456), True, 'import numpy as np\n'), ((9478, 9533), 'numpy.array', 'np.array', (['[-half_occupancy_range, half_occupancy_range]'], {}), '([-half_occupancy_range, half_occupancy_range])\n', (9486, 9533), True, 'import numpy as np\n'), ((9555, 9611), 
'numpy.array', 'np.array', (['[-half_occupancy_range, -half_occupancy_range]'], {}), '([-half_occupancy_range, -half_occupancy_range])\n', (9563, 9611), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from pyrender import (
DirectionalLight,
SpotLight,
PointLight,
Texture,
PerspectiveCamera,
OrthographicCamera,
)
from pyrender.constants import SHADOW_TEX_SZ
def test_directional_light():
    """Exercise DirectionalLight defaults, validation, and shadow resources."""
    light = DirectionalLight()

    # Default construction: unnamed, white color, unit intensity.
    assert light.name is None
    assert np.all(light.color == 1.0)
    assert light.intensity == 1.0

    light.name = "direc"

    # Invalid property assignments must be rejected.
    with pytest.raises(ValueError):
        light.color = None
    with pytest.raises(TypeError):
        light.intensity = None

    # An explicit color overrides the default.
    light = DirectionalLight(color=[0.0, 0.0, 0.0])
    assert np.all(light.color == 0.0)

    # The generated shadow texture is square with the configured resolution.
    light._generate_shadow_texture()
    shadow_tex = light.shadow_texture
    assert isinstance(shadow_tex, Texture)
    assert shadow_tex.width == shadow_tex.height == SHADOW_TEX_SZ

    # Directional lights shadow-map through an orthographic camera whose
    # magnification and clip planes scale with the scene.
    scale = 5.0
    shadow_cam = light._get_shadow_camera(scene_scale=scale)
    assert isinstance(shadow_cam, OrthographicCamera)
    assert shadow_cam.xmag == shadow_cam.ymag == scale
    assert shadow_cam.znear == 0.01 * scale
    assert shadow_cam.zfar == 10 * scale
def test_spot_light():
    """Exercise SpotLight defaults, cone/range validation, and shadow setup."""
    light = SpotLight()

    # Default construction.
    assert light.name is None
    assert np.all(light.color == 1.0)
    assert light.intensity == 1.0
    assert light.innerConeAngle == 0.0
    assert light.outerConeAngle == np.pi / 4.0
    assert light.range is None

    # range must be strictly positive.
    for bad_range in (-1.0, 0.0):
        with pytest.raises(ValueError):
            light.range = bad_range

    # innerConeAngle: negative values, or values exceeding the current
    # outer cone (pi/4), are rejected.
    with pytest.raises(ValueError):
        light.innerConeAngle = -1.0
    with pytest.raises(ValueError):
        light.innerConeAngle = np.pi / 3.0

    # outerConeAngle: negative values and a full pi are rejected.
    with pytest.raises(ValueError):
        light.outerConeAngle = -1.0
    with pytest.raises(ValueError):
        light.outerConeAngle = np.pi

    # Valid reassignments are accepted in this order.
    light.range = 5.0
    light.outerConeAngle = np.pi / 2 - 0.05
    light.innerConeAngle = np.pi / 3
    light.innerConeAngle = 0.0
    light.outerConeAngle = np.pi / 4.0

    # The generated shadow texture is square with the configured resolution.
    light._generate_shadow_texture()
    shadow_tex = light.shadow_texture
    assert isinstance(shadow_tex, Texture)
    assert shadow_tex.width == shadow_tex.height == SHADOW_TEX_SZ

    # Spot lights shadow-map through a perspective camera; with
    # outerConeAngle = pi/4 the expected yfov is 2*(pi/4) + pi/16 = 9*pi/16.
    scale = 5.0
    shadow_cam = light._get_shadow_camera(scene_scale=scale)
    assert isinstance(shadow_cam, PerspectiveCamera)
    assert shadow_cam.znear == 0.01 * scale
    assert shadow_cam.zfar == 10 * scale
    assert shadow_cam.aspectRatio == 1.0
    assert np.allclose(shadow_cam.yfov, np.pi / 16.0 * 9.0)
def test_point_light():
    """Exercise PointLight defaults, range validation, and missing shadows."""
    light = PointLight()

    # Default construction.
    assert light.name is None
    assert np.all(light.color == 1.0)
    assert light.intensity == 1.0
    assert light.range is None

    # range must be strictly positive.
    for bad_range in (-1.0, 0.0):
        with pytest.raises(ValueError):
            light.range = bad_range
    light.range = 5.0

    # Point-light (cube-map) shadows are not implemented in pyrender.
    with pytest.raises(NotImplementedError):
        light._generate_shadow_texture()
    with pytest.raises(NotImplementedError):
        light._get_shadow_camera(scene_scale=5.0)
| [
"numpy.all",
"pyrender.DirectionalLight",
"numpy.allclose",
"pyrender.SpotLight",
"pytest.raises",
"pyrender.PointLight"
] | [((258, 276), 'pyrender.DirectionalLight', 'DirectionalLight', ([], {}), '()\n', (274, 276), False, 'from pyrender import DirectionalLight, SpotLight, PointLight, Texture, PerspectiveCamera, OrthographicCamera\n'), ((314, 336), 'numpy.all', 'np.all', (['(d.color == 1.0)'], {}), '(d.color == 1.0)\n', (320, 336), True, 'import numpy as np\n'), ((519, 558), 'pyrender.DirectionalLight', 'DirectionalLight', ([], {'color': '[0.0, 0.0, 0.0]'}), '(color=[0.0, 0.0, 0.0])\n', (535, 558), False, 'from pyrender import DirectionalLight, SpotLight, PointLight, Texture, PerspectiveCamera, OrthographicCamera\n'), ((570, 592), 'numpy.all', 'np.all', (['(d.color == 0.0)'], {}), '(d.color == 0.0)\n', (576, 592), True, 'import numpy as np\n'), ((968, 979), 'pyrender.SpotLight', 'SpotLight', ([], {}), '()\n', (977, 979), False, 'from pyrender import DirectionalLight, SpotLight, PointLight, Texture, PerspectiveCamera, OrthographicCamera\n'), ((1017, 1039), 'numpy.all', 'np.all', (['(s.color == 1.0)'], {}), '(s.color == 1.0)\n', (1023, 1039), True, 'import numpy as np\n'), ((2079, 2119), 'numpy.allclose', 'np.allclose', (['sc.yfov', '(np.pi / 16.0 * 9.0)'], {}), '(sc.yfov, np.pi / 16.0 * 9.0)\n', (2090, 2119), True, 'import numpy as np\n'), ((2171, 2183), 'pyrender.PointLight', 'PointLight', ([], {}), '()\n', (2181, 2183), False, 'from pyrender import DirectionalLight, SpotLight, PointLight, Texture, PerspectiveCamera, OrthographicCamera\n'), ((2221, 2243), 'numpy.all', 'np.all', (['(s.color == 1.0)'], {}), '(s.color == 1.0)\n', (2227, 2243), True, 'import numpy as np\n'), ((398, 423), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (411, 423), False, 'import pytest\n'), ((457, 481), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (470, 481), False, 'import pytest\n'), ((1185, 1210), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1198, 1210), False, 'import pytest\n'), ((1245, 1270), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1258, 1270), False, 'import pytest\n'), ((1304, 1329), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1317, 1329), False, 'import pytest\n'), ((1373, 1398), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1386, 1398), False, 'import pytest\n'), ((1449, 1474), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1462, 1474), False, 'import pytest\n'), ((1518, 1543), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1531, 1543), False, 'import pytest\n'), ((2311, 2336), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2324, 2336), False, 'import pytest\n'), ((2371, 2396), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2384, 2396), False, 'import pytest\n'), ((2449, 2483), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2462, 2483), False, 'import pytest\n'), ((2532, 2566), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2545, 2566), False, 'import pytest\n')] |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.math.generic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from scipy import special as sp_special
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import gradient_checker_v2 # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class LogCombinationsTest(test_case.TestCase):
  """Tests for `tfp.math.log_combinations`."""

  def testLogCombinationsBinomial(self):
    # With two-entry count vectors the multinomial coefficient reduces to
    # the binomial coefficient, which scipy computes directly.
    trials = [2, 5, 12, 15]
    successes = [1, 2, 4, 11]
    expected = np.log(sp_special.binom(trials, successes))
    counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
    actual = tfp.math.log_combinations(
        np.array(trials, dtype=np.float32), counts)
    self.assertEqual([4], actual.shape)
    self.assertAllClose(expected, self.evaluate(actual))

  def testLogCombinationsShape(self):
    # Batch of trial counts with shape [2, 2].
    trials = np.array([[2, 5], [12, 15]], dtype=np.float32)
    # Matching counts with shape [2, 2, 4]; the trailing axis is reduced.
    counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
    log_binom = tfp.math.log_combinations(trials, counts)
    self.assertEqual([2, 2], log_binom.shape)
@test_util.run_all_in_graph_and_eager_modes
class ReduceWeightedLogSumExp(test_case.TestCase):
  """Tests for `tfp.math.reduce_weighted_logsumexp`."""

  def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):
    """NumPy reference implementation.

    Returns `m + log|sum_j w_j exp(logx_j - m)|` (with `m` the max over
    `axis`, subtracted for numerical stability) together with the sign of
    the weighted sum, mirroring the op's `return_sign=True` contract.
    """
    m = np.max(logx, axis=axis, keepdims=True)
    sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)
    sgn = np.sign(sum_)
    if not keep_dims:
      m = np.squeeze(m, axis=axis)
    return m + np.log(sgn * sum_), sgn

  def testNoWeights(self):
    # Without weights the op must match tf.reduce_logsumexp in both value
    # and gradient, and report a positive sign for every row.
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    logx = tf.constant(logx_)
    with tf.GradientTape() as tape:
      tape.watch(logx)
      expected = tf.reduce_logsumexp(input_tensor=logx, axis=-1)
    grad_expected = tape.gradient(expected, logx)
    with tf.GradientTape() as tape:
      tape.watch(logx)
      actual, actual_sgn = tfp.math.reduce_weighted_logsumexp(
          logx, axis=-1, return_sign=True)
    grad_actual = tape.gradient(actual, logx)
    [
        actual_,
        actual_sgn_,
        grad_actual_,
        expected_,
        grad_expected_,
    ] = self.evaluate([
        actual,
        actual_sgn,
        grad_actual,
        expected,
        grad_expected,
    ])
    self.assertAllEqual(expected_, actual_)
    self.assertAllEqual(grad_expected_, grad_actual_)
    self.assertAllEqual([1., 1, 1], actual_sgn_)

  def testNegativeWeights(self):
    # Negative weights can make the weighted sum negative; the op reports
    # log|sum| plus a sign, which the NumPy reference reproduces.
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)
    logx = tf.constant(logx_)
    w = tf.constant(w_)
    actual, actual_sgn = tfp.math.reduce_weighted_logsumexp(
        logx, w, axis=-1, return_sign=True)
    actual_, actual_sgn_ = self.evaluate([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([-1., -1, 1], actual_sgn_)

  def testKeepDims(self):
    # Same inputs as testNegativeWeights, but the reduced axis is retained
    # as a size-1 dimension in both the value and the sign.
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(
        logx_, w_, axis=-1, keep_dims=True)
    logx = tf.constant(logx_)
    w = tf.constant(w_)
    actual, actual_sgn = tfp.math.reduce_weighted_logsumexp(
        logx, w, axis=-1, return_sign=True, keep_dims=True)
    actual_, actual_sgn_ = self.evaluate([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)

  def testDocString(self):
    """This test verifies the correctness of the docstring examples."""
    x = tf.constant([[0., 0, 0],
                     [0, 0, 0]])
    w = tf.constant([[-1., 1, 1],
                     [1, 1, 1]])
    self.assertAllClose(
        np.log(4),
        self.evaluate(tfp.math.reduce_weighted_logsumexp(x, w)))
    # errstate silences the expected log(0) = -inf divide warning below.
    with np.errstate(divide='ignore'):
      self.assertAllClose(
          np.log([0, 2, 2]),
          self.evaluate(
              tfp.math.reduce_weighted_logsumexp(x, w, axis=0)))
    self.assertAllClose(
        np.log([1, 3]),
        self.evaluate(
            tfp.math.reduce_weighted_logsumexp(x, w, axis=1)))
    self.assertAllClose(
        np.log([[1], [3]]),
        self.evaluate(
            tfp.math.reduce_weighted_logsumexp(
                x, w, axis=1, keep_dims=True)))
    self.assertAllClose(
        np.log(4),
        self.evaluate(
            tfp.math.reduce_weighted_logsumexp(x, w, axis=[0, 1])))
@test_util.run_all_in_graph_and_eager_modes
class SoftThresholdTest(test_case.TestCase, parameterized.TestCase):
  """Tests for `tfp.math.soft_threshold`."""

  dtype = tf.float32

  # Expected values computed using arbitrary precision.
  @parameterized.parameters(
      # x, threshold, expected_y, expected_dydx
      (5., 5., 0., 1.),
      (2., 5., 0., 0.),
      (-2., 5., 0., 0.),
      (3., 2.5, 0.5, 1.),
      (-3., 2.5, -0.5, 1.),
      (-1., 1., 0., 1.),
      (-6., 5., -1., 1.),
      (0., 0., 0., 0.),
  )
  def test_soft_threshold(self, x, threshold, expected_y, expected_dydx):
    # Check both the shrunk value and its derivative with respect to `x`.
    x = tf.convert_to_tensor(x, dtype=self.dtype)
    shrink = lambda x_: tfp.math.soft_threshold(x_, threshold)
    y, dydx = tfp.math.value_and_gradient(shrink, x)
    y_, dydx_ = self.evaluate([y, dydx])
    self.assertAllClose(expected_y, y_)
    self.assertAllClose(expected_dydx, dydx_)
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
@test_util.run_all_in_graph_and_eager_modes
class SoftplusInverseTest(test_case.TestCase):
  """Tests for `tfp.math.softplus_inverse`."""

  def _npSoftplus(self, np_features):
    """NumPy reference softplus, log(1 + exp(x)), computed stably."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    """Checks softplus against NumPy and that softplus_inverse inverts it."""
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    softplus = tf.math.softplus(np_features)
    softplus_inverse = tfp.math.softplus_inverse(softplus)
    [tf_softplus, tf_softplus_inverse] = self.evaluate([
        softplus, softplus_inverse])
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    # Round-trip tolerance is dtype-dependent; unknown dtypes get a tight
    # default.
    rtol = {'float16': 0.07, 'float32': 0.003, 'float64': 0.002}.get(
        str(np_features.dtype), 1e-6)
    # This will test that we correctly computed the inverse by verifying we
    # recovered the original input.
    self.assertAllCloseAccordingToType(
        np_features, tf_softplus_inverse,
        atol=0., rtol=rtol)
    # softplus is strictly positive everywhere.
    # NOTE: `bool` replaces the deprecated alias `np.bool` (removed in
    # NumPy 1.24); the resulting dtype is identical.
    self.assertAllEqual(np.ones_like(tf_softplus).astype(bool),
                        tf_softplus > 0)
    self.assertShapeEqual(np_softplus, softplus)
    self.assertShapeEqual(np_softplus, softplus_inverse)
    # Neither direction may overflow to inf or produce nan.
    self.assertAllEqual(np.ones_like(tf_softplus).astype(bool),
                        np.isfinite(tf_softplus))
    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(bool),
                        np.isfinite(tf_softplus_inverse))

  def testNumbers(self):
    """Sweeps a wide magnitude range plus probes around +/-log(eps)."""
    for t in [np.float16, np.float32, np.float64]:
      # Ranges chosen per dtype so exp() does not overflow.
      lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
      upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=False)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=True)
      # Probe values around +/-log(eps), where the inverse is most delicate.
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps,
              log_eps - one,
              log_eps + one,
              log_eps - ten,
              log_eps + ten,
              -log_eps,
              -log_eps - one,
              -log_eps + one,
              -log_eps - ten,
              -log_eps + ten,
          ],
          use_gpu=False)
      # BUGFIX: this list previously fused two probes into the single
      # element `log_eps + ten - log_eps`, silently dropping the `-log_eps`
      # probe; it now mirrors the use_gpu=False list above.
      self._testSoftplus(
          [
              log_eps,
              log_eps - one,
              log_eps + one,
              log_eps - ten,
              log_eps + ten,
              -log_eps,
              -log_eps - one,
              -log_eps + one,
              -log_eps - ten,
              -log_eps + ten,
          ],
          use_gpu=True)

  def testGradient(self):
    """Finite-difference check of the softplus gradient."""
    x = tf.constant(
        [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
        shape=[2, 5],
        name='x')
    err = gradient_checker_v2.max_error(
        *gradient_checker_v2.compute_gradient(tf.math.softplus, [x]))
    tf1.logging.vlog(2, 'softplus (float) gradient err = ', err)
    self.assertLess(err, 1e-4)

  def testInverseSoftplusGradientNeverNan(self):
    # Note that this range contains both zero and inf.
    x = tf.constant(np.logspace(-8, 6).astype(np.float16))
    _, grads = self.evaluate(tfp.math.value_and_gradient(
        tfp.math.softplus_inverse, x))
    # Equivalent to `assertAllFalse` (if it existed).
    self.assertAllEqual(np.zeros_like(grads).astype(bool), np.isnan(grads))

  def testInverseSoftplusGradientFinite(self):
    # This range of x is all finite, and so is 1 / x. So the
    # gradient and its approximations should be finite as well.
    x = tf.constant(np.logspace(-4.8, 4.5).astype(np.float16))
    _, grads = self.evaluate(tfp.math.value_and_gradient(
        tfp.math.softplus_inverse, x))
    # Equivalent to `assertAllTrue` (if it existed).
    self.assertAllEqual(
        np.ones_like(grads).astype(bool), np.isfinite(grads))
@test_util.run_all_in_graph_and_eager_modes
class LogAddExp(test_case.TestCase):
  """Tests for `tfp.math.log_add_exp`."""

  def _value_and_grad(self, x, y):
    # Evaluate log_add_exp and its gradients w.r.t. both arguments.
    return self.evaluate(
        tfp.math.value_and_gradient(tfp.math.log_add_exp, [x, y]))

  def test_small(self):
    # With very negative inputs the larger argument dominates completely,
    # so the gradient flows only to it.
    z, g = self._value_and_grad([-2, -1000], [-1000, -3])
    self.assertAllClose([-2., -3.], z, atol=0., rtol=1e-5)
    self.assertAllEqual(np.eye(2), g)

  def test_medium(self):
    # Moderate inputs: the value matches the naive log(exp(x) + exp(y)).
    x = [-2, -3]
    y = [-3, 2]
    z, g = self._value_and_grad(x, y)
    self.assertAllClose(np.log(np.exp(x) + np.exp(y)), z, atol=0., rtol=1e-5)
    self.assertAllNotNone(g)

  def test_big(self):
    # Huge inputs would overflow a naive implementation; the dominant
    # argument wins and receives all of the gradient.
    z, g = self._value_and_grad([2, 1000], [1000, 3])
    self.assertAllClose([1000., 1000.], z, atol=0., rtol=1e-5)
    self.assertAllEqual(1. - np.eye(2), g)
@test_util.run_all_in_graph_and_eager_modes
class LogSubExpTest(test_case.TestCase):
  """Tests for `tfp.math.log_sub_exp`."""

  def testLogSubExp(self):
    # Equal arguments: log(exp(1) - exp(1)) = log(0) = -inf.
    self.assertAllClose(-np.inf, self.evaluate(tfp.math.log_sub_exp(1., 1.)))
    # For k = 1000: log(e^-k - e^(-k + log(.5))) = log(e^-k / 2)
    #                                            = -k - log(2).
    self.assertAllClose(
        -1000. - np.log(2.),
        self.evaluate(tfp.math.log_sub_exp(-1000., -1000. + np.log(.5))))

  def test_small(self):
    # The second argument is negligible, so the value is just x and the
    # gradient flows entirely to x.
    z, g = self.evaluate(
        tfp.math.value_and_gradient(tfp.math.log_sub_exp, [[-2], [-1000]]))
    self.assertAllClose([-2.], z, atol=0., rtol=1e-5)
    self.assertAllClose([[1.], [0.]], g)

  def test_medium(self):
    x = [-2, -3, -5, -3]
    y = [-3, -5, -3, -2]
    expected = np.log(np.abs(np.exp(x) - np.exp(y)))
    z, g = self.evaluate(
        tfp.math.value_and_gradient(tfp.math.log_sub_exp, [x, y]))
    self.assertAllClose(expected, z, atol=0., rtol=1e-5)
    # The sign output records which argument was larger.
    _, sign = tfp.math.log_sub_exp(x, y, return_sign=True)
    self.assertAllEqual([1., 1, -1, -1], sign)
    self.assertAllNotNone(g)

  def test_big(self):
    # Huge inputs would overflow a naive implementation; the dominant
    # argument determines the value, the gradient, and the sign.
    x = [1000, -3]
    y = [2, 1000]
    z, g = self.evaluate(
        tfp.math.value_and_gradient(tfp.math.log_sub_exp, [x, y]))
    self.assertAllClose([1000., 1000.], z, atol=0., rtol=1e-5)
    self.assertAllEqual([[1., 0.], [0., 1.]], g)
    _, sign = tfp.math.log_sub_exp(x, y, return_sign=True)
    self.assertAllEqual([1., -1.], sign)
@test_util.run_all_in_graph_and_eager_modes
class Log1mexpTest(test_case.TestCase):
  """Tests for `tfp.math.log1mexp`."""

  def testLog1mexp(self):
    # Endpoints: log(1 - exp(0)) = log(0) = -inf; log(1 - exp(-inf)) = 0.
    self.assertAllClose(-np.inf, self.evaluate(tfp.math.log1mexp(0.)))
    self.assertAllClose(0., self.evaluate(tfp.math.log1mexp(np.inf)))
    # Both a positive and a negative grid are checked against the stable
    # NumPy form; the expected values correspond to log(1 - exp(-|x|)).
    positive = np.linspace(0.1, 20, 100)
    self.assertAllClose(
        np.log(-np.expm1(-positive)),
        self.evaluate(tfp.math.log1mexp(positive)))
    negative = np.linspace(-20., -0.1, 100)
    self.assertAllClose(
        np.log(-np.expm1(negative)),
        self.evaluate(tfp.math.log1mexp(negative)))
@test_util.run_all_in_graph_and_eager_modes
class Smootherstep(test_case.TestCase):
  """Tests for `tfp.math.smootherstep`."""

  def test_value_vector(self):
    # Values clamp to 0 below the [0, 1] window, to 1 above it, and pass
    # through 0.5 at the midpoint.
    inputs = tf.constant([-np.inf, -20., 0., 0.5, 1., 20., np.inf])
    values, _ = tfp.math.value_and_gradient(tfp.math.smootherstep, inputs)
    self.assertAllEqual([7], values.shape)
    self.assertAllClose(
        [0., 0., 0., 0.5, 1., 1., 1.],
        self.evaluate(values), atol=1e-5, rtol=1e-5)

  def test_gradient_matrix(self):
    # The derivative vanishes at and outside the window edges and equals
    # 1.875 at the midpoint x = 0.5.
    inputs = tf.constant([[-np.inf, -20., 0., 0.5],
                          [np.inf, 20., 1., 0.5]])
    _, grads = tfp.math.value_and_gradient(tfp.math.smootherstep, inputs)
    self.assertAllEqual([2, 4], grads.shape)
    self.assertAllClose(
        [[0., 0., 0., 1.875]] * 2,
        self.evaluate(grads), atol=1e-5, rtol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class SoftSortingMatrixTest(parameterized.TestCase, test_case.TestCase):
  """Tests for `tfp.math.soft_sorting_matrix`."""

  # Hardening the soft permutation matrix with an argmax should recover a
  # descending argsort, at every temperature.
  @parameterized.parameters(
      {'shape': (4,), 'temperature': 1e2},
      {'shape': (4,), 'temperature': 1e1},
      {'shape': (4,), 'temperature': 1e0},
      {'shape': (4,), 'temperature': 1e-1},
      {'shape': (5, 5, 4), 'temperature': 1e2},
      {'shape': (5, 5, 4), 'temperature': 1e1},
      {'shape': (5, 5, 4), 'temperature': 1e0},
      {'shape': (5, 5, 4), 'temperature': 1e-1},
  )
  def testMatchesArgsort(self, shape, temperature):
    x = np.random.randn(*shape)
    # Reference: indices that sort x in decreasing order.
    descending_sort = np.flip(np.argsort(x, axis=-1), axis=-1)
    permutation = self.evaluate(
        tfp.math.soft_sorting_matrix(x=x, temperature=temperature))
    # Each row along the last axis is a probability vector: non-negative
    # and summing to one.
    self.assertAllClose(np.ones(shape), np.sum(permutation, axis=-1))
    self.assertTrue(np.all(permutation >= 0.))
    # An argmax along the last axis recovers the exact argsort indices.
    self.assertAllClose(descending_sort, np.argmax(permutation, axis=-1))
# Run all TestCase classes in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow_probability.math.softplus_inverse",
"numpy.log",
"numpy.logaddexp",
"numpy.argsort",
"numpy.array",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"tensorflow_probability.math.log1mexp",
"tensorflow.compat.v2.reduce_logsumexp",
"numpy.isfinite",
"tensorflow.compat.v1.log... | [((6026, 6241), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(5.0, 5.0, 0.0, 1.0)', '(2.0, 5.0, 0.0, 0.0)', '(-2.0, 5.0, 0.0, 0.0)', '(3.0, 2.5, 0.5, 1.0)', '(-3.0, 2.5, -0.5, 1.0)', '(-1.0, 1.0, 0.0, 1.0)', '(-6.0, 5.0, -1.0, 1.0)', '(0.0, 0.0, 0.0, 0.0)'], {}), '((5.0, 5.0, 0.0, 1.0), (2.0, 5.0, 0.0, 0.0), (-2.0,\n 5.0, 0.0, 0.0), (3.0, 2.5, 0.5, 1.0), (-3.0, 2.5, -0.5, 1.0), (-1.0, \n 1.0, 0.0, 1.0), (-6.0, 5.0, -1.0, 1.0), (0.0, 0.0, 0.0, 0.0))\n', (6050, 6241), False, 'from absl.testing import parameterized\n'), ((14750, 15114), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'shape': (4,), 'temperature': 100.0}", "{'shape': (4,), 'temperature': 10.0}", "{'shape': (4,), 'temperature': 1.0}", "{'shape': (4,), 'temperature': 0.1}", "{'shape': (5, 5, 4), 'temperature': 100.0}", "{'shape': (5, 5, 4), 'temperature': 10.0}", "{'shape': (5, 5, 4), 'temperature': 1.0}", "{'shape': (5, 5, 4), 'temperature': 0.1}"], {}), "({'shape': (4,), 'temperature': 100.0}, {'shape': (\n 4,), 'temperature': 10.0}, {'shape': (4,), 'temperature': 1.0}, {\n 'shape': (4,), 'temperature': 0.1}, {'shape': (5, 5, 4), 'temperature':\n 100.0}, {'shape': (5, 5, 4), 'temperature': 10.0}, {'shape': (5, 5, 4),\n 'temperature': 1.0}, {'shape': (5, 5, 4), 'temperature': 0.1})\n", (14774, 15114), False, 'from absl.testing import parameterized\n'), ((15907, 15921), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (15919, 15921), True, 'import tensorflow.compat.v2 as tf\n'), ((1596, 1625), 'numpy.array', 'np.array', (['n'], {'dtype': 'np.float32'}), '(n, dtype=np.float32)\n', (1604, 1625), True, 'import numpy as np\n'), ((1692, 1728), 'tensorflow_probability.math.log_combinations', 'tfp.math.log_combinations', (['n', 'counts'], {}), '(n, counts)\n', (1717, 1728), True, 'import tensorflow_probability as tfp\n'), ((1927, 1956), 'numpy.array', 'np.array', (['n'], {'dtype': 'np.float32'}), '(n, 
dtype=np.float32)\n', (1935, 1956), True, 'import numpy as np\n'), ((2073, 2109), 'tensorflow_probability.math.log_combinations', 'tfp.math.log_combinations', (['n', 'counts'], {}), '(n, counts)\n', (2098, 2109), True, 'import tensorflow_probability as tfp\n'), ((2334, 2372), 'numpy.max', 'np.max', (['logx'], {'axis': 'axis', 'keepdims': '(True)'}), '(logx, axis=axis, keepdims=True)\n', (2340, 2372), True, 'import numpy as np\n'), ((2454, 2467), 'numpy.sign', 'np.sign', (['sum_'], {}), '(sum_)\n', (2461, 2467), True, 'import numpy as np\n'), ((2604, 2662), 'numpy.array', 'np.array', (['[[0.0, -1, 1000.0], [0, 1, -1000.0], [-5, 0, 5]]'], {}), '([[0.0, -1, 1000.0], [0, 1, -1000.0], [-5, 0, 5]])\n', (2612, 2662), True, 'import numpy as np\n'), ((2715, 2733), 'tensorflow.compat.v2.constant', 'tf.constant', (['logx_'], {}), '(logx_)\n', (2726, 2733), True, 'import tensorflow.compat.v2 as tf\n'), ((3550, 3608), 'numpy.array', 'np.array', (['[[0.0, -1, 1000.0], [0, 1, -1000.0], [-5, 0, 5]]'], {}), '([[0.0, -1, 1000.0], [0, 1, -1000.0], [-5, 0, 5]])\n', (3558, 3608), True, 'import numpy as np\n'), ((3659, 3706), 'numpy.array', 'np.array', (['[[1.0, 1, -1], [1, -2, 1], [1, 0, 1]]'], {}), '([[1.0, 1, -1], [1, -2, 1], [1, 0, 1]])\n', (3667, 3706), True, 'import numpy as np\n'), ((3825, 3843), 'tensorflow.compat.v2.constant', 'tf.constant', (['logx_'], {}), '(logx_)\n', (3836, 3843), True, 'import tensorflow.compat.v2 as tf\n'), ((3852, 3867), 'tensorflow.compat.v2.constant', 'tf.constant', (['w_'], {}), '(w_)\n', (3863, 3867), True, 'import tensorflow.compat.v2 as tf\n'), ((3893, 3963), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['logx', 'w'], {'axis': '(-1)', 'return_sign': '(True)'}), '(logx, w, axis=-1, return_sign=True)\n', (3927, 3963), True, 'import tensorflow_probability as tfp\n'), ((4169, 4227), 'numpy.array', 'np.array', (['[[0.0, -1, 1000.0], [0, 1, -1000.0], [-5, 0, 5]]'], {}), '([[0.0, -1, 1000.0], [0, 1, 
-1000.0], [-5, 0, 5]])\n', (4177, 4227), True, 'import numpy as np\n'), ((4278, 4325), 'numpy.array', 'np.array', (['[[1.0, 1, -1], [1, -2, 1], [1, 0, 1]]'], {}), '([[1.0, 1, -1], [1, -2, 1], [1, 0, 1]])\n', (4286, 4325), True, 'import numpy as np\n'), ((4469, 4487), 'tensorflow.compat.v2.constant', 'tf.constant', (['logx_'], {}), '(logx_)\n', (4480, 4487), True, 'import tensorflow.compat.v2 as tf\n'), ((4496, 4511), 'tensorflow.compat.v2.constant', 'tf.constant', (['w_'], {}), '(w_)\n', (4507, 4511), True, 'import tensorflow.compat.v2 as tf\n'), ((4537, 4627), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['logx', 'w'], {'axis': '(-1)', 'return_sign': '(True)', 'keep_dims': '(True)'}), '(logx, w, axis=-1, return_sign=True,\n keep_dims=True)\n', (4571, 4627), True, 'import tensorflow_probability as tfp\n'), ((4905, 4942), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 0, 0], [0, 0, 0]]'], {}), '([[0.0, 0, 0], [0, 0, 0]])\n', (4916, 4942), True, 'import tensorflow.compat.v2 as tf\n'), ((4972, 5010), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[-1.0, 1, 1], [1, 1, 1]]'], {}), '([[-1.0, 1, 1], [1, 1, 1]])\n', (4983, 5010), True, 'import tensorflow.compat.v2 as tf\n'), ((6444, 6485), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'self.dtype'}), '(x, dtype=self.dtype)\n', (6464, 6485), True, 'import tensorflow.compat.v2 as tf\n'), ((7003, 7026), 'numpy.asarray', 'np.asarray', (['np_features'], {}), '(np_features)\n', (7013, 7026), True, 'import numpy as np\n'), ((7089, 7120), 'numpy.logaddexp', 'np.logaddexp', (['zero', 'np_features'], {}), '(zero, np_features)\n', (7101, 7120), True, 'import numpy as np\n'), ((7195, 7218), 'numpy.asarray', 'np.asarray', (['np_features'], {}), '(np_features)\n', (7205, 7218), True, 'import numpy as np\n'), ((7282, 7311), 'tensorflow.compat.v2.math.softplus', 'tf.math.softplus', (['np_features'], {}), '(np_features)\n', (7298, 
7311), True, 'import tensorflow.compat.v2 as tf\n'), ((7335, 7370), 'tensorflow_probability.math.softplus_inverse', 'tfp.math.softplus_inverse', (['softplus'], {}), '(softplus)\n', (7360, 7370), True, 'import tensorflow_probability as tfp\n'), ((9669, 9766), 'tensorflow.compat.v2.constant', 'tf.constant', (['[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]'], {'shape': '[2, 5]', 'name': '"""x"""'}), "([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9], shape=\n [2, 5], name='x')\n", (9680, 9766), True, 'import tensorflow.compat.v2 as tf\n'), ((9902, 9962), 'tensorflow.compat.v1.logging.vlog', 'tf1.logging.vlog', (['(2)', '"""softplus (float) gradient err = """', 'err'], {}), "(2, 'softplus (float) gradient err = ', err)\n", (9918, 9962), True, 'import tensorflow.compat.v1 as tf1\n'), ((13496, 13521), 'numpy.linspace', 'np.linspace', (['(0.1)', '(20)', '(100)'], {}), '(0.1, 20, 100)\n', (13507, 13521), True, 'import numpy as np\n'), ((13624, 13653), 'numpy.linspace', 'np.linspace', (['(-20.0)', '(-0.1)', '(100)'], {}), '(-20.0, -0.1, 100)\n', (13635, 13653), True, 'import numpy as np\n'), ((13871, 13929), 'tensorflow.compat.v2.constant', 'tf.constant', (['[-np.inf, -20.0, 0.0, 0.5, 1.0, 20.0, np.inf]'], {}), '([-np.inf, -20.0, 0.0, 0.5, 1.0, 20.0, np.inf])\n', (13882, 13929), True, 'import tensorflow.compat.v2 as tf\n'), ((13937, 13990), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.smootherstep', 'x'], {}), '(tfp.math.smootherstep, x)\n', (13964, 13990), True, 'import tensorflow_probability as tfp\n'), ((14179, 14246), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[-np.inf, -20.0, 0.0, 0.5], [np.inf, 20.0, 1.0, 0.5]]'], {}), '([[-np.inf, -20.0, 0.0, 0.5], [np.inf, 20.0, 1.0, 0.5]])\n', (14190, 14246), True, 'import tensorflow.compat.v2 as tf\n'), ((14275, 14328), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.smootherstep', 'x'], {}), 
'(tfp.math.smootherstep, x)\n', (14302, 14328), True, 'import tensorflow_probability as tfp\n'), ((15206, 15229), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (15221, 15229), True, 'import numpy as np\n'), ((15781, 15823), 'numpy.argmax', 'np.argmax', (['soft_sort_permutation_'], {'axis': '(-1)'}), '(soft_sort_permutation_, axis=-1)\n', (15790, 15823), True, 'import numpy as np\n'), ((1563, 1585), 'scipy.special.binom', 'sp_special.binom', (['n', 'k'], {}), '(n, k)\n', (1579, 1585), True, 'from scipy import special as sp_special\n'), ((2500, 2524), 'numpy.squeeze', 'np.squeeze', (['m'], {'axis': 'axis'}), '(m, axis=axis)\n', (2510, 2524), True, 'import numpy as np\n'), ((2743, 2760), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2758, 2760), True, 'import tensorflow.compat.v2 as tf\n'), ((2810, 2857), 'tensorflow.compat.v2.reduce_logsumexp', 'tf.reduce_logsumexp', ([], {'input_tensor': 'logx', 'axis': '(-1)'}), '(input_tensor=logx, axis=-1)\n', (2829, 2857), True, 'import tensorflow.compat.v2 as tf\n'), ((2917, 2934), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2932, 2934), True, 'import tensorflow.compat.v2 as tf\n'), ((2994, 3061), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['logx'], {'axis': '(-1)', 'return_sign': '(True)'}), '(logx, axis=-1, return_sign=True)\n', (3028, 3061), True, 'import tensorflow_probability as tfp\n'), ((5065, 5074), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (5071, 5074), True, 'import numpy as np\n'), ((5151, 5179), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (5162, 5179), True, 'import numpy as np\n'), ((5361, 5375), 'numpy.log', 'np.log', (['[1, 3]'], {}), '([1, 3])\n', (5367, 5375), True, 'import numpy as np\n'), ((5497, 5515), 'numpy.log', 'np.log', (['[[1], [3]]'], {}), '([[1], [3]])\n', (5503, 5515), True, 'import numpy as np\n'), ((5670, 
5679), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (5676, 5679), True, 'import numpy as np\n'), ((8167, 8191), 'numpy.isfinite', 'np.isfinite', (['tf_softplus'], {}), '(tf_softplus)\n', (8178, 8191), True, 'import numpy as np\n'), ((8292, 8324), 'numpy.isfinite', 'np.isfinite', (['tf_softplus_inverse'], {}), '(tf_softplus_inverse)\n', (8303, 8324), True, 'import numpy as np\n'), ((10187, 10244), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.softplus_inverse', 'x'], {}), '(tfp.math.softplus_inverse, x)\n', (10214, 10244), True, 'import tensorflow_probability as tfp\n'), ((10371, 10386), 'numpy.isnan', 'np.isnan', (['grads'], {}), '(grads)\n', (10379, 10386), True, 'import numpy as np\n'), ((10654, 10711), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.softplus_inverse', 'x'], {}), '(tfp.math.softplus_inverse, x)\n', (10681, 10711), True, 'import tensorflow_probability as tfp\n'), ((10845, 10863), 'numpy.isfinite', 'np.isfinite', (['grads'], {}), '(grads)\n', (10856, 10863), True, 'import numpy as np\n'), ((11047, 11104), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_add_exp', '[x, y]'], {}), '(tfp.math.log_add_exp, [x, y])\n', (11074, 11104), True, 'import tensorflow_probability as tfp\n'), ((11189, 11198), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11195, 11198), True, 'import numpy as np\n'), ((11296, 11353), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_add_exp', '[x, y]'], {}), '(tfp.math.log_add_exp, [x, y])\n', (11323, 11353), True, 'import tensorflow_probability as tfp\n'), ((11555, 11612), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_add_exp', '[x, y]'], {}), '(tfp.math.log_add_exp, [x, y])\n', (11582, 11612), True, 'import tensorflow_probability as tfp\n'), ((12284, 12341), 
'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_sub_exp', '[x, y]'], {}), '(tfp.math.log_sub_exp, [x, y])\n', (12311, 12341), True, 'import tensorflow_probability as tfp\n'), ((12548, 12605), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_sub_exp', '[x, y]'], {}), '(tfp.math.log_sub_exp, [x, y])\n', (12575, 12605), True, 'import tensorflow_probability as tfp\n'), ((12954, 13011), 'tensorflow_probability.math.value_and_gradient', 'tfp.math.value_and_gradient', (['tfp.math.log_sub_exp', '[x, y]'], {}), '(tfp.math.log_sub_exp, [x, y])\n', (12981, 13011), True, 'import tensorflow_probability as tfp\n'), ((15293, 15315), 'numpy.argsort', 'np.argsort', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (15303, 15315), True, 'import numpy as np\n'), ((15378, 15436), 'tensorflow_probability.math.soft_sorting_matrix', 'tfp.math.soft_sorting_matrix', ([], {'x': 'x', 'temperature': 'temperature'}), '(x=x, temperature=temperature)\n', (15406, 15436), True, 'import tensorflow_probability as tfp\n'), ((15498, 15512), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (15505, 15512), True, 'import numpy as np\n'), ((15514, 15553), 'numpy.sum', 'np.sum', (['soft_sort_permutation_'], {'axis': '(-1)'}), '(soft_sort_permutation_, axis=-1)\n', (15520, 15553), True, 'import numpy as np\n'), ((15603, 15640), 'numpy.all', 'np.all', (['(soft_sort_permutation_ >= 0.0)'], {}), '(soft_sort_permutation_ >= 0.0)\n', (15609, 15640), True, 'import numpy as np\n'), ((2395, 2411), 'numpy.exp', 'np.exp', (['(logx - m)'], {}), '(logx - m)\n', (2401, 2411), True, 'import numpy as np\n'), ((2540, 2558), 'numpy.log', 'np.log', (['(sgn * sum_)'], {}), '(sgn * sum_)\n', (2546, 2558), True, 'import numpy as np\n'), ((5098, 5138), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['x', 'w'], {}), '(x, w)\n', (5132, 5138), True, 'import tensorflow_probability as 
tfp\n'), ((5218, 5235), 'numpy.log', 'np.log', (['[0, 2, 2]'], {}), '([0, 2, 2])\n', (5224, 5235), True, 'import numpy as np\n'), ((5412, 5460), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['x', 'w'], {'axis': '(1)'}), '(x, w, axis=1)\n', (5446, 5460), True, 'import tensorflow_probability as tfp\n'), ((5552, 5616), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['x', 'w'], {'axis': '(1)', 'keep_dims': '(True)'}), '(x, w, axis=1, keep_dims=True)\n', (5586, 5616), True, 'import tensorflow_probability as tfp\n'), ((5716, 5769), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['x', 'w'], {'axis': '[0, 1]'}), '(x, w, axis=[0, 1])\n', (5750, 5769), True, 'import tensorflow_probability as tfp\n'), ((6548, 6586), 'tensorflow_probability.math.soft_threshold', 'tfp.math.soft_threshold', (['x_', 'threshold'], {}), '(x_, threshold)\n', (6571, 6586), True, 'import tensorflow_probability as tfp\n'), ((7038, 7051), 'numpy.asarray', 'np.asarray', (['(0)'], {}), '(0)\n', (7048, 7051), True, 'import numpy as np\n'), ((9837, 9896), 'tensorflow.python.ops.gradient_checker_v2.compute_gradient', 'gradient_checker_v2.compute_gradient', (['tf.math.softplus', '[x]'], {}), '(tf.math.softplus, [x])\n', (9873, 9896), False, 'from tensorflow.python.ops import gradient_checker_v2\n'), ((11706, 11715), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11712, 11715), True, 'import numpy as np\n'), ((11882, 11912), 'tensorflow_probability.math.log_sub_exp', 'tfp.math.log_sub_exp', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (11902, 11912), True, 'import tensorflow_probability as tfp\n'), ((12110, 12121), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (12116, 12121), True, 'import numpy as np\n'), ((12782, 12826), 'tensorflow_probability.math.log_sub_exp', 'tfp.math.log_sub_exp', (['x', 'y'], {'return_sign': '(True)'}), '(x, y, return_sign=True)\n', 
(12802, 12826), True, 'import tensorflow_probability as tfp\n'), ((13184, 13228), 'tensorflow_probability.math.log_sub_exp', 'tfp.math.log_sub_exp', (['x', 'y'], {'return_sign': '(True)'}), '(x, y, return_sign=True)\n', (13204, 13228), True, 'import tensorflow_probability as tfp\n'), ((13393, 13415), 'tensorflow_probability.math.log1mexp', 'tfp.math.log1mexp', (['(0.0)'], {}), '(0.0)\n', (13410, 13415), True, 'import tensorflow_probability as tfp\n'), ((13459, 13484), 'tensorflow_probability.math.log1mexp', 'tfp.math.log1mexp', (['np.inf'], {}), '(np.inf)\n', (13476, 13484), True, 'import tensorflow_probability as tfp\n'), ((13592, 13612), 'tensorflow_probability.math.log1mexp', 'tfp.math.log1mexp', (['x'], {}), '(x)\n', (13609, 13612), True, 'import tensorflow_probability as tfp\n'), ((13722, 13742), 'tensorflow_probability.math.log1mexp', 'tfp.math.log1mexp', (['x'], {}), '(x)\n', (13739, 13742), True, 'import tensorflow_probability as tfp\n'), ((5276, 5324), 'tensorflow_probability.math.reduce_weighted_logsumexp', 'tfp.math.reduce_weighted_logsumexp', (['x', 'w'], {'axis': '(0)'}), '(x, w, axis=0)\n', (5310, 5324), True, 'import tensorflow_probability as tfp\n'), ((7884, 7909), 'numpy.ones_like', 'np.ones_like', (['tf_softplus'], {}), '(tf_softplus)\n', (7896, 7909), True, 'import numpy as np\n'), ((8100, 8125), 'numpy.ones_like', 'np.ones_like', (['tf_softplus'], {}), '(tf_softplus)\n', (8112, 8125), True, 'import numpy as np\n'), ((8217, 8250), 'numpy.ones_like', 'np.ones_like', (['tf_softplus_inverse'], {}), '(tf_softplus_inverse)\n', (8229, 8250), True, 'import numpy as np\n'), ((8879, 8890), 'numpy.finfo', 'np.finfo', (['t'], {}), '(t)\n', (8887, 8890), True, 'import numpy as np\n'), ((10119, 10137), 'numpy.logspace', 'np.logspace', (['(-8)', '(6)'], {}), '(-8, 6)\n', (10130, 10137), True, 'import numpy as np\n'), ((10333, 10353), 'numpy.zeros_like', 'np.zeros_like', (['grads'], {}), '(grads)\n', (10346, 10353), True, 'import numpy as np\n'), ((10582, 
10604), 'numpy.logspace', 'np.logspace', (['(-4.8)', '(4.5)'], {}), '(-4.8, 4.5)\n', (10593, 10604), True, 'import numpy as np\n'), ((10808, 10827), 'numpy.ones_like', 'np.ones_like', (['grads'], {}), '(grads)\n', (10820, 10827), True, 'import numpy as np\n'), ((11386, 11395), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (11392, 11395), True, 'import numpy as np\n'), ((11398, 11407), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (11404, 11407), True, 'import numpy as np\n'), ((13563, 13575), 'numpy.expm1', 'np.expm1', (['(-x)'], {}), '(-x)\n', (13571, 13575), True, 'import numpy as np\n'), ((13694, 13705), 'numpy.expm1', 'np.expm1', (['x'], {}), '(x)\n', (13702, 13705), True, 'import numpy as np\n'), ((12182, 12193), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (12188, 12193), True, 'import numpy as np\n'), ((12645, 12654), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (12651, 12654), True, 'import numpy as np\n'), ((12657, 12666), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (12663, 12666), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-agents suite for loading Adversarial environments.
Adds two new functions: reset_agent, and step_adversary in addition to usual
RL env functions. Therefore we have the following environment functions:
env.reset(): completely resets the environment and removes anything the
adversary has built.
env.reset_agent(): resets the position of the agent, but does not
remove the obstacles the adversary has created when building the env.
env.step(): steps the agent as before in the environment. i.e. if the agent
passes action 'left' it will move left.
env.step_adversary(): processes an adversary action, which involves choosing
the location of the agent, goal, or an obstacle.
Adds additional functions for logging metrics related to the generated
environments, like the shortest path length to the goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import gym
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import batched_py_environment
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from tf_agents.environments import wrappers
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts_lib
from tf_agents.utils import nest_utils
@gin.configurable
def load(environment_name,
         discount=1.0,
         max_episode_steps=None,
         gym_env_wrappers=(),
         env_wrappers=(),
         spec_dtype_map=None,
         gym_kwargs=None,
         auto_reset=True):
  """Loads an adversarial Gym environment and applies the given wrappers.

  By default a TimeLimit wrapper enforces the episode-step limit registered
  with the environment's Gym spec.

  Args:
    environment_name: Name for the environment to load.
    discount: Discount to use for the environment.
    max_episode_steps: If None the max_episode_steps will be set to the default
      step limit defined in the environment's spec. No limit is applied if set
      to 0 or if there is no max_episode_steps set in the environment's spec.
    gym_env_wrappers: Iterable with references to wrapper classes to use
      directly on the gym environment.
    env_wrappers: Iterable with references to wrapper classes to use on the
      gym_wrapped environment.
    spec_dtype_map: A dict that maps gym specs to tf dtypes to use as the
      default dtype for the tensors. An easy way to configure a custom
      mapping through Gin is to define a gin-configurable function that returns
      desired mapping and call it in your Gin config file, for example:
      `suite_gym.load.spec_dtype_map = @get_custom_mapping()`.
    gym_kwargs: The kwargs to pass to the Gym environment class.
    auto_reset: If True (default), reset the environment automatically after a
      terminal state is reached.

  Returns:
    A PyEnvironment instance.
  """
  spec = gym.spec(environment_name)
  raw_gym_env = spec.make(**(gym_kwargs or {}))

  # Fall back to the step limit registered with the Gym spec.
  if max_episode_steps is None and spec.max_episode_steps is not None:
    max_episode_steps = spec.max_episode_steps

  # Gym-level wrappers are applied before the tf-agents wrapper.
  for gym_wrap in gym_env_wrappers:
    raw_gym_env = gym_wrap(raw_gym_env)

  env = AdversarialGymWrapper(
      raw_gym_env,
      discount=discount,
      spec_dtype_map=spec_dtype_map,
      auto_reset=auto_reset,
  )

  # A limit of 0 (or an absent spec limit) means "no limit".
  if max_episode_steps is not None and max_episode_steps > 0:
    env = wrappers.TimeLimit(env, max_episode_steps)

  for env_wrap in env_wrappers:
    env = env_wrap(env)
  return env
class AdversarialGymWrapper(gym_wrapper.GymWrapper):
  """Wrapper implementing PyEnvironment interface for adversarial environments.

  Implements special reset_agent and step_adversary functions that are not
  present in a normal Gym environment.
  """

  def __init__(self,
               gym_env,
               discount=1.0,
               spec_dtype_map=None,
               match_obs_space_dtype=True,
               auto_reset=False,
               simplify_box_bounds=True):
    super(AdversarialGymWrapper, self).__init__(
        gym_env, discount, spec_dtype_map, match_obs_space_dtype, auto_reset,
        simplify_box_bounds)
    # Specs for the adversary's (environment-building) observation/action
    # space, which differ from the agent's specs handled by the parent class.
    self.adversary_observation_spec = gym_wrapper.spec_from_gym_space(
        self._gym_env.adversary_observation_space, name='observation')
    self.adversary_action_spec = gym_wrapper.spec_from_gym_space(
        self._gym_env.adversary_action_space, name='action')
    self.adversary_time_step_spec = ts_lib.time_step_spec(
        self.adversary_observation_spec, self.reward_spec())
    self.adversary_flat_obs_spec = tf.nest.flatten(
        self.adversary_observation_spec)

  def _reset(self):
    """Fully resets the env (adversary's turn); returns an adversary step."""
    observation = self._gym_env.reset()
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      # The reset observation is in the adversary's observation space.
      observation = self._adversary_to_obs_space_dtype(observation)
    return ts_lib.restart(observation, reward_spec=self.reward_spec())

  def reset_random(self):
    """Resets to a randomly generated environment; returns an agent step."""
    return self._restart_agent(self._gym_env.reset_random())

  def reset_agent(self):
    """Resets the agent's position, keeping the adversary-built environment."""
    return self._restart_agent(self._gym_env.reset_agent())

  def _restart_agent(self, observation):
    # Shared epilogue for agent-side resets: clear episode state and build a
    # restart time step in the agent's observation spec.
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    self._current_time_step = ts_lib.restart(
        observation, reward_spec=self.reward_spec())
    return self._current_time_step

  def _adversary_to_obs_space_dtype(self, observation):
    """Casts an adversary observation to the dtypes declared in its spec."""
    # Make sure we handle cases where observations are provided as a list.
    flat_obs = nest_utils.flatten_up_to(
        self.adversary_observation_spec, observation)
    matched_observations = []
    for spec, obs in zip(self.adversary_flat_obs_spec, flat_obs):
      matched_observations.append(np.asarray(obs, dtype=spec.dtype))
    return tf.nest.pack_sequence_as(self.adversary_observation_spec,
                                    matched_observations)

  def _step(self, action):
    """Steps the agent; auto-resets the agent first if the episode ended."""
    # Automatically reset the environments on step if they need to be reset.
    if self._auto_reset and self._done:
      return self.reset_agent()

    action = action.item() if self._action_is_discrete else action
    observation, reward, self._done, self._info = self._gym_env.step(action)
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    return self._package_time_step(observation, reward)

  def step_adversary(self, action):
    """Processes an adversary action that alters the environment layout."""
    action = action.item() if self._action_is_discrete else action
    observation, reward, self._done, self._info = self._gym_env.step_adversary(
        action)
    if self._match_obs_space_dtype:
      observation = self._adversary_to_obs_space_dtype(observation)
    return self._package_time_step(observation, reward)

  def _package_time_step(self, observation, reward):
    # Shared epilogue for _step/step_adversary: cast the reward and emit a
    # termination or transition time step depending on episode completion.
    reward = np.asarray(reward, dtype=self.reward_spec().dtype)
    outer_dims = nest_utils.get_outer_array_shape(reward, self.reward_spec())
    if self._done:
      return ts_lib.termination(observation, reward, outer_dims=outer_dims)
    return ts_lib.transition(observation, reward, self._discount,
                             outer_dims=outer_dims)
@gin.configurable
class AdversarialBatchedPyEnvironment(
    batched_py_environment.BatchedPyEnvironment):
  """Batch together multiple adversarial py environments acting as single batch.

  The environments should only access shared python variables using
  shared mutex locks (from the threading module).
  """

  def __init__(self, envs, multithreading=True):
    super(AdversarialBatchedPyEnvironment, self).__init__(
        envs, multithreading=multithreading)
    # All batched envs share the same adversary specs; expose the first's.
    self.adversary_action_spec = self._envs[0].adversary_action_spec
    self.adversary_observation_spec = self._envs[0].adversary_observation_spec
    self.adversary_time_step_spec = self._envs[0].adversary_time_step_spec

  def _stack_metric(self, value_fn):
    """Returns value_fn(env) across all envs as a batched float32 tensor.

    Args:
      value_fn: Callable mapping a py environment to a scalar metric value.
    """
    if self._num_envs == 1:
      return nest_utils.batch_nested_tensors(
          tf.cast(value_fn(self._envs[0]), tf.float32))
    # BUG FIX: the original code called tf.stack(lambda env: ..., self._envs),
    # but tf.stack expects a list of tensors as its first argument (the second
    # positional argument is `axis`), so any batch with more than one env
    # would fail. Build the per-env tensor list explicitly instead.
    return tf.stack([tf.cast(value_fn(env), tf.float32) for env in self._envs])

  def get_num_blocks(self):
    return self._stack_metric(lambda env: env.n_clutter_placed)

  def get_distance_to_goal(self):
    return self._stack_metric(lambda env: env.distance_to_goal)

  def get_deliberate_placement(self):
    return self._stack_metric(lambda env: env.deliberate_agent_placement)

  def get_goal_x(self):
    return self._stack_metric(lambda env: env.get_goal_x())

  def get_goal_y(self):
    return self._stack_metric(lambda env: env.get_goal_y())

  def get_passable(self):
    return self._stack_metric(lambda env: env.passable)

  def get_shortest_path_length(self):
    return self._stack_metric(lambda env: env.shortest_path_length)

  def reset_agent(self):
    """Resets the agent in every environment; returns batched time steps."""
    if self._num_envs == 1:
      return nest_utils.batch_nested_array(self._envs[0].reset_agent())
    time_steps = self._execute(lambda env: env.reset_agent(), self._envs)
    return nest_utils.stack_nested_arrays(time_steps)

  def reset_random(self):
    """Randomly regenerates every environment; returns batched time steps."""
    if self._num_envs == 1:
      return nest_utils.batch_nested_array(self._envs[0].reset_random())
    time_steps = self._execute(lambda env: env.reset_random(), self._envs)
    return nest_utils.stack_nested_arrays(time_steps)

  def step_adversary(self, actions):
    """Steps each environment's adversary with its slice of `actions`.

    Args:
      actions: Batched adversary actions, one entry per environment.

    Returns:
      Batched time steps from the adversary's perspective.

    Raises:
      ValueError: If the leading dimension of `actions` does not match the
        batch size.
    """
    if self._num_envs == 1:
      actions = nest_utils.unbatch_nested_array(actions)
      time_steps = self._envs[0].step_adversary(actions)
      return nest_utils.batch_nested_array(time_steps)
    unstacked_actions = batched_py_environment.unstack_actions(actions)
    if len(unstacked_actions) != self.batch_size:
      raise ValueError(
          'Primary dimension of action items does not match '
          'batch size: %d vs. %d' % (len(unstacked_actions), self.batch_size))
    time_steps = self._execute(
        lambda env_action: env_action[0].step_adversary(env_action[1]),
        zip(self._envs, unstacked_actions))
    return nest_utils.stack_nested_arrays(time_steps)
class AdversarialTFPyEnvironment(tf_py_environment.TFPyEnvironment):
  """Override TFPyEnvironment to add support for additional adversary functions.

  Note that the 'step' function resets the agent, but 'reset' resets the whole
  environment. Therefore use 'reset_agent' to reset just the agent to its
  initial location, without resetting the environment the adversary has created.

  The time_step_spec and other specs relate to the agent's observations, and
  there are additional specs for the adversarial policy that alters the
  environment.

  The adversary's specs should match the output of reset(), step_adversary(),
  _current_time_step(), and self.time_step, while the agent's specs should
  match reset_agent(), step(), _current_agent_time_step(), and
  self._agent_time_step.
  """

  def __init__(self, environment, check_dims=False, isolation=False):
    """Calls parent constructors and initializes adversary specs.

    Args:
      environment: A tf-agents PyEnvironment, or a `callable` that returns
        an environment of this form.
      check_dims: Whether the batch dimensions should be checked in the 'step'
        function.
      isolation: If True, create a dedicated thread for interactions with the
        environment. If False, interactions with the environment occur within
        whichever thread calls a method belonging to this class. See tf-agents
        parent class documentation for more details.
    """
    # Prevent parent class from using its own batched environment
    super(AdversarialTFPyEnvironment, self).__init__(
        environment, check_dims=check_dims, isolation=isolation)
    if not environment.batched:
      # Re-wrap the single environment so the adversary-specific methods
      # (reset_agent, reset_random, step_adversary, ...) exist on self._env.
      self._env = AdversarialBatchedPyEnvironment(
          [environment], multithreading=not self._pool)
    # Cached agent-view TimeStep; lazily filled by _current_agent_time_step().
    self._agent_time_step = None

    # Convert the wrapped environment's adversary array specs to tensor
    # specs, mirroring what the parent class does for the agent specs.
    self.adversary_action_spec = tensor_spec.from_spec(
        self._env.adversary_action_spec)
    self.adversary_time_step_spec = tensor_spec.from_spec(
        self._env.adversary_time_step_spec)
    self.adversary_observation_spec = tensor_spec.from_spec(
        self._env.adversary_observation_spec)
    # Flat dtype list used as the output signature of tf.numpy_function
    # calls that return adversary-view time steps.
    self._adversary_time_step_dtypes = [
        s.dtype for s in tf.nest.flatten(self.adversary_time_step_spec)
    ]

  # Make sure this is called without conversion from tf.function.
  @tf.autograph.experimental.do_not_convert()
  def reset_agent(self):
    """Resets only the agent and returns the agent's current TimeStep."""
    def _reset_py():
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        self._agent_time_step = self._env.reset_agent()

    def _isolated_reset_py():
      return self._execute(_reset_py)

    with tf.name_scope('reset_agent'):
      # Run the python-side reset from within the graph via numpy_function,
      # and only read the resulting time step after it has executed.
      reset_op = tf.numpy_function(
          _isolated_reset_py,
          [],  # No inputs.
          [],
          name='reset_py_func')
      with tf.control_dependencies([reset_op]):
        return self._current_agent_time_step()

  @tf.autograph.experimental.do_not_convert()
  def _current_time_step(self):
    """Returns the adversary-view TimeStep, resetting the env on first use."""
    def _current_time_step_py():
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        if self._time_step is None:
          self._time_step = self._env.reset()
        return tf.nest.flatten(self._time_step)

    def _isolated_current_time_step_py():
      return self._execute(_current_time_step_py)

    with tf.name_scope('current_time_step'):
      outputs = tf.numpy_function(
          _isolated_current_time_step_py,
          [],  # No inputs.
          self._time_step_dtypes,
          name='current_time_step_py_func')
      # First three outputs are always (step_type, reward, discount); the
      # remainder is the flattened observation structure.
      step_type, reward, discount = outputs[0:3]
      flat_observations = outputs[3:]
      return self._set_names_and_shapes(
          self.adversary_time_step_spec, self.adversary_observation_spec,
          step_type, reward, discount, *flat_observations)

  @tf.autograph.experimental.do_not_convert()
  def _current_agent_time_step(self):
    """Returns the agent-view TimeStep, calling reset_agent() on first use."""
    def _current_agent_time_step_py():
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        if self._agent_time_step is None:
          self._agent_time_step = self._env.reset_agent()
        return tf.nest.flatten(self._agent_time_step)

    def _isolated_current_agent_time_step_py():
      return self._execute(_current_agent_time_step_py)

    with tf.name_scope('current_agent_time_step'):
      outputs = tf.numpy_function(
          _isolated_current_agent_time_step_py,
          [],  # No inputs.
          self._time_step_dtypes,
          name='current_agent_time_step_py_func')
      step_type, reward, discount = outputs[0:3]
      flat_observations = outputs[3:]
      return self._set_names_and_shapes(
          self.time_step_spec(), self.observation_spec(),
          step_type, reward, discount, *flat_observations)

  @tf.autograph.experimental.do_not_convert()
  def reset_random(self):
    """Resets the environment via reset_random() and returns its TimeStep."""
    def _reset_py():
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        self._time_step = self._env.reset_random()

    def _isolated_reset_py():
      return self._execute(_reset_py)

    with tf.name_scope('reset_random'):
      reset_op = tf.numpy_function(
          _isolated_reset_py,
          [],  # No inputs.
          [],
          name='reset_py_func')
      with tf.control_dependencies([reset_op]):
        return self._current_random_time_step()

  @tf.autograph.experimental.do_not_convert()
  def _current_random_time_step(self):
    """Returns the current TimeStep, initializing via reset_random() if unset.

    Uses the agent's specs (time_step_spec / observation_spec).
    """
    def _current_random_time_step_py():
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        if self._time_step is None:
          self._time_step = self._env.reset_random()
        return tf.nest.flatten(self._time_step)

    def _isolated_current_random_time_step_py():
      return self._execute(_current_random_time_step_py)

    with tf.name_scope('current_random_time_step'):
      outputs = tf.numpy_function(
          _isolated_current_random_time_step_py,
          [],  # No inputs.
          self._time_step_dtypes,
          name='current_random_time_step_py_func')
      step_type, reward, discount = outputs[0:3]
      flat_observations = outputs[3:]
      return self._set_names_and_shapes(
          self.time_step_spec(), self.observation_spec(),
          step_type, reward, discount, *flat_observations)

  def _set_names_and_shapes(
      self, ts_spec, obs_spec, step_type, reward, discount, *flat_observations):
    """Returns a `TimeStep` namedtuple."""
    step_type = tf.identity(step_type, name='step_type')
    reward = tf.identity(reward, name='reward')
    discount = tf.identity(discount, name='discount')
    batch_shape = () if not self.batched else (self.batch_size,)
    batch_shape = tf.TensorShape(batch_shape)
    if not tf.executing_eagerly():
      # Shapes are not required in eager mode.
      reward.set_shape(batch_shape.concatenate(ts_spec.reward.shape))
      step_type.set_shape(batch_shape)
      discount.set_shape(batch_shape)
    # Give each tensor a meaningful name and set the static shape.
    named_observations = []
    for obs, spec in zip(flat_observations, tf.nest.flatten(obs_spec)):
      named_observation = tf.identity(obs, name=spec.name)
      if not tf.executing_eagerly():
        named_observation.set_shape(batch_shape.concatenate(spec.shape))
      named_observations.append(named_observation)
    observations = tf.nest.pack_sequence_as(obs_spec, named_observations)
    return ts_lib.TimeStep(step_type, reward, discount, observations)

  # Make sure this is called without conversion from tf.function.
  @tf.autograph.experimental.do_not_convert()
  def _step(self, actions):
    """Steps the agent with `actions`, returning the agent-view TimeStep."""
    def _step_py(*flattened_actions):
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        # Rebuild the structured action from the flat tensor list before
        # handing it to the python environment.
        packed = tf.nest.pack_sequence_as(
            structure=self.action_spec(), flat_sequence=flattened_actions)
        self._agent_time_step = self._env.step(packed)
        return tf.nest.flatten(self._agent_time_step)

    def _isolated_step_py(*flattened_actions):
      return self._execute(_step_py, *flattened_actions)

    with tf.name_scope('step'):
      flat_actions = [tf.identity(x) for x in tf.nest.flatten(actions)]
      if self._check_dims:
        # Validate that every action tensor carries the batch dimension.
        for action in flat_actions:
          dim_value = tf.compat.dimension_value(action.shape[0])
          if (action.shape.rank == 0 or
              (dim_value is not None and dim_value != self.batch_size)):
            raise ValueError(
                'Expected actions whose major dimension is batch_size (%d), '
                'but saw action with shape %s:\n %s' %
                (self.batch_size, action.shape, action))
      outputs = tf.numpy_function(
          _isolated_step_py,
          flat_actions,
          self._time_step_dtypes,
          name='step_py_func')
      step_type, reward, discount = outputs[0:3]
      flat_observations = outputs[3:]
      return self._set_names_and_shapes(
          self.time_step_spec(), self.observation_spec(),
          step_type, reward, discount, *flat_observations)

  # Make sure this is called without conversion from tf.function.
  @tf.autograph.experimental.do_not_convert()
  def step_adversary(self, actions):
    """Applies an adversary action, returning the adversary-view TimeStep."""
    def _step_adversary_py(*flattened_actions):
      with tf_py_environment._check_not_called_concurrently(self._lock):  # pylint:disable=protected-access
        packed = tf.nest.pack_sequence_as(
            structure=self.adversary_action_spec,
            flat_sequence=flattened_actions)
        self._time_step = self._env.step_adversary(packed)
        return tf.nest.flatten(self._time_step)

    def _isolated_step_adversary_py(*flattened_actions):
      return self._execute(_step_adversary_py, *flattened_actions)

    with tf.name_scope('step_adversary'):
      flat_actions = [tf.identity(x) for x in tf.nest.flatten(actions)]
      if self._check_dims:
        for action in flat_actions:
          dim_value = tf.compat.dimension_value(action.shape[0])
          if (action.shape.rank == 0 or
              (dim_value is not None and dim_value != self.batch_size)):
            raise ValueError(
                'Expected adversary actions whose major dimension is batch_size '
                '(%d), but saw action with shape %s:\n %s' %
                (self.batch_size, action.shape, action))
      outputs = tf.numpy_function(
          _isolated_step_adversary_py,
          flat_actions,
          self._adversary_time_step_dtypes,
          name='step_adversary_py_func')
      step_type, reward, discount = outputs[0:3]
      flat_observations = outputs[3:]
      return self._set_names_and_shapes(
          self.adversary_time_step_spec, self.adversary_observation_spec,
          step_type, reward, discount, *flat_observations)
| [
"tf_agents.environments.batched_py_environment.unstack_actions",
"tensorflow.autograph.experimental.do_not_convert",
"tensorflow.numpy_function",
"tf_agents.environments.wrappers.TimeLimit",
"tf_agents.utils.nest_utils.flatten_up_to",
"tf_agents.environments.tf_py_environment._check_not_called_concurrentl... | [((3695, 3721), 'gym.spec', 'gym.spec', (['environment_name'], {}), '(environment_name)\n', (3703, 3721), False, 'import gym\n'), ((14686, 14728), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (14726, 14728), True, 'import tensorflow as tf\n'), ((15287, 15329), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (15327, 15329), True, 'import tensorflow as tf\n'), ((16220, 16262), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (16260, 16262), True, 'import tensorflow as tf\n'), ((17203, 17245), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (17243, 17245), True, 'import tensorflow as tf\n'), ((17802, 17844), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (17842, 17844), True, 'import tensorflow as tf\n'), ((20027, 20069), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (20067, 20069), True, 'import tensorflow as tf\n'), ((21613, 21655), 'tensorflow.autograph.experimental.do_not_convert', 'tf.autograph.experimental.do_not_convert', ([], {}), '()\n', (21653, 21655), True, 'import tensorflow as tf\n'), ((4171, 4213), 'tf_agents.environments.wrappers.TimeLimit', 'wrappers.TimeLimit', (['env', 'max_episode_steps'], {}), '(env, max_episode_steps)\n', (4189, 4213), False, 'from tf_agents.environments import wrappers\n'), ((4964, 5062), 'tf_agents.environments.gym_wrapper.spec_from_gym_space', 'gym_wrapper.spec_from_gym_space', (['self._gym_env.adversary_observation_space'], {'name': '"""observation"""'}), "(self._gym_env.adversary_observation_space,\n name='observation')\n", (4995, 5062), False, 'from 
tf_agents.environments import gym_wrapper\n'), ((5101, 5190), 'tf_agents.environments.gym_wrapper.spec_from_gym_space', 'gym_wrapper.spec_from_gym_space', (['self._gym_env.adversary_action_space'], {'name': '"""action"""'}), "(self._gym_env.adversary_action_space, name=\n 'action')\n", (5132, 5190), False, 'from tf_agents.environments import gym_wrapper\n'), ((5350, 5398), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self.adversary_observation_spec'], {}), '(self.adversary_observation_spec)\n', (5365, 5398), True, 'import tensorflow as tf\n'), ((6559, 6629), 'tf_agents.utils.nest_utils.flatten_up_to', 'nest_utils.flatten_up_to', (['self.adversary_observation_spec', 'observation'], {}), '(self.adversary_observation_spec, observation)\n', (6583, 6629), False, 'from tf_agents.utils import nest_utils\n'), ((6816, 6895), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', (['self.adversary_observation_spec', 'matched_observations'], {}), '(self.adversary_observation_spec, matched_observations)\n', (6840, 6895), True, 'import tensorflow as tf\n'), ((14222, 14276), 'tf_agents.specs.tensor_spec.from_spec', 'tensor_spec.from_spec', (['self._env.adversary_action_spec'], {}), '(self._env.adversary_action_spec)\n', (14243, 14276), False, 'from tf_agents.specs import tensor_spec\n'), ((14322, 14379), 'tf_agents.specs.tensor_spec.from_spec', 'tensor_spec.from_spec', (['self._env.adversary_time_step_spec'], {}), '(self._env.adversary_time_step_spec)\n', (14343, 14379), False, 'from tf_agents.specs import tensor_spec\n'), ((14427, 14486), 'tf_agents.specs.tensor_spec.from_spec', 'tensor_spec.from_spec', (['self._env.adversary_observation_spec'], {}), '(self._env.adversary_observation_spec)\n', (14448, 14486), False, 'from tf_agents.specs import tensor_spec\n'), ((18941, 18981), 'tensorflow.identity', 'tf.identity', (['step_type'], {'name': '"""step_type"""'}), "(step_type, name='step_type')\n", (18952, 18981), True, 'import tensorflow as tf\n'), ((18995, 19029), 
'tensorflow.identity', 'tf.identity', (['reward'], {'name': '"""reward"""'}), "(reward, name='reward')\n", (19006, 19029), True, 'import tensorflow as tf\n'), ((19045, 19083), 'tensorflow.identity', 'tf.identity', (['discount'], {'name': '"""discount"""'}), "(discount, name='discount')\n", (19056, 19083), True, 'import tensorflow as tf\n'), ((19167, 19194), 'tensorflow.TensorShape', 'tf.TensorShape', (['batch_shape'], {}), '(batch_shape)\n', (19181, 19194), True, 'import tensorflow as tf\n'), ((19831, 19885), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', (['obs_spec', 'named_observations'], {}), '(obs_spec, named_observations)\n', (19855, 19885), True, 'import tensorflow as tf\n'), ((19898, 19956), 'tf_agents.trajectories.time_step.TimeStep', 'ts_lib.TimeStep', (['step_type', 'reward', 'discount', 'observations'], {}), '(step_type, reward, discount, observations)\n', (19913, 19956), True, 'from tf_agents.trajectories import time_step as ts_lib\n'), ((7526, 7588), 'tf_agents.trajectories.time_step.termination', 'ts_lib.termination', (['observation', 'reward'], {'outer_dims': 'outer_dims'}), '(observation, reward, outer_dims=outer_dims)\n', (7544, 7588), True, 'from tf_agents.trajectories import time_step as ts_lib\n'), ((7612, 7689), 'tf_agents.trajectories.time_step.transition', 'ts_lib.transition', (['observation', 'reward', 'self._discount'], {'outer_dims': 'outer_dims'}), '(observation, reward, self._discount, outer_dims=outer_dims)\n', (7629, 7689), True, 'from tf_agents.trajectories import time_step as ts_lib\n'), ((8203, 8265), 'tf_agents.trajectories.time_step.termination', 'ts_lib.termination', (['observation', 'reward'], {'outer_dims': 'outer_dims'}), '(observation, reward, outer_dims=outer_dims)\n', (8221, 8265), True, 'from tf_agents.trajectories import time_step as ts_lib\n'), ((8289, 8366), 'tf_agents.trajectories.time_step.transition', 'ts_lib.transition', (['observation', 'reward', 'self._discount'], {'outer_dims': 'outer_dims'}), 
'(observation, reward, self._discount, outer_dims=outer_dims)\n', (8306, 8366), True, 'from tf_agents.trajectories import time_step as ts_lib\n'), ((11310, 11352), 'tf_agents.utils.nest_utils.stack_nested_arrays', 'nest_utils.stack_nested_arrays', (['time_steps'], {}), '(time_steps)\n', (11340, 11352), False, 'from tf_agents.utils import nest_utils\n'), ((11581, 11623), 'tf_agents.utils.nest_utils.stack_nested_arrays', 'nest_utils.stack_nested_arrays', (['time_steps'], {}), '(time_steps)\n', (11611, 11623), False, 'from tf_agents.utils import nest_utils\n'), ((11706, 11746), 'tf_agents.utils.nest_utils.unbatch_nested_array', 'nest_utils.unbatch_nested_array', (['actions'], {}), '(actions)\n', (11737, 11746), False, 'from tf_agents.utils import nest_utils\n'), ((11817, 11858), 'tf_agents.utils.nest_utils.batch_nested_array', 'nest_utils.batch_nested_array', (['time_steps'], {}), '(time_steps)\n', (11846, 11858), False, 'from tf_agents.utils import nest_utils\n'), ((11895, 11942), 'tf_agents.environments.batched_py_environment.unstack_actions', 'batched_py_environment.unstack_actions', (['actions'], {}), '(actions)\n', (11933, 11942), False, 'from tf_agents.environments import batched_py_environment\n'), ((12333, 12375), 'tf_agents.utils.nest_utils.stack_nested_arrays', 'nest_utils.stack_nested_arrays', (['time_steps'], {}), '(time_steps)\n', (12363, 12375), False, 'from tf_agents.utils import nest_utils\n'), ((15018, 15046), 'tensorflow.name_scope', 'tf.name_scope', (['"""reset_agent"""'], {}), "('reset_agent')\n", (15031, 15046), True, 'import tensorflow as tf\n'), ((15065, 15132), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_reset_py', '[]', '[]'], {'name': '"""reset_py_func"""'}), "(_isolated_reset_py, [], [], name='reset_py_func')\n", (15082, 15132), True, 'import tensorflow as tf\n'), ((15736, 15770), 'tensorflow.name_scope', 'tf.name_scope', (['"""current_time_step"""'], {}), "('current_time_step')\n", (15749, 15770), True, 'import tensorflow 
as tf\n'), ((15788, 15904), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_current_time_step_py', '[]', 'self._time_step_dtypes'], {'name': '"""current_time_step_py_func"""'}), "(_isolated_current_time_step_py, [], self.\n _time_step_dtypes, name='current_time_step_py_func')\n", (15805, 15904), True, 'import tensorflow as tf\n'), ((16717, 16757), 'tensorflow.name_scope', 'tf.name_scope', (['"""current_agent_time_step"""'], {}), "('current_agent_time_step')\n", (16730, 16757), True, 'import tensorflow as tf\n'), ((16775, 16903), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_current_agent_time_step_py', '[]', 'self._time_step_dtypes'], {'name': '"""current_agent_time_step_py_func"""'}), "(_isolated_current_agent_time_step_py, [], self.\n _time_step_dtypes, name='current_agent_time_step_py_func')\n", (16792, 16903), True, 'import tensorflow as tf\n'), ((17531, 17560), 'tensorflow.name_scope', 'tf.name_scope', (['"""reset_random"""'], {}), "('reset_random')\n", (17544, 17560), True, 'import tensorflow as tf\n'), ((17579, 17646), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_reset_py', '[]', '[]'], {'name': '"""reset_py_func"""'}), "(_isolated_reset_py, [], [], name='reset_py_func')\n", (17596, 17646), True, 'import tensorflow as tf\n'), ((18286, 18327), 'tensorflow.name_scope', 'tf.name_scope', (['"""current_random_time_step"""'], {}), "('current_random_time_step')\n", (18299, 18327), True, 'import tensorflow as tf\n'), ((18345, 18475), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_current_random_time_step_py', '[]', 'self._time_step_dtypes'], {'name': '"""current_random_time_step_py_func"""'}), "(_isolated_current_random_time_step_py, [], self.\n _time_step_dtypes, name='current_random_time_step_py_func')\n", (18362, 18475), True, 'import tensorflow as tf\n'), ((19206, 19228), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (19226, 19228), True, 'import tensorflow as tf\n'), 
((19563, 19588), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['obs_spec'], {}), '(obs_spec)\n', (19578, 19588), True, 'import tensorflow as tf\n'), ((19617, 19649), 'tensorflow.identity', 'tf.identity', (['obs'], {'name': 'spec.name'}), '(obs, name=spec.name)\n', (19628, 19649), True, 'import tensorflow as tf\n'), ((20586, 20607), 'tensorflow.name_scope', 'tf.name_scope', (['"""step"""'], {}), "('step')\n", (20599, 20607), True, 'import tensorflow as tf\n'), ((21160, 21259), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_step_py', 'flat_actions', 'self._time_step_dtypes'], {'name': '"""step_py_func"""'}), "(_isolated_step_py, flat_actions, self._time_step_dtypes,\n name='step_py_func')\n", (21177, 21259), True, 'import tensorflow as tf\n'), ((22229, 22260), 'tensorflow.name_scope', 'tf.name_scope', (['"""step_adversary"""'], {}), "('step_adversary')\n", (22242, 22260), True, 'import tensorflow as tf\n'), ((22823, 22953), 'tensorflow.numpy_function', 'tf.numpy_function', (['_isolated_step_adversary_py', 'flat_actions', 'self._adversary_time_step_dtypes'], {'name': '"""step_adversary_py_func"""'}), "(_isolated_step_adversary_py, flat_actions, self.\n _adversary_time_step_dtypes, name='step_adversary_py_func')\n", (22840, 22953), True, 'import tensorflow as tf\n'), ((6770, 6803), 'numpy.asarray', 'np.asarray', (['obs'], {'dtype': 'spec.dtype'}), '(obs, dtype=spec.dtype)\n', (6780, 6803), True, 'import numpy as np\n'), ((9204, 9255), 'tensorflow.cast', 'tf.cast', (['self._envs[0].n_clutter_placed', 'tf.float32'], {}), '(self._envs[0].n_clutter_placed, tf.float32)\n', (9211, 9255), True, 'import tensorflow as tf\n'), ((9486, 9537), 'tensorflow.cast', 'tf.cast', (['self._envs[0].distance_to_goal', 'tf.float32'], {}), '(self._envs[0].distance_to_goal, tf.float32)\n', (9493, 9537), True, 'import tensorflow as tf\n'), ((9772, 9833), 'tensorflow.cast', 'tf.cast', (['self._envs[0].deliberate_agent_placement', 'tf.float32'], {}), 
'(self._envs[0].deliberate_agent_placement, tf.float32)\n', (9779, 9833), True, 'import tensorflow as tf\n'), ((10624, 10667), 'tensorflow.cast', 'tf.cast', (['self._envs[0].passable', 'tf.float32'], {}), '(self._envs[0].passable, tf.float32)\n', (10631, 10667), True, 'import tensorflow as tf\n'), ((10904, 10959), 'tensorflow.cast', 'tf.cast', (['self._envs[0].shortest_path_length', 'tf.float32'], {}), '(self._envs[0].shortest_path_length, tf.float32)\n', (10911, 10959), True, 'import tensorflow as tf\n'), ((14563, 14609), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self.adversary_time_step_spec'], {}), '(self.adversary_time_step_spec)\n', (14578, 14609), True, 'import tensorflow as tf\n'), ((14786, 14846), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (14834, 14846), False, 'from tf_agents.environments import tf_py_environment\n'), ((15199, 15234), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[reset_op]'], {}), '([reset_op])\n', (15222, 15234), True, 'import tensorflow as tf\n'), ((15406, 15466), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (15454, 15466), False, 'from tf_agents.environments import tf_py_environment\n'), ((15600, 15632), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._time_step'], {}), '(self._time_step)\n', (15615, 15632), True, 'import tensorflow as tf\n'), ((16351, 16411), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (16399, 16411), False, 'from tf_agents.environments import tf_py_environment\n'), ((16563, 16601), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._agent_time_step'], {}), '(self._agent_time_step)\n', (16578, 16601), True, 'import 
tensorflow as tf\n'), ((17304, 17364), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (17352, 17364), False, 'from tf_agents.environments import tf_py_environment\n'), ((17713, 17748), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[reset_op]'], {}), '([reset_op])\n', (17736, 17748), True, 'import tensorflow as tf\n'), ((17935, 17995), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (17983, 17995), False, 'from tf_agents.environments import tf_py_environment\n'), ((18136, 18168), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._time_step'], {}), '(self._time_step)\n', (18151, 18168), True, 'import tensorflow as tf\n'), ((19663, 19685), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (19683, 19685), True, 'import tensorflow as tf\n'), ((20147, 20207), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (20195, 20207), False, 'from tf_agents.environments import tf_py_environment\n'), ((20432, 20470), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._agent_time_step'], {}), '(self._agent_time_step)\n', (20447, 20470), True, 'import tensorflow as tf\n'), ((20631, 20645), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (20642, 20645), True, 'import tensorflow as tf\n'), ((21752, 21812), 'tf_agents.environments.tf_py_environment._check_not_called_concurrently', 'tf_py_environment._check_not_called_concurrently', (['self._lock'], {}), '(self._lock)\n', (21800, 21812), False, 'from tf_agents.environments import tf_py_environment\n'), ((21866, 21965), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', ([], {'structure': 
'self.adversary_action_spec', 'flat_sequence': 'flattened_actions'}), '(structure=self.adversary_action_spec,\n flat_sequence=flattened_actions)\n', (21890, 21965), True, 'import tensorflow as tf\n'), ((22061, 22093), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._time_step'], {}), '(self._time_step)\n', (22076, 22093), True, 'import tensorflow as tf\n'), ((22284, 22298), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (22295, 22298), True, 'import tensorflow as tf\n'), ((9312, 9353), 'tensorflow.cast', 'tf.cast', (['env.n_clutter_placed', 'tf.float32'], {}), '(env.n_clutter_placed, tf.float32)\n', (9319, 9353), True, 'import tensorflow as tf\n'), ((9594, 9635), 'tensorflow.cast', 'tf.cast', (['env.distance_to_goal', 'tf.float32'], {}), '(env.distance_to_goal, tf.float32)\n', (9601, 9635), True, 'import tensorflow as tf\n'), ((9890, 9941), 'tensorflow.cast', 'tf.cast', (['env.deliberate_agent_placement', 'tf.float32'], {}), '(env.deliberate_agent_placement, tf.float32)\n', (9897, 9941), True, 'import tensorflow as tf\n'), ((10724, 10757), 'tensorflow.cast', 'tf.cast', (['env.passable', 'tf.float32'], {}), '(env.passable, tf.float32)\n', (10731, 10757), True, 'import tensorflow as tf\n'), ((11016, 11061), 'tensorflow.cast', 'tf.cast', (['env.shortest_path_length', 'tf.float32'], {}), '(env.shortest_path_length, tf.float32)\n', (11023, 11061), True, 'import tensorflow as tf\n'), ((20655, 20679), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['actions'], {}), '(actions)\n', (20670, 20679), True, 'import tensorflow as tf\n'), ((20766, 20808), 'tensorflow.compat.dimension_value', 'tf.compat.dimension_value', (['action.shape[0]'], {}), '(action.shape[0])\n', (20791, 20808), True, 'import tensorflow as tf\n'), ((22308, 22332), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['actions'], {}), '(actions)\n', (22323, 22332), True, 'import tensorflow as tf\n'), ((22419, 22461), 'tensorflow.compat.dimension_value', 'tf.compat.dimension_value', 
(['action.shape[0]'], {}), '(action.shape[0])\n', (22444, 22461), True, 'import tensorflow as tf\n')] |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["<NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
import numpy as np
class DistributionPlan(object):
    """Describes how the rows of an (n_rows x n_columns) problem are
    distributed over the ranks of an MPI-style communicator.

    Rows are split into near-equal contiguous blocks, one block per rank,
    while every rank holds all columns ("rowblocks" strategy).
    """

    def __init__(self, communicator, n_columns, n_rows, strategy="rowblocks"):
        """Creates a plan.

        Args:
            communicator: Object exposing Get_size() and Get_rank()
                (e.g. an mpi4py communicator).
            n_columns: Total number of columns.
            n_rows: Total number of rows.
            strategy: Distribution strategy name; only "rowblocks" is
                implemented.
        """
        self._communicator = communicator
        self._n_rows = n_rows
        self._n_columns = n_columns
        self._strategy = strategy

    def communicator(self):
        """Returns the communicator this plan distributes over."""
        return self._communicator

    def numberRanks(self):
        """Returns the total number of ranks in the communicator."""
        return self._communicator.Get_size()

    def ranks(self):
        """Returns an array of all rank indices."""
        return np.arange(self.numberRanks())

    def myRank(self):
        """Returns the rank of the calling process."""
        return self._communicator.Get_rank()

    def _rowsForRank(self):
        """Splits all row indices into contiguous chunks, one per rank."""
        rows = np.arange(self._n_rows)
        return np.array_split(rows, self.numberRanks())

    def rowDistribution(self):
        """Returns (first_row, last_row) per rank, bounds inclusive."""
        return [(rows.min(), rows.max()) for rows in self._rowsForRank()]

    def rows(self, rank):
        """Returns the global row indices owned by the given rank."""
        return self._rowsForRank()[rank]

    def rankByGlobalIndex(self, global_index):
        """Returns the rank that owns the given global row index."""
        for i_rank in self.ranks():
            if global_index in self.rows(i_rank):
                return i_rank
        raise Exception("Global index not allocated to any rank.")

    def columns(self, rank):
        """Returns the column indices held by a rank (all columns, for
        every rank, under the rowblocks strategy)."""
        return np.arange(self._n_columns)

    def shape(self, rank):
        """Returns (local rows, local columns) for the given rank."""
        return (len(self.rows(rank)), len(self.columns(rank)))

    def localRows(self):
        """Returns the global row indices owned by the calling rank."""
        return self.rows(self.myRank())

    def localColumns(self):
        """Returns the column indices held by the calling rank."""
        return self.columns(self.myRank())

    def localShape(self):
        """Returns (local rows, local columns) for the calling rank."""
        return self.shape(self.myRank())

    def totalShape(self):
        """Returns the global (n_rows, n_columns) shape."""
        return (self._n_rows, self._n_columns)

    def globalToLocalIndex(self, global_index):
        """Converts a global row index to the calling rank's local index.

        Raises:
            Exception: If the global index is not owned by this rank.
        """
        local_rows = self.localRows()
        local_index = global_index - local_rows[0]
        if local_index < 0:
            raise Exception("Negative index")
        if local_index >= len(local_rows):
            raise Exception("%i: Index too large (global/local/number local rows): %i %i %i" % (self.myRank(), global_index, local_index, len(local_rows)))
        return local_index

    def localToGlobalIndex(self, local_index):
        """Converts a local row index of the calling rank to a global index.

        Raises:
            Exception: If the local index is out of range.
        """
        local_rows = self.localRows()
        if local_index < 0:
            raise Exception("Negative index")
        if local_index >= len(local_rows):
            raise Exception("Index too large %i %i" % (local_index, len(local_rows)) )
        return local_rows[0] + local_index

    def __eq__(self, other):
        """Plans are equal iff they share the same communicator object and
        have identical dimensions and strategy."""
        # Returning NotImplemented (rather than raising AttributeError)
        # lets Python fall back to its default comparison for foreign types.
        if not isinstance(other, DistributionPlan):
            return NotImplemented
        # Communicators are compared by identity, not value.
        return (self.communicator() is other.communicator()
                and self._n_rows == other._n_rows
                and self._n_columns == other._n_columns
                and self._strategy == other._strategy)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
"numpy.arange"
] | [((2025, 2048), 'numpy.arange', 'np.arange', (['self._n_rows'], {}), '(self._n_rows)\n', (2034, 2048), True, 'import numpy as np\n'), ((2768, 2794), 'numpy.arange', 'np.arange', (['self._n_columns'], {}), '(self._n_columns)\n', (2777, 2794), True, 'import numpy as np\n')] |
# Copyright 2021 Phasecraft Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import datetime
import logging
import time
from collections import defaultdict
from itertools import chain
import cirq
import uncertainties
import cirq_google as cg
import numpy as np
import pandas as pd
import uncertainties.unumpy as unp
from cirq.google import optimized_for_sycamore
from uncertainties import ufloat
import fhvqe.circuit
import fhvqe.error_mitigation
from fhvqe.circuit import (ansatz, ansatz_multilayer_circuit,
ansatz_multilayer_circuit_merge,
ansatz_multistep, prepH, prepV, prepV2wrap)
from fhvqe.tools import map_site_to_JW
# Module-wide logger; configured by start_logger().
module_logger = logging.getLogger("fhvqe.experiment")
# Measurement recipe: qubit pairs to combine, an optional basis-change
# circuit appended before measuring, and the analysis type tag.
Measurements = collections.namedtuple("Measurements", "pairs prep analysis")
# Full circuit bundle for one measurement setting on a given device.
Circuits = collections.namedtuple("Circuits", "device, initial, final, ansatz, type, analysis")
# subbatch describes a sub-grouping of a single batch, for example if the
# measurement is part of the same gradient evaluation.
# descriptor is some extra descriptor of the measurement/circuit, for example
# the sign -1/+1/0 which means that it's the datapoint -1 delta away from some
# parameters / +1 delta away from some parameters / at the parameters (all
# giving a different set of theta).
# type is the measurement type of the circuit.
# batchiteration is the iterator over different thetas (for a single theta we
# have multiple circuits corresponding to the different measurements that
# need to be taken).
Descriptions = collections.namedtuple("Descriptions", "subbatch, descriptor, type, analysis, batchiteration")
# Module-level store of details from the most recent energy evaluation;
# repopulated by the extract_values* functions.
measured_values = {}
def start_logger(logfile="fhvqe.log"):
    """Initializes the logger for fhvqe module.

    Args:
        logfile -- path of the file receiving DEBUG-level records
    Returns:
        The configured 'fhvqe' logger: DEBUG records to *logfile*,
        ERROR and above to the console.
    """
    # create logger with 'fhvqe'
    logger = logging.getLogger('fhvqe')
    logger.setLevel(logging.DEBUG)
    # Drop handlers from any previous call so that re-initializing does not
    # stack duplicate handlers (which would emit every record twice).
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
        old_handler.close()
    # create file handler which logs even debug messages
    fh = logging.FileHandler(logfile)
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
def analyze(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Returns function which analyzes a numpy array of measurement results.

    The returned closure expects a 2-D integer array indexed as
    results[qubit][shot] (i.e. one row of 0/1 samples per qubit).

    Args:
        measurement_type -- "onsite", "horiz", "vert", "vert0" or "vert1"
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter
    Returns:
        Function for analysis of results (None for an unknown type)
    """
    if __debug__:
        # Resolve the module logger by name so this factory is self-contained.
        logging.getLogger("fhvqe.experiment").debug(
            f"Preparing analysis function for {measurement_type}")

    def analyzeO(results):
        """Onsite energy: U * sum over pairs of <n_up n_down>."""
        sum_tot = 0
        for (q1, q2) in pairs:
            sum_tot += np.mean(results[q1] * results[q2])
        return U * sum_tot

    def analyzeH(results):
        """Horizontal hopping term: -t * sum over pairs of <n_q2 - n_q1>."""
        sum_tot = 0
        for (q1, q2) in pairs:
            sum_tot += np.mean(results[q2] - results[q1])
        return -t * sum_tot

    def analyzeV(results):
        """Vertical hopping term with Jordan-Wigner parity corrections."""
        sum_tot = 0
        for (q1, q2) in pairs:
            res = results[q2] - results[q1]
            # Parity of the qubits strictly between q1 and q2 flips the sign.
            parity = 0
            for q in range(q1 + 1, q2):
                parity += results[q]
            parity = 1 - (parity % 2) * 2
            sum_tot += np.mean(res * parity)
        return -t * sum_tot

    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def analyze_exact(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Returns a function which analyzes an exact-simulation state vector.

    Unlike :func:`analyze`, the returned closure expects the full state
    vector of the simulated circuit; energies are obtained by summing the
    probability weight of the relevant computational-basis states.

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter
    Returns:
        Function for analysis of results
    """
    if __debug__:
        module_logger.debug(f"Preparing exact analysis function for {measurement_type}")
    def _parity_indices(q1, q2, nh):
        """Internal function giving parity for qubits q1 to q2."""
        index_list = range(q1+1, q2)
        return index_list
    total_len = nh*nv*2
    import itertools
    # Enumerate all bit patterns of the qubits OTHER than the measured pair;
    # the pair's own bit values are inserted at positions q1 and q2 below.
    lst = list(itertools.product([0, 1], repeat = total_len - 2))
    if measurement_type == "onsite":
        all_indices = []
        for (q1, q2) in pairs:
            # Basis states with both qubits of the pair equal to 1.
            all_indices += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
    if measurement_type in ["horiz"]:
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            # "plus" states have (q1,q2) = (0,1); "minus" states the reverse.
            all_indices_plus += [cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
            all_indices_minus += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]) for element in lst]
    if measurement_type in ["vert0", "vert1", "vert"]:
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            parity = 1
            for element in lst:
                # Jordan-Wigner parity of the qubits between q1 and q2 swaps
                # which sign bucket a basis state contributes to.
                parity = 1 - (sum(element[q1:q2-1]) % 2) * 2
                if parity == 1:
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
                else:
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
    def analyzeO(results):
        """Analyzes onsite measurement results.
        """
        # Probability weight of all doubly-occupied basis states.
        sum_tot = np.sum(np.abs(results[all_indices])**2)
        # print(f'Onsite: {U * sum_tot}')
        return U * sum_tot
    def analyzeH(results):
        """Analyzes horizontal measurement results."""
        sum_tot = np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2)
        # print(f'Horiz: {-t * sum_tot}')
        return -t * sum_tot
    def analyzeV(results):
        """Analyzes vertical measurement results (applies parity corrections)."""
        #TODO: fix this one... parity needs to be handled..
        sum_tot = np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2)
        return -t * sum_tot
    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def analyze_exact_mgd(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Returns a function which analyzes an exact-simulation state vector,
    wrapping the result in a ufloat for use with BayesMGD.

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter
    Returns:
        Function for analysis of results
    """
    if __debug__:
        module_logger.debug(f"Preparing exact analysis function for {measurement_type}")
    def _parity_indices(q1, q2, nh):
        """Internal function giving parity for qubits q1 to q2."""
        index_list = range(q1+1, q2)
        return index_list
    print(f"analyze_exact_mgd {measurement_type}")
    total_len = nh*nv*2
    import itertools
    # Enumerate all bit patterns of the qubits OTHER than the measured pair;
    # the pair's own bit values are inserted at positions q1 and q2 below.
    lst = list(itertools.product([0, 1], repeat = total_len - 2))
    if measurement_type == "onsite":
        all_indices = []
        for (q1, q2) in pairs:
            # Basis states with both qubits of the pair equal to 1.
            all_indices += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
    if measurement_type in ["horiz"]:
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            # "plus" states have (q1,q2) = (0,1); "minus" states the reverse.
            all_indices_plus += [cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
            all_indices_minus += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]) for element in lst]
    if measurement_type in ["vert0", "vert1", "vert"]:
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            parity = 1
            for element in lst:
                # Jordan-Wigner parity of the qubits between q1 and q2 swaps
                # which sign bucket a basis state contributes to.
                parity = 1 - (sum(element[q1:q2-1]) % 2) * 2
                if parity == 1:
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
                else:
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
    def analyzeO(results):
        """Analyzes onsite measurement results.
        """
        # NOTE(review): the second ufloat argument looks like sqrt(#indices),
        # not a statistical error estimate -- confirm intended uncertainty.
        sum_tot = ufloat(np.sum(np.abs(results[all_indices])**2), np.sqrt(len(results[all_indices])))
        # print(f'Onsite: {U * sum_tot}')
        return U * sum_tot
    def analyzeH(results):
        """Analyzes horizontal measurement results."""
        sum_tot = ufloat(np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2), np.sqrt(len(results[all_indices_plus])) )
        # print(f'Horiz: {-t * sum_tot}')
        return -t * sum_tot
    def analyzeV(results):
        """Analyzes vertical measurement results (applies parity corrections)."""
        #TODO: fix this one... parity needs to be handled..
        sum_tot = ufloat(np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2), np.sqrt(len(results[all_indices_plus])) )
        return -t * sum_tot
    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def analyze_mgd(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Return an analysis closure that propagates shot-noise uncertainties.

    Same contract as analyze(), but every returned energy is a ufloat whose
    standard error is the sample standard deviation divided by sqrt(shots).

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter
    Returns:
        Function for analysis of results
    """
    if __debug__:
        module_logger.debug(f"Preparing analysis function for {measurement_type}")

    def _parity_range(q1, q2):
        """Qubits strictly between q1 and q2 (Jordan-Wigner string)."""
        return range(q1 + 1, q2)

    def analyzeO(results):
        """Onsite energy U * <n_up n_down> with error bars."""
        total = ufloat(0., 0.)
        for (q1, q2) in pairs:
            product = results[q1] * results[q2]
            shots = len(results[q1])
            total += ufloat(np.mean(product), np.std(product, ddof=1) / np.sqrt(shots))
        return U * total

    def analyzeH(results):
        """Horizontal hopping energy with error bars."""
        total = ufloat(0., 0.)
        for (q1, q2) in pairs:
            diff = results[q2] - results[q1]
            shots = len(results[q1])
            total += ufloat(np.mean(diff), np.std(diff, ddof=1) / np.sqrt(shots))
        return -t * total

    def analyzeV(results):
        """Vertical hopping energy with parity corrections and error bars."""
        total = ufloat(0., 0.)
        for (q1, q2) in pairs:
            diff = results[q2] - results[q1]
            parity = 0
            for q in _parity_range(q1, q2):
                parity += results[q]
            parity = 1 - (parity % 2) * 2
            signed = diff * parity
            total += ufloat(np.mean(signed), np.std(signed, ddof=1) / np.sqrt(len(diff)))
        return -t * total

    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def create_measurements(nh, nv, offset, measurement_type):
    """Creates necessary measurement details for a given type on a given lattice.

    Given the lattice size, whether odd or even pairs are being measured,
    and the measurement type, this function returns a Measurements namedtuple
    with the pairs of qubits to be measured, the circuit preparation
    function, and the measurement_type tag passed on to the analysis step.

    The measurement_type can be:
        "onsite", "horiz", "vert", "vert0", "vert1"

    Args:
        nh -- number of horizontal sites
        nv -- number of vertical sites
        offset -- offset taking care of odd vs even pairing
        measurement_type -- onsite, horizontal or vertical measurement
    Returns:
        Measurements namedtuple (pairs, preparation circuit, analysis type)
    """
    n = nh * nv
    if measurement_type == "onsite":
        # Spin-up qubit i sits n positions before its spin-down partner.
        pairs = [(i, i + n) for i in range(n)]
        prep = None
    elif measurement_type == "horiz":
        pairs = [(i + j, i + j + 1) for i in range(0, 2 * n, nh) for j in range(offset, nh - 1, 2)]
        prep = prepH
    elif measurement_type == "vert":
        # Build vertical neighbours on the lattice, then map each site into
        # the Jordan-Wigner qubit ordering.
        site_pairs = [(i * nh + j, (i + 1) * nh + j) for i in range(offset, nv - 1, 2) for j in range(nh)]
        site_pairs += [(i * nh + j + n, (i + 1) * nh + j + n) for i in range(offset, nv - 1, 2) for j in range(0, nh)]
        pairs = [(map_site_to_JW(nh, nv, site1), map_site_to_JW(nh, nv, site2)) for (site1, site2) in site_pairs]
        prep = prepV
    elif measurement_type == "vert0":
        pairs = [(i + j, i + j + 1) for i in range(0, 2 * n, n) for j in range(1, n - 1, 2)]
        prep = prepV
    elif measurement_type == "vert1":
        pairs = [(i + j, i + j + 1) for i in range(0, 2 * n, n) for j in range(1, n - 1, 2)]
        prep = prepV2wrap(nh, nv)
    print(f"Prepped {measurement_type}, pairs={pairs}")
    return Measurements(pairs=pairs, prep=prep, analysis=measurement_type)
def create_all(nh, nv):
    """Creates all the necessary measurement details for a given lattice.

    This creates the measurement pairs assuming no extra details on the
    connection of the qubits.

    Args:
        nh -- number of horizontal sites
        nv -- number of vertical sites
    Returns:
        Dictionary of Measurements namedtuples keyed by measurement type.
    """
    measurement_types = ["onsite0"]
    if nh > 1:
        measurement_types.append("horiz0")
    if nh > 2:
        measurement_types.append("horiz1")
    if nv > 1:
        # nh == 2 lattices use the dedicated 2-column vertical measurements.
        measurement_types.append("vert02" if nh == 2 else "vert0")
    if nv > 2 or nh == 2:
        measurement_types.append("vert12" if nh == 2 else "vert1")
    print(f"Meas types {measurement_types}")
    # The trailing digit of each tag is the pairing offset; the rest is the
    # measurement type name.
    return {
        mtype: create_measurements(nh, nv, int(mtype[-1]), mtype[:-1])
        for mtype in measurement_types
    }
def create_executable(init_prog, ansatz_def, theta,
                      optimizer=optimized_for_sycamore,
                      optimizer_kwargs=None,
                      prep=None, **kwargs):
    """Create executable program with initial and final circuits and optimizations.

    Args:
        init_prog -- initial state preparation
        ansatz_def -- ansatz chosen for the circuit
        theta -- parameters circuits is being evaluated for
        optimizer -- optimization function (default optimized_for_sycamore)
        optimizer_kwargs -- extra keyword arguments forwarded to the optimizer
        prep -- measurement preparation circuit (default None)

    Keyword Args:
        remap -- required: the qubits to measure at the end of the circuit

    Returns:
        Executable program

    Raises:
        ValueError -- if the required 'remap' keyword argument is missing
    """
    def _check_one_two_qubit_moments(circuit):
        "Checks whether a moment only either contains one qubits gates or two."
        try:
            for i, moment in enumerate(circuit):
                num_qubits = len(list(moment)[0].qubits)
                for op in moment:
                    assert num_qubits == len(op.qubits)
        except:
            print("bad news, at least one of the moments is misaligned!")
    # Fail fast with a clear message; previously a missing 'remap' only
    # surfaced later as a NameError when adding the measurement layer.
    if "remap" not in kwargs:
        raise ValueError("create_executable requires the 'remap' keyword argument")
    qubits = kwargs["remap"]
    # Guard the optimizer call against optimizer_kwargs being None.
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    # Always build a real Circuit (the old list placeholder could never be
    # optimized or measured anyway).
    prog = cirq.Circuit()
    prog.append(init_prog)
    prog.append(ansatz_def)
    # add measurement prep
    if prep is not None:
        prog.append(prep)
    if __debug__:
        module_logger.debug(f"number of moments preoptimization: {len(prog)}")
        module_logger.debug(f"depth preoptimization: {len(cirq.Circuit(prog.all_operations()))}")
    # finally compile
    if optimizer is not None:
        prog = optimizer(prog, **optimizer_kwargs)
    if __debug__:
        module_logger.debug(f"number of moments post-optimization: {len(prog)}")
        module_logger.debug(f"depth post-optimization: {len(cirq.Circuit(prog.all_operations()))}")
    if __debug__:
        _check_one_two_qubit_moments(prog)
    # add measurements (do last so they are the final things to be executed)
    prog.append(cirq.measure(*qubits, key='x'))
    if __debug__:
        try:
            cirq.google.Sycamore.validate_circuit(prog)
        except:
            print("oh no, circuit is not sycamore compliant!")
            for i, moment in enumerate(prog):
                print("Moment {}:\n{}".format(i, moment))
            quit()
    return prog
def run_executables(circuits, run_args, qc):
    """Actually runs a set of quantum circuits.

    When run_args carries a "program_id", a time-based suffix is appended
    for the submission and the original value is restored afterwards.

    Args:
        circuits -- the quantum circuits to execute
        run_args -- metadata for the circuits
        qc -- Engine instance to run the circuits on
    Returns:
        Tuple (batch results, submitted program id or None, job id or None).
    """
    prog_id = None
    job_id = None
    has_program_id = "program_id" in run_args
    if has_program_id:
        saved_prog_id = run_args["program_id"]
        stamp = datetime.datetime.now().strftime("%H%M%S-%f")
        run_args["program_id"] = saved_prog_id + "-" + stamp
        prog_id = run_args["program_id"]
        job_id = run_args["job_id"]
    results = qc.run_batch(circuits, **run_args)
    if has_program_id:
        run_args["program_id"] = saved_prog_id
    return results, prog_id, job_id
def run_executables_exact(circuits, run_args, qc):
    """Run a set of quantum circuits using the exact simulator.

    Args:
        circuits -- the quantum circuits to execute
        run_args -- metadata for the circuits; only "qubit_order" is used
        qc -- not used
    Returns:
        Tuple (simulation results, None, None) mirroring run_executables.
    """
    from cirq import Simulator
    simulator = Simulator()
    outcomes = []
    for circ in circuits:
        # Drop the final measurement layer so the full state vector survives.
        trimmed = cirq.Circuit(circ.moments[:-1])
        outcomes.append(simulator.simulate(trimmed, qubit_order=run_args["qubit_order"]))
    return outcomes, None, None
def prepare_executables(batch_params, descriptions, measurement_set, run_args,
                        num_trials, qubits, optimizer=None,
                        optimizer_kwargs=None):
    """Prepares a set of quantum circuits for execution, by including the final
    measurement transformations required.

    Args:
        batch_params -- parameters for the circuits
        descriptions -- additional description
        measurement_set -- which measurement types to use
        run_args -- additional metadata
        num_trials -- the number of shots to use
        qubits -- which qubits to run on
        optimizer -- which optimizer to use
        optimizer_kwargs -- additional optimizer arguments
    Returns:
        Prepared circuits and additional details
    """
    extra_params = []
    circuits = []
    circuits_details = []
    module_logger.info("Preparing executables.")
    for i, (params, details) in enumerate(zip(batch_params, descriptions)):
        if __debug__:
            module_logger.debug(f"ansatz with params: {params}")
        # All measurements in the set share the same ansatz; compile it once
        # per parameter vector, in the variants with and without final hop.
        meas = measurement_set[0]
        ansatz_compiled_no_hop, ansatz_compiled_with_hop = ansatz_multilayer_circuit_merge(meas.ansatz,
                                                        params, qubits)
        for meas in measurement_set:
            meas_prep = meas.final
            ansatz_compiled = ansatz_compiled_with_hop
            # A non-Circuit `final` entry is a gate recipe that absorbs the
            # last hopping parameter, so use the no-hop ansatz variant.
            if meas.final is not None and not isinstance(meas.final, cirq.circuits.circuit.Circuit):
                meas_prep = cirq.Circuit(ansatz(meas.final, [params[-1][-1]], qubits))
                ansatz_compiled = ansatz_compiled_no_hop
            circ = create_executable(meas.initial,
                                     ansatz_compiled,
                                     params,
                                     prep=meas_prep,
                                     qubits=qubits,
                                     remap=qubits,
                                     optimizer=optimizer,
                                     optimizer_kwargs=optimizer_kwargs)
            if run_args is None: run_args = {}
            circuits.append(circ)
            # Extend the caller's description with this circuit's measurement
            # type, analysis tag and batch iteration index.
            circuits_details.append(Descriptions(*details + (meas.type, meas.analysis, i)))
            extra_params.append(None)
    run_args["params_list"] = extra_params
    run_args["repetitions"] = num_trials
    return circuits, run_args, circuits_details
def save_results(data, samples_filename):
    """Append retrieved samples to a text file, one sample per line,
    with a blank-line separator after each batch."""
    with open(samples_filename, 'a') as out:
        for sample in data:
            out.write(''.join(str(bit) for bit in sample) + '\n')
        out.write('\n\n')
def extract_values(batch_evals, thetas, circuits_details, results,
                   sample_error_mitigation=None,
                   noise_matrix=None,
                   value_error_mitigation=None,
                   samples_filename=None,
                   save_samples=False,
                   **kwargs):
    """Extract and analyse results returned from running the quantum circuits.

    Accumulates one energy per batch iteration and records per-measurement
    details in the module-level `measured_values` dictionary.
    """
    global measured_values
    energies = np.zeros(batch_evals)
    measured_values = {}
    per_evaluation = {}
    for desc, outcome in zip(circuits_details, results):
        sub = per_evaluation.setdefault(desc.subbatch, {})
        sub.setdefault(desc.descriptor, 0.0)
        params = thetas[desc.batchiteration]
        raw = outcome[0] if isinstance(outcome, list) else outcome
        data = raw.measurements['x'].astype(int)
        num_trials = data.shape[0]
        if sample_error_mitigation is not None:
            mitigation_args = []
            if "correct_m_type" in kwargs:
                mitigation_args += [noise_matrix, desc.type]
            data = sample_error_mitigation(data, *mitigation_args)
        # Save samples from the final onsite measurement to a file if needed
        if save_samples and samples_filename is not None and desc.descriptor == 0 and desc.type == "onsite0":
            save_results(data, samples_filename)
        value = desc.analysis(data.transpose())
        energies[desc.batchiteration] += value
        record = {"Em": value,
                  "num_trials": num_trials,
                  "post-processed": data.shape[0],
                  "params": params,
                  "grad": desc.subbatch,
                  "sgn": desc.descriptor,
                  }
        measured_values.setdefault(desc.type, []).append(record)
        sub[desc.descriptor] += value
    measured_values["all_E"] = per_evaluation
    return energies, measured_values
def extract_values_mgd(batch_evals, thetas, circuits_details, results,
                       sample_error_mitigation=None,
                       noise_matrix=None,
                       value_error_mitigation=None,
                       samples_filename=None,
                       save_samples=False,
                       **kwargs):
    """Extract and analyse results returned from running the quantum circuits,
    in a form suitable for use with BayesMGD.

    Energies are accumulated as ufloats; only nominal values enter the
    per-evaluation bookkeeping.
    """
    global measured_values
    energies = np.zeros(batch_evals, dtype=uncertainties.core.Variable)
    measured_values = {}
    per_evaluation = {}
    for desc, outcome in zip(circuits_details, results):
        sub = per_evaluation.setdefault(desc.subbatch, {})
        sub.setdefault(desc.descriptor, 0)
        params = thetas[desc.batchiteration]
        raw = outcome[0] if isinstance(outcome, list) else outcome
        data = raw.measurements['x'].astype(int)
        num_trials = data.shape[0]
        if sample_error_mitigation is not None:
            mitigation_args = []
            if "correct_m_type" in kwargs:
                mitigation_args += [noise_matrix, desc.type]
            data = sample_error_mitigation(data, *mitigation_args)
        # Save samples from the final onsite measurement to a file if needed
        if save_samples and samples_filename is not None and desc.descriptor == 0 and desc.type == "onsite0":
            save_results(data, samples_filename)
        value = desc.analysis(data.transpose())
        energies[desc.batchiteration] += value
        record = {"Em": value.nominal_value,
                  "num_trials": num_trials,
                  "post-processed": data.shape[0],
                  "params": params,
                  "grad": desc.subbatch,
                  "sgn": desc.descriptor,
                  }
        measured_values.setdefault(desc.type, []).append(record)
        sub[desc.descriptor] += value.nominal_value
    measured_values["all_E"] = per_evaluation
    return energies, measured_values
def extract_values_exact(batch_evals, thetas, circuits_details, results,
                         sample_error_mitigation=None,
                         noise_matrix=None,
                         value_error_mitigation=None, **kwargs):
    """Extract and analyse results obtained from running an exact simulation."""
    global measured_values
    energies = np.zeros(batch_evals)
    measured_values = {}
    per_evaluation = {}
    for desc, outcome in zip(circuits_details, results):
        sub = per_evaluation.setdefault(desc.subbatch, {})
        sub.setdefault(desc.descriptor, 0.0)
        params = thetas[desc.batchiteration]
        state = outcome.state_vector()
        value = desc.analysis(state)
        # ufloat-valued analyses contribute only their nominal value.
        if isinstance(value, uncertainties.core.AffineScalarFunc):
            energies[desc.batchiteration] += value.nominal_value
        else:
            energies[desc.batchiteration] += value
        record = {"Em": value,
                  "num_trials": 0,
                  "post-processed": (0,),
                  "params": params,
                  "grad": desc.subbatch,
                  "sgn": desc.descriptor,
                  }
        measured_values.setdefault(desc.type, []).append(record)
        sub[desc.descriptor] += value
    measured_values["all_E"] = per_evaluation
    return energies, measured_values
def energy_calculation_wrap(run_executables_func=run_executables,
                            extract_values_func=extract_values):
    """Factory returning an objective function bound to the given execution
    and extraction backends (hardware, exact simulation, or MGD variants)."""
    def energy_calculation(thetas, description, qubits, measurement_set,
                           num_trials,
                           batch_evals = 0, num_layers = 0,
                           num_params = 0,
                           optimizer = None,
                           optimizer_kwargs = None,
                           mitigation_kwargs = {},
                           run_args = None,
                           save_samples=False,
                           samples_filename=""):
        """Objective function which returns energy calculations for a list of angles.

        Args:
            thetas -- parameters for which energy is evaluated
            qubits -- qubits on which the program is being executed
            measurement_set -- set of measurements to be carried out
            num_trials -- number of shots
        Returns:
            Array of evaluated energy values; details on results, parameters
            etc. are stored in the module-level `measured_values`.
        """
        # NOTE(review): mutable default mitigation_kwargs={} is never mutated
        # here, but a None default would be safer.
        global measured_values
        params = np.array(thetas)
        batch_evals = description[0] #description.number_of_circuits_in_batch
        descriptions = description[1] #description.descriptions
        params = params.reshape(batch_evals, num_layers, num_params)
        tick = time.perf_counter()
        print(f"---Preparing executables")
        circuits, run_args, circuits_details = prepare_executables(params, descriptions,
                                                 measurement_set,
                                                 run_args,
                                                 num_trials,
                                                 qubits,
                                                 optimizer=optimizer,
                                                 optimizer_kwargs=optimizer_kwargs)
        start_timestamp = datetime.datetime.now()
        tock = time.perf_counter()
        print(f"---Running executables, {len(circuits)} in batch, delta={tock-tick}")
        tick = time.perf_counter()
        results, program_id, job_id = run_executables_func(circuits, run_args,
                                                           measurement_set[0].device)
        tock = time.perf_counter()
        print(f"---Extracting values delta={tock-tick}")
        tick = time.perf_counter()
        E, mv = extract_values_func(batch_evals, params,
                                    circuits_details, results,
                                    save_samples=save_samples,
                                    samples_filename=samples_filename,
                                    **mitigation_kwargs)
        tock = time.perf_counter()
        print(f"---Extracted values delta={tock-tick}")
        measured_values = mv
        measured_values["start_timestamp"] = str(start_timestamp)
        measured_values["end_timestamp"] = str(datetime.datetime.now())
        if "program_id" in run_args:
            measured_values["program_id"] = program_id
            measured_values["job_id"] = job_id
        return E
    return energy_calculation
def insert_x_gates_unused(circuit):
    """Inserts X gates on qubits that are unused for a long period of time,
    in an attempt to cancel out unwanted Z rotations."""
    output_circuit = copy.deepcopy(circuit)
    all_qubits = circuit.all_qubits()
    for i, moment in enumerate(output_circuit):
        # Only consider 1-qubit layers that are not too close to the end.
        if not is_two_qubit_gate_layer(moment) and i < len(output_circuit) - 3:
            unused_qubits = set(all_qubits)
            for gate in moment:
                unused_qubits.discard(gate.qubits[0])
            # The layer right after moment i is assumed to be a 2-qubit layer.
            for gate in output_circuit[i+1]:
                unused_qubits.discard(gate.qubits[0])
                unused_qubits.discard(gate.qubits[1])
            # Scan forward over the run of consecutive 2-qubit layers.
            # NOTE(review): only qubits[0] is discarded here, unlike the
            # i+1 layer above -- confirm this asymmetry is intentional.
            for next_layer in range(i+1, len(output_circuit)):
                if not is_two_qubit_gate_layer(output_circuit[next_layer]):
                    break
                for gate in output_circuit[next_layer]:
                    unused_qubits.discard(gate.qubits[0])
            # Wrap the idle qubits in X...X pairs: one X merged into layer i,
            # its partner merged into the first 1-qubit layer after the run
            # (next_layer keeps its value from the loop above).
            output1 = cirq.Circuit()
            output2 = cirq.Circuit()
            for qubit in unused_qubits:
                output1.append(cirq.X.on(qubit))
                output2.append(cirq.X.on(qubit))
            for gate in output_circuit[i]:
                output1.append(gate)
            for gate in output_circuit[next_layer]:
                output2.append(gate)
            output_circuit[i] = output1[0]
            output_circuit[next_layer] = output2[0]
    return output_circuit
def is_two_qubit_gate_layer(moment):
    """Return True when *moment* holds multi-qubit gates, False for a
    layer of 1-qubit gates (judged from the first gate in the moment)."""
    probe = next(iter(moment))
    return len(probe.qubits) != 1
def insert_x_gates_all(circuit):
    """Inserts layers of X gates before and after every other layer of 2-qubit
    gates, in an attempt to cancel out unwanted Z rotations."""
    x_layer = cirq.Circuit()
    for qubit in circuit.all_qubits():
        x_layer.append(cirq.X.on(qubit))
    output_circuit = cirq.Circuit()
    two_qubit_layers_seen = 0
    for moment in circuit:
        sandwich = False
        if is_two_qubit_gate_layer(moment):
            two_qubit_layers_seen += 1
            # Sandwich every odd-numbered 2-qubit layer between X layers.
            sandwich = two_qubit_layers_seen % 2 == 1
        if sandwich:
            output_circuit.append(x_layer)
        output_circuit.append(moment)
        if sandwich:
            output_circuit.append(x_layer)
    return output_circuit
def optimize_one_qubit_layer(layer):
    """Optimizes a set of moments in a quantum circuit which contain 1-qubit gates only.

    Combines each qubit's run of consecutive 1-qubit gates into a single
    hardware-native PhasedXZ gate; identity products are dropped entirely.

    Args:
        layer -- circuit (or moment sequence) containing only 1-qubit gates
    Returns:
        cirq.Circuit with at most one gate per qubit, or the input unchanged
        when it already consists of a single moment.
    """
    if len(layer) == 1:
        return layer
    matrices = {}
    needs_update = {}
    first_gate = {}
    # Accumulate, per qubit, the product of all its 1-qubit gate unitaries.
    # np.asarray replaces the deprecated np.matrix; products use @ either way.
    for moment in layer:
        for gate in moment:
            qubit = gate.qubits[0]
            if matrices.get(qubit) is None:
                matrices[qubit] = np.asarray(cirq.unitary(gate))
                first_gate[qubit] = gate
            else:
                matrices[qubit] = np.asarray(cirq.unitary(gate)) @ matrices[qubit]
                needs_update[qubit] = True
    # Work out a hardware-native gate decomposition for each 1-qubit matrix.
    output_layer = cirq.Circuit()
    for qubit, matrix in matrices.items():
        if not np.allclose(matrix, [[1, 0], [0, 1]]):
            if needs_update.get(qubit):
                gate = cirq.single_qubit_matrix_to_phxz(matrix)
                if gate is not None:
                    output_layer.append(gate.on(qubit))
            else:
                # Only one gate acted on this qubit; keep it verbatim.
                output_layer.append(first_gate[qubit])
    return output_layer
def check_circuit(circuit):
    """Checks whether a quantum circuit is in the desired form that alternates
    between layers of 1- and 2-qubit gates.

    On the first moment mixing both gate widths, prints the full circuit
    and terminates the process."""
    for i, moment in enumerate(circuit):
        widths = {len(gate.qubits) for gate in moment}
        if 1 in widths and 2 in widths:
            print(f"ERROR: moment {i} contains one and two qubit gates")
            for j, dump_moment in enumerate(circuit):
                print("Moment {}:\n{}".format(j, dump_moment))
            quit()
def optimize_in_layers(circuit, **kwargs):
    """ Optimizes a quantum circuit for Sycamore, but preserving all 2-qubit gate layers.

    Assumes that we're given a circuit which has some layers of 1-qubit gates, and some layers of
    2-qubit gates. Combines and optimizes the 1-qubit layers, but leaves the 2-qubit gate layers alone.

    Args:
        circuit -- circuit to optimize

    Keyword Args:
        add_x_gates -- "unused" or "all" to insert error-mitigating X gates

    Returns:
        optimized circuit
    """
    # Dead timing variables (tick/tock) from the original were removed;
    # they were never reported anywhere.
    output_circuit = cirq.Circuit()
    output_layer = cirq.Circuit()
    if __debug__:
        check_circuit(circuit)
    # Optimize the layers of 1-qubit gates in the circuit one-by-one.
    for moment in circuit:
        num_qubits = len(next(iter(moment)).qubits)
        if num_qubits <= 1:
            output_layer.append(moment)
        else:
            output_circuit.append(optimize_one_qubit_layer(output_layer))
            output_circuit.append(moment)
            output_layer = cirq.Circuit()
    output_circuit.append(optimize_one_qubit_layer(output_layer))
    # Insert X gates for error mitigation if required.
    insert_xs = kwargs.get("add_x_gates")
    if insert_xs is None:
        return output_circuit
    if insert_xs == "unused":
        circuit_with_xs = insert_x_gates_unused(output_circuit)
    elif insert_xs == "all":
        circuit_with_xs = insert_x_gates_all(output_circuit)
    else:
        circuit_with_xs = output_circuit
    # Now optimise the circuit again to combine any added X gates with other
    # 1-qubit gates.
    final_output_circuit = cirq.Circuit()
    output_layer = cirq.Circuit()
    for moment in circuit_with_xs:
        num_qubits = len(next(iter(moment)).qubits)
        if num_qubits == 1:
            output_layer.append(moment)
        else:
            output_layer = optimize_one_qubit_layer(output_layer)
            final_output_circuit.append(output_layer)
            final_output_circuit.append(moment)
            output_layer = cirq.Circuit()
    final_output_circuit.append(optimize_one_qubit_layer(output_layer))
    return final_output_circuit
# Registry of objective-function factories selectable by name.
NAMED_OBJECTIVE = {
    "energy": energy_calculation_wrap,
}
| [
"logging.getLogger",
"logging.StreamHandler",
"cirq.Circuit",
"numpy.array",
"cirq.unitary",
"copy.deepcopy",
"uncertainties.ufloat",
"fhvqe.circuit.ansatz_multilayer_circuit_merge",
"numpy.mean",
"cirq.single_qubit_matrix_to_phxz",
"itertools.product",
"time.perf_counter",
"fhvqe.circuit.pr... | [((1252, 1289), 'logging.getLogger', 'logging.getLogger', (['"""fhvqe.experiment"""'], {}), "('fhvqe.experiment')\n", (1269, 1289), False, 'import logging\n'), ((1305, 1366), 'collections.namedtuple', 'collections.namedtuple', (['"""Measurements"""', '"""pairs prep analysis"""'], {}), "('Measurements', 'pairs prep analysis')\n", (1327, 1366), False, 'import collections\n'), ((1378, 1466), 'collections.namedtuple', 'collections.namedtuple', (['"""Circuits"""', '"""device, initial, final, ansatz, type, analysis"""'], {}), "('Circuits',\n 'device, initial, final, ansatz, type, analysis')\n", (1400, 1466), False, 'import collections\n'), ((2085, 2183), 'collections.namedtuple', 'collections.namedtuple', (['"""Descriptions"""', '"""subbatch, descriptor, type, analysis, batchiteration"""'], {}), "('Descriptions',\n 'subbatch, descriptor, type, analysis, batchiteration')\n", (2107, 2183), False, 'import collections\n'), ((2344, 2370), 'logging.getLogger', 'logging.getLogger', (['"""fhvqe"""'], {}), "('fhvqe')\n", (2361, 2370), False, 'import logging\n'), ((2472, 2500), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (2491, 2500), False, 'import logging\n'), ((2594, 2617), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2615, 2617), False, 'import logging\n'), ((2715, 2804), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"""'], {}), "(\n '%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s')\n", (2732, 2804), False, 'import logging\n'), ((18940, 18954), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (18952, 18954), False, 'import cirq\n'), ((21320, 21331), 'cirq.Simulator', 'Simulator', ([], {}), '()\n', (21329, 21331), False, 'from cirq import Simulator\n'), ((24785, 24806), 'numpy.zeros', 'np.zeros', (['batch_evals'], {}), '(batch_evals)\n', (24793, 24806), True, 'import numpy as np\n'), ((27054, 27110), 'numpy.zeros', 
'np.zeros', (['batch_evals'], {'dtype': 'uncertainties.core.Variable'}), '(batch_evals, dtype=uncertainties.core.Variable)\n', (27062, 27110), True, 'import numpy as np\n'), ((29206, 29227), 'numpy.zeros', 'np.zeros', (['batch_evals'], {}), '(batch_evals)\n', (29214, 29227), True, 'import numpy as np\n'), ((33926, 33948), 'copy.deepcopy', 'copy.deepcopy', (['circuit'], {}), '(circuit)\n', (33939, 33948), False, 'import copy\n'), ((35743, 35757), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (35755, 35757), False, 'import cirq\n'), ((35850, 35864), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (35862, 35864), False, 'import cirq\n'), ((37275, 37289), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (37287, 37289), False, 'import cirq\n'), ((38871, 38885), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (38883, 38885), False, 'import cirq\n'), ((38905, 38919), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (38917, 38919), False, 'import cirq\n'), ((38936, 38955), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (38953, 38955), False, 'import time\n'), ((40015, 40029), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (40027, 40029), False, 'import cirq\n'), ((40049, 40063), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (40061, 40063), False, 'import cirq\n'), ((40551, 40570), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (40568, 40570), False, 'import time\n'), ((6061, 6108), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': '(total_len - 2)'}), '([0, 1], repeat=total_len - 2)\n', (6078, 6108), False, 'import itertools\n'), ((9567, 9614), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': '(total_len - 2)'}), '([0, 1], repeat=total_len - 2)\n', (9584, 9614), False, 'import itertools\n'), ((13199, 13215), 'uncertainties.ufloat', 'ufloat', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (13205, 13215), False, 'from uncertainties import ufloat\n'), ((13569, 13585), 'uncertainties.ufloat', 'ufloat', 
(['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (13575, 13585), False, 'from uncertainties import ufloat\n'), ((13967, 13983), 'uncertainties.ufloat', 'ufloat', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (13973, 13983), False, 'from uncertainties import ufloat\n'), ((16364, 16382), 'fhvqe.circuit.prepV2wrap', 'prepV2wrap', (['nh', 'nv'], {}), '(nh, nv)\n', (16374, 16382), False, 'from fhvqe.circuit import ansatz, ansatz_multilayer_circuit, ansatz_multilayer_circuit_merge, ansatz_multistep, prepH, prepV, prepV2wrap\n'), ((19770, 19800), 'cirq.measure', 'cirq.measure', (['*qubits'], {'key': '"""x"""'}), "(*qubits, key='x')\n", (19782, 19800), False, 'import cirq\n'), ((21397, 21431), 'cirq.Circuit', 'cirq.Circuit', (['circuit.moments[:-1]'], {}), '(circuit.moments[:-1])\n', (21409, 21431), False, 'import cirq\n'), ((22789, 22849), 'fhvqe.circuit.ansatz_multilayer_circuit_merge', 'ansatz_multilayer_circuit_merge', (['meas.ansatz', 'params', 'qubits'], {}), '(meas.ansatz, params, qubits)\n', (22820, 22849), False, 'from fhvqe.circuit import ansatz, ansatz_multilayer_circuit, ansatz_multilayer_circuit_merge, ansatz_multistep, prepH, prepV, prepV2wrap\n'), ((31605, 31621), 'numpy.array', 'np.array', (['thetas'], {}), '(thetas)\n', (31613, 31621), True, 'import numpy as np\n'), ((31853, 31872), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (31870, 31872), False, 'import time\n'), ((32458, 32481), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32479, 32481), False, 'import datetime\n'), ((32497, 32516), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32514, 32516), False, 'import time\n'), ((32618, 32637), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32635, 32637), False, 'import time\n'), ((32812, 32831), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32829, 32831), False, 'import time\n'), ((32904, 32923), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32921, 32923), False, 'import 
time\n'), ((33302, 33321), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (33319, 33321), False, 'import time\n'), ((3996, 4030), 'numpy.mean', 'np.mean', (['(results[q1] * results[q2])'], {}), '(results[q1] * results[q2])\n', (4003, 4030), True, 'import numpy as np\n'), ((4283, 4317), 'numpy.mean', 'np.mean', (['(results[q2] - results[q1])'], {}), '(results[q2] - results[q1])\n', (4290, 4317), True, 'import numpy as np\n'), ((4826, 4847), 'numpy.mean', 'np.mean', (['(res * parity)'], {}), '(res * parity)\n', (4833, 4847), True, 'import numpy as np\n'), ((13262, 13296), 'numpy.mean', 'np.mean', (['(results[q1] * results[q2])'], {}), '(results[q1] * results[q2])\n', (13269, 13296), True, 'import numpy as np\n'), ((13315, 13356), 'numpy.std', 'np.std', (['(results[q1] * results[q2])'], {'ddof': '(1)'}), '(results[q1] * results[q2], ddof=1)\n', (13321, 13356), True, 'import numpy as np\n'), ((13425, 13441), 'uncertainties.ufloat', 'ufloat', (['res', 'std'], {}), '(res, std)\n', (13431, 13441), False, 'from uncertainties import ufloat\n'), ((13632, 13666), 'numpy.mean', 'np.mean', (['(results[q2] - results[q1])'], {}), '(results[q2] - results[q1])\n', (13639, 13666), True, 'import numpy as np\n'), ((13685, 13726), 'numpy.std', 'np.std', (['(results[q2] - results[q1])'], {'ddof': '(1)'}), '(results[q2] - results[q1], ddof=1)\n', (13691, 13726), True, 'import numpy as np\n'), ((13795, 13811), 'uncertainties.ufloat', 'ufloat', (['res', 'std'], {}), '(res, std)\n', (13801, 13811), False, 'from uncertainties import ufloat\n'), ((14325, 14346), 'numpy.mean', 'np.mean', (['(res * parity)'], {}), '(res * parity)\n', (14332, 14346), True, 'import numpy as np\n'), ((14370, 14386), 'uncertainties.ufloat', 'ufloat', (['res', 'std'], {}), '(res, std)\n', (14376, 14386), False, 'from uncertainties import ufloat\n'), ((19845, 19888), 'cirq.google.Sycamore.validate_circuit', 'cirq.google.Sycamore.validate_circuit', (['prog'], {}), '(prog)\n', (19882, 19888), False, 
'import cirq\n'), ((33520, 33543), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (33541, 33543), False, 'import datetime\n'), ((34752, 34766), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (34764, 34766), False, 'import cirq\n'), ((34789, 34803), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (34801, 34803), False, 'import cirq\n'), ((35810, 35826), 'cirq.X.on', 'cirq.X.on', (['qubit'], {}), '(qubit)\n', (35819, 35826), False, 'import cirq\n'), ((37348, 37385), 'numpy.allclose', 'np.allclose', (['matrix', '[[1, 0], [0, 1]]'], {}), '(matrix, [[1, 0], [0, 1]])\n', (37359, 37385), True, 'import numpy as np\n'), ((39403, 39417), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (39415, 39417), False, 'import cirq\n'), ((40447, 40461), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (40459, 40461), False, 'import cirq\n'), ((6233, 6332), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (6260, 6332), False, 'import cirq\n'), ((6500, 6599), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (6527, 6599), False, 'import cirq\n'), ((6638, 6737), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (6665, 6737), False, 'import cirq\n'), ((7689, 7717), 'numpy.abs', 'np.abs', (['results[all_indices]'], {}), '(results[all_indices])\n', (7695, 7717), True, 'import numpy as np\n'), ((9739, 9838), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) 
+ element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (9766, 9838), False, 'import cirq\n'), ((10006, 10105), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (10033, 10105), False, 'import cirq\n'), ((10144, 10243), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (10171, 10243), False, 'import cirq\n'), ((14258, 14286), 'numpy.std', 'np.std', (['(res * parity)'], {'ddof': '(1)'}), '(res * parity, ddof=1)\n', (14264, 14286), True, 'import numpy as np\n'), ((15981, 16010), 'fhvqe.tools.map_site_to_JW', 'map_site_to_JW', (['nh', 'nv', 'site1'], {}), '(nh, nv, site1)\n', (15995, 16010), False, 'from fhvqe.tools import map_site_to_JW\n'), ((16012, 16041), 'fhvqe.tools.map_site_to_JW', 'map_site_to_JW', (['nh', 'nv', 'site2'], {}), '(nh, nv, site2)\n', (16026, 16041), False, 'from fhvqe.tools import map_site_to_JW\n'), ((20528, 20551), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20549, 20551), False, 'import datetime\n'), ((37447, 37487), 'cirq.single_qubit_matrix_to_phxz', 'cirq.single_qubit_matrix_to_phxz', (['matrix'], {}), '(matrix)\n', (37479, 37487), False, 'import cirq\n'), ((7898, 7931), 'numpy.abs', 'np.abs', (['results[all_indices_plus]'], {}), '(results[all_indices_plus])\n', (7904, 7931), True, 'import numpy as np\n'), ((7945, 7979), 'numpy.abs', 'np.abs', (['results[all_indices_minus]'], {}), '(results[all_indices_minus])\n', (7951, 7979), True, 'import numpy as np\n'), ((8247, 8280), 'numpy.abs', 'np.abs', (['results[all_indices_plus]'], {}), '(results[all_indices_plus])\n', (8253, 8280), True, 'import numpy as np\n'), ((8294, 8328), 'numpy.abs', 'np.abs', (['results[all_indices_minus]'], {}), 
'(results[all_indices_minus])\n', (8300, 8328), True, 'import numpy as np\n'), ((11202, 11230), 'numpy.abs', 'np.abs', (['results[all_indices]'], {}), '(results[all_indices])\n', (11208, 11230), True, 'import numpy as np\n'), ((23177, 23221), 'fhvqe.circuit.ansatz', 'ansatz', (['meas.final', '[params[-1][-1]]', 'qubits'], {}), '(meas.final, [params[-1][-1]], qubits)\n', (23183, 23221), False, 'from fhvqe.circuit import ansatz, ansatz_multilayer_circuit, ansatz_multilayer_circuit_merge, ansatz_multistep, prepH, prepV, prepV2wrap\n'), ((34875, 34891), 'cirq.X.on', 'cirq.X.on', (['qubit'], {}), '(qubit)\n', (34884, 34891), False, 'import cirq\n'), ((34924, 34940), 'cirq.X.on', 'cirq.X.on', (['qubit'], {}), '(qubit)\n', (34933, 34940), False, 'import cirq\n'), ((36962, 36980), 'cirq.unitary', 'cirq.unitary', (['gate'], {}), '(gate)\n', (36974, 36980), False, 'import cirq\n'), ((7081, 7180), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (7108, 7180), False, 'import cirq\n'), ((7211, 7310), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (7238, 7310), False, 'import cirq\n'), ((7363, 7462), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (7390, 7462), False, 'import cirq\n'), ((7492, 7591), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (7519, 7591), False, 'import cirq\n'), ((10587, 10686), 
'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (10614, 10686), False, 'import cirq\n'), ((10717, 10816), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (10744, 10816), False, 'import cirq\n'), ((10869, 10968), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) + element[q2 - 1:])'], {}), '(element[:q1] + (0,) + element[q1:q2 - 1] + (1,) +\n element[q2 - 1:])\n', (10896, 10968), False, 'import cirq\n'), ((10998, 11097), 'cirq.big_endian_bits_to_int', 'cirq.big_endian_bits_to_int', (['(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) + element[q2 - 1:])'], {}), '(element[:q1] + (1,) + element[q1:q2 - 1] + (0,) +\n element[q2 - 1:])\n', (11025, 11097), False, 'import cirq\n'), ((11455, 11488), 'numpy.abs', 'np.abs', (['results[all_indices_plus]'], {}), '(results[all_indices_plus])\n', (11461, 11488), True, 'import numpy as np\n'), ((11502, 11536), 'numpy.abs', 'np.abs', (['results[all_indices_minus]'], {}), '(results[all_indices_minus])\n', (11508, 11536), True, 'import numpy as np\n'), ((11854, 11887), 'numpy.abs', 'np.abs', (['results[all_indices_plus]'], {}), '(results[all_indices_plus])\n', (11860, 11887), True, 'import numpy as np\n'), ((11901, 11935), 'numpy.abs', 'np.abs', (['results[all_indices_minus]'], {}), '(results[all_indices_minus])\n', (11907, 11935), True, 'import numpy as np\n'), ((37085, 37103), 'cirq.unitary', 'cirq.unitary', (['gate'], {}), '(gate)\n', (37097, 37103), False, 'import cirq\n')] |
#!/usr/bin/env python
"""Tests for the pysiaf aperture classes.
Authors
-------
<NAME>
"""
import numpy as np
import pytest
from ..aperture import HstAperture
from ..iando import read
from ..siaf import Siaf, get_jwst_apertures
from ..utils.tools import get_grid_coordinates
@pytest.fixture(scope='module')
def siaf_objects():
    """Return a list of Siaf objects, one per tested JWST instrument.

    NIRSpec is currently excluded from the tested instrument set.

    :return: list of Siaf instances
    """
    instrument_names = 'NIRCam NIRISS FGS MIRI'.split()
    return [Siaf(name) for name in instrument_names]
def test_hst_aperture_init():
    """Verify that setting ``a_v2_ref`` on an HstAperture propagates to ``V2Ref``."""
    aperture = HstAperture()
    aperture.a_v2_ref = -100.
    # The HST-specific attribute must mirror the generic V2Ref attribute.
    assert aperture.a_v2_ref == aperture.V2Ref
def test_jwst_aperture_transforms(siaf_objects, verbose=False):
    """Test transformations between frames.

    Transform back and forth between frames and verify that input==output.

    Parameters
    ----------
    siaf_objects
    verbose
    """
    labels = ['X', 'Y']
    from_frame = 'sci'
    to_frames = 'det idl tel'.split()

    x_sci = np.linspace(-10, 10, 3)
    y_sci = np.linspace(10, -10, 3)

    for siaf in siaf_objects:
        # Per-instrument tolerance on the mean roundtrip error.
        if siaf.instrument in ['MIRI']:
            threshold = 0.2
        elif siaf.instrument in ['NIRCam']:
            threshold = 42.
        else:
            threshold = 0.1

        for aper_name in siaf.apertures.keys():
            aperture = siaf[aper_name]

            # Compound/transform apertures (and certain SLIT apertures) do not
            # support the plain frame transformations exercised here.
            if aperture.AperType in ['COMPOUND', 'TRANSFORM']:
                continue
            if (siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec']
                    and aperture.AperType == 'SLIT'):
                continue

            if verbose:
                print('testing {} {}'.format(siaf.instrument, aper_name))

            for to_frame in to_frames:
                forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))
                backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))

                # Roundtrip: forward then backward must reproduce the input.
                x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))
                mean_errors = [np.mean(np.abs(x_sci - x_out)),
                               np.mean(np.abs(y_sci - y_out))]
                for i, error in enumerate(mean_errors):
                    if verbose:
                        print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(
                            siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))
                    assert error < threshold
def test_jwst_aperture_vertices(siaf_objects):
    """Test the JwstAperture vertices by rederiving them and comparing to SIAF.

    Rederive Idl vertices and compare with content of SIAFXML
    """
    verbose = False
    threshold = 0.2
    labels = ['X', 'Y']

    for siaf in siaf_objects:
        for aper_name in siaf.apertures.keys():
            aperture = siaf[aper_name]

            # Skip aperture types for which plain Idl vertices are not defined.
            if aperture.AperType in ['COMPOUND', 'TRANSFORM']:
                continue
            if (siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec']
                    and aperture.AperType == 'SLIT'):
                continue

            if verbose:
                print('testing {} {}'.format(siaf.instrument, aper_name))

            # Idl corners rederived from the Sci attributes (XSciRef, XSciSize etc.)
            x_rederived, y_rederived = aperture.corners('idl', rederive=True)

            # Idl corners as stored in the SIAFXML
            x_idl_vertices = np.array([getattr(aperture, 'XIdlVert{:d}'.format(j)) for j in [1, 2, 3, 4]])
            y_idl_vertices = np.array([getattr(aperture, 'YIdlVert{:d}'.format(j)) for j in [1, 2, 3, 4]])

            if verbose:
                print(x_idl_vertices, x_rederived)
                print(y_idl_vertices, y_rederived)

            x_mean_error = np.abs(np.mean(x_idl_vertices) - np.mean(x_rederived))
            y_mean_error = np.abs(np.mean(y_idl_vertices) - np.mean(y_rederived))

            if verbose:
                for i, error in enumerate([x_mean_error, y_mean_error]):
                    print('{} {}: Error in {}Idl_vertices is {:02.6f})'.format(
                        siaf.instrument, aper_name, labels[i], error))

            assert x_mean_error < threshold
            assert y_mean_error < threshold
def test_raw_transformations(verbose=False):
    """Test raw_to_sci and sci_to_raw transformations"""
    siaf_detector_layout = read.read_siaf_detector_layout()
    master_aperture_names = siaf_detector_layout['AperName'].data
    apertures_dict = {
        'instrument': siaf_detector_layout['InstrName'].data,
        'pattern': master_aperture_names,
    }
    apertures = get_jwst_apertures(apertures_dict, exact_pattern_match=True)

    grid_amplitude = 2048
    x_raw, y_raw = get_grid_coordinates(10, (grid_amplitude/2, grid_amplitude/2), grid_amplitude)

    labels = ['X', 'Y']
    threshold = 0.1
    from_frame = 'raw'
    to_frame = 'sci'

    # compute roundtrip error
    for aper_name, aperture in apertures.apertures.items():
        forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))
        backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))

        x_out, y_out = backward_transform(*forward_transform(x_raw, y_raw))
        mean_errors = [np.mean(np.abs(x_raw - x_out)), np.mean(np.abs(y_raw - y_out))]
        for i, error in enumerate(mean_errors):
            if verbose:
                print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(
                    aperture.InstrName, aper_name, from_frame, to_frame, labels[i], error))
            assert error < threshold
| [
"pytest.fixture",
"numpy.abs",
"numpy.linspace",
"numpy.mean"
] | [((286, 316), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (300, 316), False, 'import pytest\n'), ((1217, 1240), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(3)'], {}), '(-10, 10, 3)\n', (1228, 1240), True, 'import numpy as np\n'), ((1253, 1276), 'numpy.linspace', 'np.linspace', (['(10)', '(-10)', '(3)'], {}), '(10, -10, 3)\n', (1264, 1276), True, 'import numpy as np\n'), ((5892, 5913), 'numpy.abs', 'np.abs', (['(x_raw - x_out)'], {}), '(x_raw - x_out)\n', (5898, 5913), True, 'import numpy as np\n'), ((5946, 5967), 'numpy.abs', 'np.abs', (['(y_raw - y_out)'], {}), '(y_raw - y_out)\n', (5952, 5967), True, 'import numpy as np\n'), ((2398, 2419), 'numpy.abs', 'np.abs', (['(x_sci - x_out)'], {}), '(x_sci - x_out)\n', (2404, 2419), True, 'import numpy as np\n'), ((2464, 2485), 'numpy.abs', 'np.abs', (['(y_sci - y_out)'], {}), '(y_sci - y_out)\n', (2470, 2485), True, 'import numpy as np\n'), ((4375, 4398), 'numpy.mean', 'np.mean', (['x_idl_vertices'], {}), '(x_idl_vertices)\n', (4382, 4398), True, 'import numpy as np\n'), ((4401, 4434), 'numpy.mean', 'np.mean', (['x_idl_vertices_rederived'], {}), '(x_idl_vertices_rederived)\n', (4408, 4434), True, 'import numpy as np\n'), ((4474, 4497), 'numpy.mean', 'np.mean', (['y_idl_vertices'], {}), '(y_idl_vertices)\n', (4481, 4497), True, 'import numpy as np\n'), ((4500, 4533), 'numpy.mean', 'np.mean', (['y_idl_vertices_rederived'], {}), '(y_idl_vertices_rederived)\n', (4507, 4533), True, 'import numpy as np\n')] |
"""
The following colors are mentioned in Systra's graphical charter: \n
red shades \n
grey shades \n
rainbow shades : spot colors, vivid and highly contrasted \n
sorted colors advised for word documents \n
secondary colors \n
"""
import itertools
import matplotlib.pyplot as plt
import numpy as np
# Couleurs d'accompagnement de la charte graphique
rainbow_shades = ["#D22328", "#559BB4", "#91A564", "#DC9100", "#8C4B7D", "#A08C69",
"#647D6E", "#5A7382", "#64411E", "#A00037", "#643C5A"]
# Nuances de rouge
# en rgb [(105,18,20),(157,26,30),(210,35,40),(232,119,122),(240,164,166),(247,210,211)]
red_shades = ['#691214', '#9d1a1e', '#d22328', '#e8777a', '#f0a4a6', '#f7d2d3']
# Nuances de gris
# en rgb [(48,48,50),(90,90,90),(127,127,127),(166,165,165),(199,199,200),(227,227,228)]
grey_shades = ['#303032', '#5a5a5a', '#7f7f7f', '#a6a5a5', '#c7c7c8', '#e3e3e4']
# Couleurs ordonné dans le sens des préconisations de la charte graphique
sorted_colors = ['#d22328', '#7f7f7f', '#691214', '#f0a4a6']
# Couleurs secondaires
# en rgb [(100,60,90),(158,27,22),(100,66,30),(100,125,110),(91,115,130),(84,154,179),(219,145,3),(84,160,60)]
secondary_colors = ['#643c5a', '#9e1b16', '#64421e', '#647d6e', '#5b7382', '#549ab3',
'#db9103', '#54a03c']
# Couleurs utilisées par Linedraft
linedraft_shades = ["#1f77b4", "#2ca02c", "#d62728", "#9467bd", "#ff7f0e", "#8c564b",
"#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
all_colors = {
'rainbow_shades': rainbow_shades,
'red_shades': red_shades,
'grey_shades': grey_shades,
'sorted_colors': sorted_colors,
'secondary_colors': secondary_colors,
'linedraft_shades': linedraft_shades
}
def display_colors(label_hexa_dict):
    """Display colors from a dict {label: hexadecimal}.

    Each color is drawn as a thick horizontal line at its own height, and a
    legend maps labels to colors.

    :param label_hexa_dict: mapping from legend label to hex color string
    :return: the artists of the last plotted line, or None if the dict is empty
    """
    x = np.linspace(0, 1, 10)
    plot = None
    for i, (label, color) in enumerate(label_hexa_dict.items()):
        plot = plt.plot(x, x * 0 + i, linewidth=10, color=color, label=label)
    # Draw the legend once after all lines are plotted (the original drew it
    # redundantly on every iteration, and crashed on an empty dict).
    if plot is not None:
        plt.legend(loc='center left', bbox_to_anchor=(1.1, 0.5))
    return plot
def show_all_colors(
    figsize=(15, 10),
    color_lists=('rainbow_shades', 'red_shades', 'grey_shades',
                 'sorted_colors', 'secondary_colors', 'linedraft_shades')
):
    """Plot the selected palettes of ``all_colors``, one subplot per palette.

    :param figsize: size of the matplotlib figure
    :param color_lists: names of the palettes (keys of ``all_colors``) to show;
        the default is a tuple instead of a list to avoid a mutable default
        argument.
    """
    n = len(color_lists)
    # Lay the subplots out on one row for up to three palettes, two rows beyond.
    if n <= 3:
        n_rows = 1
        n_cols = n
    else:
        n_rows = 2
        n_cols = int(np.ceil(n / n_rows))
    f = plt.figure(figsize=figsize)
    index = 1
    for name, c_list in all_colors.items():
        if name not in color_lists:
            continue
        to_show = {}
        for i in range(len(c_list)):
            to_show.update({'{}[{}] - {}'.format(name, i, c_list[i]): c_list[i]})
        _ = f.add_subplot(n_rows, n_cols, index)
        _ = display_colors(to_show)
        # Legends of the first row go above the axes, the others below.
        if (index - 1) // n_cols == 0:
            plt.legend(loc='lower left', bbox_to_anchor=(0, 1.1))
        else:
            plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1))
        index += 1
def itercolors(color_list, repetition):
    """Return a flat list where each color of ``color_list`` is repeated
    ``repetition`` times, preserving order."""
    return [color for color in color_list for _ in range(repetition)]
# Hex digit characters accepted when parsing color strings.
_NUMERALS = '0123456789abcdefABCDEF'
# Lookup table mapping every two-character hex pair to its integer value.
_HEXDEC = {v: int(v, 16) for v in (x + y for x in _NUMERALS for y in _NUMERALS)}
# Format-spec letters selecting lower/upper case hex digits in triplet().
LOWERCASE, UPPERCASE = 'x', 'X'
def rgb(triplet):
    """Convert a hex color string like '#d22328' to an ``(r, g, b)`` tuple.

    :param triplet: color string starting with '#', followed by six hex digits
        (upper or lower case)
    :return: tuple of three integers in [0, 255]
    """
    # Use the built-in base-16 parser instead of the hand-rolled _HEXDEC table.
    digits = triplet[1:]
    return int(digits[0:2], 16), int(digits[2:4], 16), int(digits[4:6], 16)
def triplet(rgb, lettercase=LOWERCASE):
    """Convert an ``(r, g, b)`` tuple to a '#rrggbb' hex color string.

    :param rgb: tuple of three integers in [0, 255]
    :param lettercase: LOWERCASE ('x') or UPPERCASE ('X') hex digits
    :return: '#' followed by six hex digits in the requested case
    """
    # The previous implementation called .upper() unconditionally, which made
    # the lettercase parameter dead code; honor the requested case instead.
    # Hex color strings are case-insensitive for matplotlib consumers.
    return '#' + format(rgb[0] << 16 | rgb[1] << 8 | rgb[2], '06' + lettercase)
def clear(rgb, x=50):
    """Lighten an ``(r, g, b)`` color by blending it ``x`` percent toward white.

    :param rgb: tuple of three integers in [0, 255]
    :param x: percentage of white mixed in (0 keeps the color, 100 gives white)
    :return: lightened (r, g, b) tuple
    """
    weight = 100 - x
    return tuple(round((weight * channel + x * 255) / 100) for channel in rgb)
def clear_shades():
    """Return the rainbow_shades palette lightened 50% toward white."""
    lightened = []
    for shade in rainbow_shades:
        lightened.append(triplet(clear(rgb(shade))))
    return lightened
# Mapping from French color names to the index of the matching color in
# rainbow_shades. The keys are matched as substrings by in_string() and must
# stay in French — they are runtime data, not comments.
d = {
    'marron': 8,
    'orange': 5,
    'rouge': 0,
    'bleue': 1,
    'verte': 2,
    'jaune': 3,
    'violette': 4,
    'rose': 9
}
def in_string(name):
    """Return the rainbow_shades color whose French name occurs in ``name``.

    The mapping ``d`` is scanned in insertion order and the first match wins;
    rainbow_shades[7] is the fallback when no known color name is found.
    """
    return next(
        (rainbow_shades[index] for color_name, index in d.items() if color_name in name),
        rainbow_shades[7],
    )
| [
"itertools.chain",
"numpy.ceil",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.legend"
] | [((1830, 1851), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1841, 1851), True, 'import numpy as np\n'), ((2413, 2440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2423, 2440), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1988), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x * 0 + i)'], {'linewidth': '(10)', 'color': 'c', 'label': 'n'}), '(x, x * 0 + i, linewidth=10, color=c, label=n)\n', (1942, 1988), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2053), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1.1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1.1, 0.5))\n", (2007, 2053), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3125), 'itertools.chain', 'itertools.chain', (['*[([color] * repetition) for color in color_list]'], {}), '(*[([color] * repetition) for color in color_list])\n', (3074, 3125), False, 'import itertools\n'), ((2384, 2403), 'numpy.ceil', 'np.ceil', (['(n / n_rows)'], {}), '(n / n_rows)\n', (2391, 2403), True, 'import numpy as np\n'), ((2835, 2888), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'bbox_to_anchor': '(0, 1.1)'}), "(loc='lower left', bbox_to_anchor=(0, 1.1))\n", (2845, 2888), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2977), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'bbox_to_anchor': '(0, -0.1)'}), "(loc='upper left', bbox_to_anchor=(0, -0.1))\n", (2933, 2977), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.